Merge "Revert "Bump cirros version to 0.6.1""
diff --git a/.zuul.yaml b/.zuul.yaml
index 6ad7148..fa7f180 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -82,7 +82,7 @@
     name: devstack-single-node-fedora-latest
     nodes:
       - name: controller
-        label: fedora-35
+        label: fedora-36
     groups:
       - name: tempest
         nodes:
@@ -99,6 +99,26 @@
           - controller
 
 - nodeset:
+    name: devstack-single-node-rockylinux-9
+    nodes:
+      - name: controller
+        label: rockylinux-9
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-openeuler-22.03
+    nodes:
+      - name: controller
+        label: openEuler-22-03-LTS
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: openstack-two-node
     nodes:
       - name: controller
@@ -159,6 +179,36 @@
           - compute1
 
 - nodeset:
+    name: openstack-two-node-jammy
+    nodes:
+      - name: controller
+        label: ubuntu-jammy
+      - name: compute1
+        label: ubuntu-jammy
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
     name: openstack-two-node-focal
     nodes:
       - name: controller
@@ -335,7 +385,6 @@
     required-projects:
       - opendev.org/openstack/devstack
     roles:
-      - zuul: opendev.org/openstack/devstack-gate
       - zuul: opendev.org/openstack/openstack-zuul-jobs
     vars:
       devstack_localrc:
@@ -446,7 +495,7 @@
     description: |
       Minimal devstack base job, intended for use by jobs that need
       less than the normal minimum set of required-projects.
-    nodeset: openstack-single-node-focal
+    nodeset: openstack-single-node-jammy
     required-projects:
       - opendev.org/openstack/requirements
     vars:
@@ -460,6 +509,7 @@
         dstat: false
         etcd3: true
         memory_tracker: true
+        file_tracker: true
         mysql: true
         rabbit: true
     group-vars:
@@ -468,6 +518,7 @@
           # Shared services
           dstat: false
           memory_tracker: true
+          file_tracker: true
         devstack_localrc:
           # Multinode specific settings
           HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -515,6 +566,7 @@
       - opendev.org/openstack/swift
     timeout: 7200
     vars:
+      configure_swap_size: 4096
       devstack_localrc:
         # Common OpenStack services settings
         SWIFT_REPLICAS: 1
@@ -535,6 +587,7 @@
         dstat: false
         etcd3: true
         memory_tracker: true
+        file_tracker: true
         mysql: true
         rabbit: true
         tls-proxy: true
@@ -584,6 +637,7 @@
           # Shared services
           dstat: false
           memory_tracker: true
+          file_tracker: true
           tls-proxy: true
           # Nova services
           n-cpu: true
@@ -638,10 +692,13 @@
 - job:
     name: devstack-multinode
     parent: devstack
-    nodeset: openstack-two-node-focal
+    nodeset: openstack-two-node-jammy
     description: |
       Simple multinode test to verify multinode functionality on devstack side.
       This is not meant to be used as a parent job.
+    vars:
+      devstack_localrc:
+        MYSQL_REDUCE_MEMORY: true
 
 # NOTE(ianw) Platform tests have traditionally been non-voting because
 # we often have to rush things through devstack to stabilise the gate,
@@ -656,8 +713,6 @@
     # TODO(kopecmartin) n-v until the following is resolved:
     # https://bugs.launchpad.net/neutron/+bug/1979047
     voting: false
-    vars:
-      configure_swap_size: 4096
 
 - job:
     name: devstack-platform-debian-bullseye
@@ -669,16 +724,20 @@
       configure_swap_size: 4096
 
 - job:
-    name: devstack-platform-ubuntu-jammy
+    name: devstack-platform-rocky-blue-onyx
     parent: tempest-full-py3
-    description: Ubuntu 22.04 LTS (jammy) platform test
-    nodeset: openstack-single-node-jammy
+    description: Rocky Linux 9 Blue Onyx platform test
+    nodeset: devstack-single-node-rockylinux-9
     timeout: 9000
     vars:
       configure_swap_size: 4096
-      devstack_services:
-        # Horizon doesn't like py310
-        horizon: false
+
+- job:
+    name: devstack-platform-ubuntu-focal
+    parent: tempest-full-py3
+    description: Ubuntu 20.04 LTS (focal) platform test
+    nodeset: openstack-single-node-focal
+    timeout: 9000
 
 - job:
     name: devstack-platform-ubuntu-jammy-ovn-source
@@ -706,8 +765,62 @@
         Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vxlan
       devstack_services:
-        # Horizon doesn't like py310
-        horizon: false
+        # Disable OVN services
+        ovn-northd: false
+        ovn-controller: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        # Disable Neutron ML2/OVN services
+        q-ovn-metadata-agent: false
+        # Enable Neutron ML2/OVS services
+        q-agt: true
+        q-dhcp: true
+        q-l3: true
+        q-meta: true
+        q-metering: true
+    group-vars:
+      subnode:
+        devstack_services:
+          # Disable OVN services
+          ovn-controller: false
+          ovs-vswitchd: false
+          ovsdb-server: false
+          # Disable Neutron ML2/OVN services
+          q-ovn-metadata-agent: false
+          # Enable Neutron ML2/OVS services
+          q-agt: true
+
+- job:
+    name: devstack-platform-openEuler-22.03-ovn-source
+    parent: tempest-full-py3
+    description: openEuler 22.03 LTS platform test (OVN)
+    nodeset: devstack-single-node-openeuler-22.03
+    voting: false
+    timeout: 9000
+    vars:
+      configure_swap_size: 4096
+      devstack_localrc:
+        # NOTE(wxy): OVN package is not supported by openEuler yet. Build it
+        # from source instead.
+        OVN_BUILD_FROM_SOURCE: True
+        OVN_BRANCH: "v21.06.0"
+        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+        OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+    name: devstack-platform-openEuler-22.03-ovs
+    parent: tempest-full-py3
+    description: openEuler 22.03 LTS platform test (OVS)
+    nodeset: devstack-single-node-openeuler-22.03
+    voting: false
+    timeout: 9000
+    vars:
+      configure_swap_size: 8192
+      devstack_localrc:
+        Q_AGENT: openvswitch
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+      devstack_services:
         # Disable OVN services
         ovn-northd: false
         ovn-controller: false
@@ -750,12 +863,6 @@
     description: Fedora latest platform test
     nodeset: devstack-single-node-fedora-latest
     voting: false
-    vars:
-      configure_swap_size: 4096
-      # Python 3.10 dependency issues; see
-      # https://bugs.launchpad.net/horizon/+bug/1960204
-      devstack_services:
-        horizon: false
 
 - job:
     name: devstack-platform-fedora-latest-virt-preview
@@ -764,7 +871,6 @@
     nodeset: devstack-single-node-fedora-latest
     voting: false
     vars:
-      configure_swap_size: 4096
       devstack_localrc:
         ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
 
@@ -824,7 +930,7 @@
 
 - job:
     name: devstack-unit-tests
-    nodeset: ubuntu-focal
+    nodeset: ubuntu-jammy
     description: |
       Runs unit tests on devstack project.
 
@@ -844,9 +950,12 @@
         - devstack-platform-fedora-latest
         - devstack-platform-centos-9-stream
         - devstack-platform-debian-bullseye
-        - devstack-platform-ubuntu-jammy
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-ubuntu-focal
         - devstack-platform-ubuntu-jammy-ovn-source
         - devstack-platform-ubuntu-jammy-ovs
+        - devstack-platform-openEuler-22.03-ovn-source
+        - devstack-platform-openEuler-22.03-ovs
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
@@ -894,7 +1003,7 @@
         # https://bugs.launchpad.net/neutron/+bug/1979047
         # - devstack-platform-centos-9-stream
         - devstack-platform-debian-bullseye
-        - devstack-platform-ubuntu-jammy
+        - devstack-platform-ubuntu-focal
         - devstack-enforce-scope
         - devstack-multinode
         - devstack-unit-tests
@@ -979,3 +1088,7 @@
     periodic:
       jobs:
         - devstack-no-tls-proxy
+    periodic-weekly:
+      jobs:
+        - devstack-platform-openEuler-22.03-ovn-source
+        - devstack-platform-openEuler-22.03-ovs
diff --git a/clean.sh b/clean.sh
index 870dfd4..6a31cc6 100755
--- a/clean.sh
+++ b/clean.sh
@@ -50,7 +50,6 @@
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
 
 set -o xtrace
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 757b400..a83b2de 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -181,6 +181,9 @@
 If the ``*_PASSWORD`` variables are not set here you will be prompted to
 enter values for them by ``stack.sh``.
 
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
 The network ranges must not overlap with any networks in use on the
 host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly
 used for both the local networking and Nova's fixed and floating ranges.
@@ -636,7 +639,7 @@
 ::
 
     $ cd /opt/stack/tempest
-    $ tox -efull  tempest.scenario.test_network_basic_ops
+    $ tox -e smoke
 
 By default tempest is downloaded and the config file is generated, but the
 tempest package is not installed in the system's global site-packages (the
@@ -669,6 +672,35 @@
 or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value.  (The default for
 each is 10.)
 
+DevStack's Cinder LVM configuration module currently supports both iSCSI and
+NVMe connections, and we can choose which one to use with options
+``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``,
+and ``CINDER_TARGET_PORT``.
+
+Defaults use iSCSI with the LIO target manager::
+
+  CINDER_TARGET_HELPER="lioadm"
+  CINDER_TARGET_PROTOCOL="iscsi"
+  CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:"
+  CINDER_TARGET_PORT=3260
+
+Additionally there are 3 supported transport protocols for NVMe,
+``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target
+is selected the protocol, prefix, and port defaults will change to more
+sensible defaults for NVMe::
+
+  CINDER_TARGET_HELPER="nvmet"
+  CINDER_TARGET_PROTOCOL="nvmet_rdma"
+  CINDER_TARGET_PREFIX="nvme-subsystem-1"
+  CINDER_TARGET_PORT=4420
+
+When selecting the RDMA transport protocol DevStack will create on Cinder nodes
+a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined
+then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``.
+
+This Soft-RoCE device will always be created on the Nova compute side since we
+cannot tell beforehand whether there will be an RDMA connection or not.
+
 
 Keystone
 ~~~~~~~~
@@ -719,7 +751,7 @@
 
 ::
 
-    openstack --os-cloud devstack-system-admin registered limit update \
+    openstack --os-cloud devstack-system-admin registered limit set \
       --service glance --default-limit 5000 --region RegionOne image_size_total
 
 .. _arch-configuration:
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 4de238f..8b5a85b 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -42,8 +42,9 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 All changes proposed to the Devstack require two ``Code-Review +2`` votes from
 Devstack core reviewers before one of the core reviewers can approve the patch
-by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate
-which can be approved by single core reviewers.
+by giving ``Workflow +1`` vote. There are two exceptions: patches to
+unblock the gate, and patches that do not relate to Devstack's core logic
+(for example, old job cleanups), can be approved by a single core reviewer.
 
 Project Team Lead Duties
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
index fd0d9cd..3ca0ad9 100644
--- a/doc/source/debugging.rst
+++ b/doc/source/debugging.rst
@@ -20,6 +20,12 @@
 falling (i.e. processes are consuming memory).  It also provides
 output showing locked (unswappable) memory.
 
+file_tracker
+------------
+
+The ``file_tracker`` service periodically monitors the number of
+open files in the system.
+
 tcpdump
 -------
 
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index e7ec629..e7b46b6 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -20,7 +20,7 @@
    guides/neutron
    guides/devstack-with-nested-kvm
    guides/nova
-   guides/devstack-with-lbaas-v2
+   guides/devstack-with-octavia
    guides/devstack-with-ldap
 
 All-In-One Single VM
@@ -69,10 +69,10 @@
 
 Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
 
-Configure Load-Balancer Version 2
------------------------------------
+Configure Octavia
+-----------------
 
-Guide on :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+Guide on :doc:`Configure Octavia <guides/devstack-with-octavia>`.
 
 Deploying DevStack with LDAP
 ----------------------------
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
deleted file mode 100644
index 5d96ca7..0000000
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ /dev/null
@@ -1,145 +0,0 @@
-Devstack with Octavia Load Balancing
-====================================
-
-Starting with the OpenStack Pike release, Octavia is now a standalone service
-providing load balancing services for OpenStack.
-
-This guide will show you how to create a devstack with `Octavia API`_ enabled.
-
-.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
-
-Phase 1: Create DevStack + 2 nova instances
---------------------------------------------
-
-First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find
-useful.
-
-Install devstack
-
-::
-
-    git clone https://opendev.org/openstack/devstack
-    cd devstack/tools
-    sudo ./create-stack-user.sh
-    cd ../..
-    sudo mv devstack /opt/stack
-    sudo chown -R stack.stack /opt/stack/devstack
-
-This will clone the current devstack code locally, then setup the "stack"
-account that devstack services will run under. Finally, it will move devstack
-into its default location in /opt/stack/devstack.
-
-Edit your ``/opt/stack/devstack/local.conf`` to look like
-
-::
-
-    [[local|localrc]]
-    enable_plugin octavia https://opendev.org/openstack/octavia
-    # If you are enabling horizon, include the octavia dashboard
-    # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git
-    # If you are enabling barbican for TLS offload in Octavia, include it here.
-    # enable_plugin barbican https://opendev.org/openstack/barbican
-
-    # ===== BEGIN localrc =====
-    DATABASE_PASSWORD=password
-    ADMIN_PASSWORD=password
-    SERVICE_PASSWORD=password
-    SERVICE_TOKEN=password
-    RABBIT_PASSWORD=password
-    # Enable Logging
-    LOGFILE=$DEST/logs/stack.sh.log
-    VERBOSE=True
-    LOG_COLOR=True
-    # Pre-requisite
-    ENABLED_SERVICES=rabbit,mysql,key
-    # Horizon - enable for the OpenStack web GUI
-    # ENABLED_SERVICES+=,horizon
-    # Nova
-    ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
-    ENABLED_SERVICES+=,placement-api,placement-client
-    # Glance
-    ENABLED_SERVICES+=,g-api
-    # Neutron
-    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
-    ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-    # Cinder
-    ENABLED_SERVICES+=,c-api,c-vol,c-sch
-    # Tempest
-    ENABLED_SERVICES+=,tempest
-    # Barbican - Optionally used for TLS offload in Octavia
-    # ENABLED_SERVICES+=,barbican
-    # ===== END localrc =====
-
-Run stack.sh and do some sanity checks
-
-::
-
-    sudo su - stack
-    cd /opt/stack/devstack
-    ./stack.sh
-    . ./openrc
-
-    openstack network list  # should show public and private networks
-
-Create two nova instances that we can use as test http servers:
-
-::
-
-    #create nova instances on private network
-    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
-    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
-    openstack server list # should show the nova instances just created
-
-    #add secgroup rules to allow ssh etc..
-    openstack security group rule create default --protocol icmp
-    openstack security group rule create default --protocol tcp --dst-port 22:22
-    openstack security group rule create default --protocol tcp --dst-port 80:80
-
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
-
-::
-
-    MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
-    while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
-Phase 2: Create your load balancer
-----------------------------------
-
-Make sure you have the 'openstack loadbalancer' commands:
-
-::
-
-    pip install python-octaviaclient
-
-Create your load balancer:
-
-::
-
-    openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
-
-Please note: The <web server # address> fields are the IP addresses of the nova
-servers created in Phase 1.
-Also note, using the API directly you can do all of the above commands in one
-API call.
-
-Phase 3: Test your load balancer
---------------------------------
-
-::
-
-    openstack loadbalancer show lb1 # Note the vip_address
-    curl http://<vip_address>
-    curl http://<vip_address>
-
-This should show the "Welcome to <IP>" message from each member server.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 3732f06..ba483e9 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -1,3 +1,5 @@
+.. _kvm_nested_virt:
+
 =======================================================
 Configure DevStack with KVM-based Nested Virtualization
 =======================================================
diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst
new file mode 100644
index 0000000..55939f0
--- /dev/null
+++ b/doc/source/guides/devstack-with-octavia.rst
@@ -0,0 +1,144 @@
+Devstack with Octavia Load Balancing
+====================================
+
+Starting with the OpenStack Pike release, Octavia is now a standalone service
+providing load balancing services for OpenStack.
+
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+Phase 1: Create DevStack + 2 nova instances
+--------------------------------------------
+
+First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space,
+make sure it is updated. Install git and any other developer tools you find
+useful.
+
+Install devstack::
+
+    git clone https://opendev.org/openstack/devstack
+    cd devstack/tools
+    sudo ./create-stack-user.sh
+    cd ../..
+    sudo mv devstack /opt/stack
+    sudo chown -R stack:stack /opt/stack/devstack
+
+This will clone the current devstack code locally, then setup the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location in /opt/stack/devstack.
+
+Edit your ``/opt/stack/devstack/local.conf`` to look like::
+
+    [[local|localrc]]
+    # ===== BEGIN localrc =====
+    DATABASE_PASSWORD=password
+    ADMIN_PASSWORD=password
+    SERVICE_PASSWORD=password
+    SERVICE_TOKEN=password
+    RABBIT_PASSWORD=password
+    GIT_BASE=https://opendev.org
+    # Optional settings:
+    # OCTAVIA_AMP_BASE_OS=centos
+    # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream
+    # OCTAVIA_AMP_IMAGE_SIZE=3
+    # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY
+    # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True
+    # LIBS_FROM_GIT+=octavia-lib,
+    # Enable Logging
+    LOGFILE=$DEST/logs/stack.sh.log
+    VERBOSE=True
+    LOG_COLOR=True
+    enable_service rabbit
+    enable_plugin neutron $GIT_BASE/openstack/neutron
+    # Octavia supports using QoS policies on the VIP port:
+    enable_service q-qos
+    enable_service placement-api placement-client
+    # Octavia services
+    enable_plugin octavia $GIT_BASE/openstack/octavia master
+    enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard
+    enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider
+    enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin
+    enable_service octavia o-api o-cw o-hm o-hk o-da
+    # If you are enabling barbican for TLS offload in Octavia, include it here.
+    # enable_plugin barbican $GIT_BASE/openstack/barbican
+    # enable_service barbican
+    # Cinder (optional)
+    disable_service c-api c-vol c-sch
+    # Tempest
+    enable_service tempest
+    # ===== END localrc =====
+
+.. note::
+    For best performance it is highly recommended to use KVM
+    virtualization instead of QEMU.
+    Also make sure nested virtualization is enabled as documented in
+    :ref:`the respective guide <kvm_nested_virt>`.
+    By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your
+    ``local.conf`` you enable the guest VMs to make use of all features your
+    host's CPU provides.
+
+Run stack.sh and do some sanity checks::
+
+    sudo su - stack
+    cd /opt/stack/devstack
+    ./stack.sh
+    . ./openrc
+
+    openstack network list  # should show public and private networks
+
+Create two nova instances that we can use as test http servers::
+
+    # create nova instances on private network
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+    openstack server list # should show the nova instances just created
+
+    # add secgroup rules to allow ssh etc..
+    openstack security group rule create default --protocol icmp
+    openstack security group rule create default --protocol tcp --dst-port 22:22
+    openstack security group rule create default --protocol tcp --dst-port 80:80
+
+Set up a simple web server on each of these instances. One possibility is to use
+the `Golang test server`_ that is used by the Octavia project for CI testing
+as well.
+Copy the binary to your instances and start it as shown below
+(username 'cirros', password 'gocubsgo')::
+
+    INST_IP=<instance IP>
+    scp -O test_server.bin cirros@${INST_IP}:
+    ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP}
+
+When started this way the test server will respond to HTTP requests with
+its own IP.
+
+Phase 2: Create your load balancer
+----------------------------------
+
+Create your load balancer::
+
+    openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet
+    openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1
+    openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+    openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+    openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+    openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: The <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note, using the API directly you can do all of the above commands in one
+API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+    openstack loadbalancer show lb1 # Note the vip_address
+    curl http://<vip_address>
+    curl http://<vip_address>
+
+This should show the IP address of each member server in turn.
+
+
+.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 0529e30..a4385b5 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -106,6 +106,9 @@
 -  Set the service password. This is used by the OpenStack services
    (Nova, Glance, etc) to authenticate with Keystone.
 
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
 ``local.conf`` should look something like this:
 
 .. code-block:: ini
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 0434d68..1e932f8 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,9 +38,10 @@
 
 Start with a clean and minimal install of a Linux system. DevStack
 attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE.
+latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and
+openEuler.
 
-If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
+If you do not have a preference, Ubuntu 22.04 (Jammy) is the
 most tested, and will probably go the smoothest.
 
 Add Stack User (optional)
@@ -101,7 +102,10 @@
 This is the minimum required config to get started with DevStack.
 
 .. note:: There is a sample :download:`local.conf </assets/local.conf>` file
-    under the *samples* directory in the devstack repository.
+   under the *samples* directory in the devstack repository.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
 
 Start the install
 -----------------
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 33a55f8..19f158f 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-redhat-lsb-core # not:rhel9
+redhat-lsb-core # not:rhel9,openEuler-22.03
 xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 7697513..b6866de 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -26,9 +26,9 @@
 postgresql-devel  # psycopg2
 psmisc
 python3-devel
-python3-pip
+python3-pip # not:openEuler-22.03
 python3-systemd
-redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
+redhat-rpm-config # not:openEuler-22.03  missing dep for gcc hardening flags, see rhbz#1217376
 tar
 tcpdump
 unzip
diff --git a/files/rpms/nova b/files/rpms/nova
index f2824ee..e0f13b8 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -4,7 +4,7 @@
 genisoimage # not:rhel9 required for config_drive
 iptables
 iputils
-kernel-modules
+kernel-modules # not:openEuler-22.03
 kpartx
 parted
 polkit
diff --git a/files/rpms/swift b/files/rpms/swift
index 7d906aa..49a1833 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -4,4 +4,4 @@
 rsync-daemon
 sqlite
 xfsprogs
-xinetd # not:f35,rhel9
+xinetd # not:f36,rhel9
diff --git a/functions-common b/functions-common
index 92a6678..4eed5d8 100644
--- a/functions-common
+++ b/functions-common
@@ -399,7 +399,7 @@
     elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
         sudo zypper -n install lsb-release
     elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
-        sudo dnf install -y redhat-lsb-core
+        sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb
     else
         die $LINENO "Unable to find or auto-install lsb_release"
     fi
@@ -418,6 +418,9 @@
         os_RELEASE=${VERSION_ID}
         os_CODENAME="n/a"
         os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+    elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then
+        os_VENDOR="Rocky"
+        os_RELEASE=${VERSION_ID}
     else
         _ensure_lsb_release
 
@@ -466,10 +469,13 @@
         "$os_VENDOR" =~ (AlmaLinux) || \
         "$os_VENDOR" =~ (Scientific) || \
         "$os_VENDOR" =~ (OracleServer) || \
+        "$os_VENDOR" =~ (Rocky) || \
         "$os_VENDOR" =~ (Virtuozzo) ]]; then
         # Drop the . release as we assume it's compatible
         # XXX re-evaluate when we get RHEL10
         DISTRO="rhel${os_RELEASE::1}"
+    elif [[ "$os_VENDOR" =~ (openEuler) ]]; then
+        DISTRO="openEuler-$os_RELEASE"
     else
         # We can't make a good choice here.  Setting a sensible DISTRO
         # is part of the problem, but not the major issue -- we really
@@ -513,7 +519,7 @@
 
 
 # Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS, etc).
+# (Fedora, RHEL, CentOS, Rocky, etc).
 # is_fedora
 function is_fedora {
     if [[ -z "$os_VENDOR" ]]; then
@@ -521,8 +527,10 @@
     fi
 
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
+        [ "$os_VENDOR" = "openEuler" ] || \
         [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
         [ "$os_VENDOR" = "RedHatEnterprise" ] || \
+        [ "$os_VENDOR" = "Rocky" ] || \
         [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
         [ "$os_VENDOR" = "AlmaLinux" ] || \
         [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
@@ -570,6 +578,14 @@
     [ "$os_PACKAGE" = "deb" ]
 }
 
+# Determine if current distribution is an openEuler distribution
+# is_openeuler
+function is_openeuler {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    [ "$os_VENDOR" = "openEuler" ]
+}
 # Git Functions
 # =============
 
@@ -875,14 +891,9 @@
 # Usage: get_or_create_domain <name> <description>
 function get_or_create_domain {
     local domain_id
-    # Gets domain id
     domain_id=$(
-        # Gets domain id
-        openstack --os-cloud devstack-system-admin domain show $1 \
-            -f value -c id 2>/dev/null ||
-        # Creates new domain
         openstack --os-cloud devstack-system-admin domain create $1 \
-            --description "$2" \
+            --description "$2" --or-show \
             -f value -c id
     )
     echo $domain_id
@@ -971,29 +982,22 @@
 # Usage: get_or_add_user_project_role <role> <user> <project> [<user_domain> <project_domain>]
 function get_or_add_user_project_role {
     local user_role_id
+    local domain_args
 
     domain_args=$(_get_domain_args $4 $5)
 
-    # Gets user role id
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --project $3 \
+        $domain_args
     user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --user $2 \
         --project $3 \
         $domain_args \
-        | grep '^|\s[a-f0-9]\+' | get_field 1)
-    if [[ -z "$user_role_id" ]]; then
-        # Adds role to user and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --user $2 \
-            --project $3 \
-            $domain_args
-        user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --user $2 \
-            --project $3 \
-            $domain_args \
-            | grep '^|\s[a-f0-9]\+' | get_field 1)
-    fi
+        -c Role -f value)
     echo $user_role_id
 }
 
@@ -1001,23 +1005,18 @@
 # Usage: get_or_add_user_domain_role <role> <user> <domain>
 function get_or_add_user_domain_role {
     local user_role_id
-    # Gets user role id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --domain $3
     user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --user $2 \
         --domain $3 \
-        | grep '^|\s[a-f0-9]\+' | get_field 1)
-    if [[ -z "$user_role_id" ]]; then
-        # Adds role to user and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --user $2 \
-            --domain $3
-        user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --user $2 \
-            --domain $3 \
-            | grep '^|\s[a-f0-9]\+' | get_field 1)
-    fi
+        -c Role -f value)
+
     echo $user_role_id
 }
 
@@ -1056,23 +1055,18 @@
 # Usage: get_or_add_group_project_role <role> <group> <project>
 function get_or_add_group_project_role {
     local group_role_id
-    # Gets group role id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack role add $1 \
+        --group $2 \
+        --project $3
     group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --group $2 \
         --project $3 \
-        -f value)
-    if [[ -z "$group_role_id" ]]; then
-        # Adds role to group and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --group $2 \
-            --project $3
-        group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --group $2 \
-            --project $3 \
-            -f value)
-    fi
+        -f value -c Role)
+
     echo $group_role_id
 }
 
diff --git a/lib/apache b/lib/apache
index 94f3cfc..dd8c9a0 100644
--- a/lib/apache
+++ b/lib/apache
@@ -95,7 +95,7 @@
     # didn't fix Python 3.10 compatibility before release.  Should be
     # fixed in uwsgi 4.9.0; can remove this when packages available
     # or we drop this release
-    elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then
+    elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then
         # Note httpd comes with mod_proxy_uwsgi and it is loaded by
         # default; the mod_proxy_uwsgi package actually conflicts now.
         # See:
diff --git a/lib/cinder b/lib/cinder
index 7dd7539..2424f92 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -43,6 +43,13 @@
 GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
 CINDER_DIR=$DEST/cinder
 
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+    CINDER_MY_IP="$HOST_IPV6"
+else
+    CINDER_MY_IP="$HOST_IP"
+fi
+
+
 # Cinder virtual environment
 if [[ ${USE_VENV} = True ]]; then
     PROJECT_VENV["cinder"]=${CINDER_DIR}.venv
@@ -88,13 +95,32 @@
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
-# Default to lioadm
-CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+
+if [[ -n "$CINDER_ISCSI_HELPER" ]]; then
+    if [[ -z "$CINDER_TARGET_HELPER" ]]; then
+        deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead'
+        CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER"
+    else
+        deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER'
+    fi
+fi
+CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm}
+
+if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then
+    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'}
+    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'}
+    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420}
+else
+    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'}
+    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'}
+    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260}
+fi
+
 
 # EL and SUSE should only use lioadm
 if is_fedora || is_suse; then
-    if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
-        die "lioadm is the only valid Cinder target_helper config on this platform"
+    if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then
+        die "lioadm and nvmet are the only valid Cinder target_helper config on this platform"
     fi
 fi
 
@@ -187,7 +213,7 @@
 function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
-    if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
         local targets
         targets=$(sudo tgtadm --op show --mode target)
         if [ $? -ne 0 ]; then
@@ -215,8 +241,14 @@
         else
             stop_service tgtd
         fi
-    else
+    elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
         sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
+    elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+        # If we don't disconnect everything vgremove will block
+        sudo nvme disconnect-all
+        sudo nvmetcli clear
+    else
+        die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER"
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -267,7 +299,7 @@
 
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
-    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER"
     iniset $CINDER_CONF database connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
@@ -275,11 +307,7 @@
     iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
-    if [[ $SERVICE_IP_VERSION == 6 ]]; then
-        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6"
-    else
-        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
-    fi
+    iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP"
     iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
     iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
     if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
@@ -383,6 +411,9 @@
     if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $CINDER_CONF oslo_policy enforce_scope true
         iniset $CINDER_CONF oslo_policy enforce_new_defaults true
+    else
+        iniset $CINDER_CONF oslo_policy enforce_scope false
+        iniset $CINDER_CONF oslo_policy enforce_new_defaults false
     fi
 }
 
@@ -473,9 +504,9 @@
 function install_cinder {
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
     setup_develop $CINDER_DIR
-    if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
+    if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then
         install_package tgt
-    elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
+    elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then
         if is_ubuntu; then
             # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
             sudo mkdir -p /etc/target
@@ -484,6 +515,43 @@
         else
             install_package targetcli
         fi
+    elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then
+        install_package nvme-cli
+
+        # TODO: Remove manual installation of the dependency when the
+        # requirement is added to nvmetcli:
+        # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html
+        if is_ubuntu; then
+            install_package python3-configshell-fb
+        else
+            install_package python3-configshell
+        fi
+        # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3
+        pip_install git+git://git.infradead.org/users/hch/nvmetcli.git
+
+        sudo modprobe nvmet
+        sudo modprobe nvme-fabrics
+
+        if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+            install_package rdma-core
+            sudo modprobe nvme-rdma
+
+            # Create the Soft-RoCE device over the networking interface
+            local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`}
+            if [[ -z "$iface" ]]; then
+                die $LINENO "Cannot find interface to bind Soft-RoCE"
+            fi
+
+            if ! sudo rdma link | grep $iface ; then
+                sudo rdma link add rxe_$iface type rxe netdev $iface
+            fi
+
+        elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+            sudo modprobe nvme-tcp
+
+        else  # 'nvmet_fc'
+            sudo modprobe nvme-fc
+        fi
     fi
 }
 
@@ -520,7 +588,7 @@
         service_port=$CINDER_SERVICE_PORT_INT
         service_protocol="http"
     fi
-    if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
         if is_service_enabled c-vol; then
             # Delete any old stack.conf
             sudo rm -f /etc/tgt/conf.d/stack.conf
diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate
index 3ffd9a6..3b9f1d1 100644
--- a/lib/cinder_backends/fake_gate
+++ b/lib/cinder_backends/fake_gate
@@ -50,7 +50,7 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
 
     if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index e03ef14..4286511 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -50,7 +50,10 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
+    iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL"
+    iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT"
+    iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
     iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
 }
diff --git a/lib/databases/mysql b/lib/databases/mysql
index b292da2..e805b3e 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -100,8 +100,13 @@
 
     # Set the root password - only works the first time. For Ubuntu, we already
     # did that with debconf before installing the package, but we still try,
-    # because the package might have been installed already.
-    sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+    # because the package might have been installed already. We don't do this
+    # for Ubuntu 22.04 (jammy) because the authorization model changed in
+    # version 10.4 of mariadb. See
+    # https://mariadb.org/authentication-in-mariadb-10-4/
+    if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+        sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+    fi
 
     # In case of Mariadb, giving hostname in arguments causes permission
     # problems as it expects connection through socket
@@ -115,13 +120,21 @@
     # as root so it works only as sudo. To restore old "mysql like" behaviour,
     # we need to change auth plugin for root user
     if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
-        sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
-        sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+        if [[ "$DISTRO" == "jammy" ]]; then
+            # For Ubuntu 22.04 (jammy) we follow the model outlined in
+            # https://mariadb.org/authentication-in-mariadb-10-4/
+            sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');"
+        else
+            sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+            sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+        fi
     fi
-    # Create DB user if it does not already exist
-    sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
-    # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
-    sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+    if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+        # Create DB user if it does not already exist
+        sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+        # Update the DB to give user '$DATABASE_USER'@'%' full control of all databases:
+        sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+    fi
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
@@ -163,6 +176,16 @@
                 count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
     fi
 
+    if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then
+        iniset -sudo $my_conf mysqld read_buffer_size 64K
+        iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M
+        iniset -sudo $my_conf mysqld thread_stack 192K
+        iniset -sudo $my_conf mysqld thread_cache_size 8
+        iniset -sudo $my_conf mysqld tmp_table_size 8M
+        iniset -sudo $my_conf mysqld sort_buffer_size 8M
+        iniset -sudo $my_conf mysqld max_allowed_packet 8M
+    fi
+
     restart_service $MYSQL_SERVICE_NAME
 }
 
diff --git a/lib/dstat b/lib/dstat
index eb03ae0..870c901 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -40,12 +40,18 @@
     if is_service_enabled peakmem_tracker; then
         die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead"
     fi
+
+    # To enable file_tracker add:
+    #    enable_service file_tracker
+    # to your localrc
+    run_process file_tracker "$TOP_DIR/tools/file_tracker.sh"
 }
 
 # stop_dstat() stop dstat process
 function stop_dstat {
     stop_process dstat
     stop_process memory_tracker
+    stop_process file_tracker
 }
 
 # Restore xtrace
diff --git a/lib/glance b/lib/glance
index ba98f41..041acaf 100644
--- a/lib/glance
+++ b/lib/glance
@@ -436,6 +436,10 @@
         iniset $GLANCE_API_CONF oslo_policy enforce_scope true
         iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
         iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
+    else
+        iniset $GLANCE_API_CONF oslo_policy enforce_scope false
+        iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false
+        iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false
     fi
 }
 
diff --git a/lib/keystone b/lib/keystone
index 80a136f..6cb4aac 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -265,10 +265,15 @@
         iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
         iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
     fi
+
+    iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+
     if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $KEYSTONE_CONF oslo_policy enforce_scope true
         iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
-        iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+    else
+        iniset $KEYSTONE_CONF oslo_policy enforce_scope false
+        iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false
     fi
 }
 
diff --git a/lib/lvm b/lib/lvm
index d3f6bf1..57ffb96 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -130,7 +130,7 @@
     local size=$2
 
     # Start the tgtd service on Fedora and SUSE if tgtadm is used
-    if  is_fedora || is_suse  && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+    if  is_fedora || is_suse  && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then
         start_service tgtd
     fi
 
@@ -138,10 +138,14 @@
     _create_lvm_volume_group $vg $size
 
     # Remove iscsi targets
-    if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then
+    if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
         sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
-    else
+    elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
         sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
+    elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+        # If we don't disconnect everything vgremove will block
+        sudo nvme disconnect-all
+        sudo nvmetcli clear
     fi
     _clean_lvm_volume_group $vg
 }
diff --git a/lib/neutron b/lib/neutron
index b3e3d72..8708bf4 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -1,118 +1,155 @@
 #!/bin/bash
 #
 # lib/neutron
-# Install and start **Neutron** network services
+# functions - functions specific to neutron
 
 # Dependencies:
-#
 # ``functions`` file
 # ``DEST`` must be defined
+# ``STACK_USER`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# - is_XXXX_enabled
-# - install_XXXX
-# - configure_XXXX
-# - init_XXXX
-# - start_XXXX
-# - stop_XXXX
-# - cleanup_XXXX
+# - install_neutron_agent_packages
+# - install_neutronclient
+# - install_neutron
+# - install_neutron_third_party
+# - configure_neutron
+# - init_neutron
+# - configure_neutron_third_party
+# - init_neutron_third_party
+# - start_neutron_third_party
+# - create_nova_conf_neutron
+# - configure_neutron_after_post_config
+# - start_neutron_service_and_check
+# - check_neutron_third_party_integration
+# - start_neutron_agents
+# - create_neutron_initial_network
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# - stop_neutron
+# - stop_neutron_third_party
+# - cleanup_neutron
 
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# Functions in lib/neutron are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - neutron exercises
+# - 3rd party programs
 
-# Defaults
+
+# Neutron Networking
+# ------------------
+
+# Make sure that neutron is enabled in ``ENABLED_SERVICES``.  If you want
+# to run Neutron on this host, make sure that q-svc is also in
+# ``ENABLED_SERVICES``.
+#
+# See "Neutron Network Configuration" below for additional variables
+# that must be set in localrc for connectivity across hosts with
+# Neutron.
+
+# Settings
 # --------
 
+
+# Neutron Network Configuration
+# -----------------------------
+
+if is_service_enabled tls-proxy; then
+    Q_PROTOCOL="https"
+fi
+
+
 # Set up default directories
 GITDIR["python-neutronclient"]=$DEST/python-neutronclient
 
+
+NEUTRON_DIR=$DEST/neutron
+NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
+
+# Support entry points installation of console scripts
+if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
+    NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
+else
+    NEUTRON_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+NEUTRON_CONF_DIR=/etc/neutron
+NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
+export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
+
 # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
 # - False (default) : Run neutron under Eventlet
 # - True : Run neutron under uwsgi
 # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
 # enough
 NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
-NEUTRON_DIR=$DEST/neutron
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
 
 # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
 # and "enforce_new_defaults" to True in the Neutron's config to enforce usage
 # of the new RBAC policies and scopes.
 NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
 
-NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
-# Distributed Virtual Router (DVR) configuration
-# Can be:
-# - ``legacy``          - No DVR functionality
-# - ``dvr_snat``        - Controller or single node DVR
-# - ``dvr``             - Compute node in multi-node DVR
-# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
-#
-# Default is 'dvr_snat' since it can handle both DVR and legacy routers
-NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat}
+# Agent binaries.  Note, binary paths for other agents are set in per-service
+# scripts in lib/neutron_plugins/services/
+AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
+AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
+AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
 
-NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-NEUTRON_DHCP_BINARY="neutron-dhcp-agent"
+# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
+# loaded from per-plugin scripts in lib/neutron_plugins/
+Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
+# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository,
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE
+Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
+# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository,
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_L3_CONF=$Q_L3_CONF_FILE
+Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
 
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
-NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+# Default name for Neutron database
+Q_DB_NAME=${Q_DB_NAME:-neutron}
+# Default Neutron Plugin
+Q_PLUGIN=${Q_PLUGIN:-ml2}
+# Default Neutron Port
+Q_PORT=${Q_PORT:-9696}
+# Default Neutron Internal Port when using TLS proxy
+Q_PORT_INT=${Q_PORT_INT:-19696}
+# Default Neutron Host
+Q_HOST=${Q_HOST:-$SERVICE_HOST}
+# Default protocol
+Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
+# Default listen address
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# RHEL's support for namespaces requires using veths with ovs
+Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
+Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
+Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
+# Meta data IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
+# Allow Overlapping IP among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
+Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
-NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
-NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
-NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/
-NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
+# Allow to skip stopping of OVN services
+SKIP_STOP_OVN=${SKIP_STOP_OVN:-False}
 
-NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# By default, use the ML2 plugin
-NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
-NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
-NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
-
-NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini}
-NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME
-
-NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
-NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
-NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent}
-NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent}
-
-# Public facing bits
-if is_service_enabled tls-proxy; then
-    NEUTRON_SERVICE_PROTOCOL="https"
-fi
-NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST}
-NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696}
-NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696}
-NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone}
-NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
-NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
-
-# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
-# an external network bridge
-PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
-PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
-
-# Network type - default vxlan, however enables vlan based jobs to override
-# using the legacy environment variable as well as a new variable in greater
-# alignment with the naming scheme of this plugin.
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan}
-
-NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}}
-
-# Physical network for VLAN network usage.
-NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
+# /etc/neutron is assumed by many of devstack plugins.  Do not change.
+_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
 
 # The name of the service in the endpoint URL
 NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
@@ -120,9 +157,155 @@
     NEUTRON_ENDPOINT_SERVICE_NAME="networking"
 fi
 
+# List of config file names in addition to the main plugin config file
+# To add additional plugin config files, use ``neutron_server_config_add``
+# utility function.  For example:
+#
+#    ``neutron_server_config_add file1``
+#
+# These config files are relative to ``/etc/neutron``.  The above
+# example would specify ``--config-file /etc/neutron/file1`` for
+# neutron server.
+declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
 
-# Additional neutron api config files
-declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
+# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
+
+
+Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
+if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+    Q_RR_COMMAND="sudo"
+else
+    NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
+    Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+    fi
+fi
+
+
+# Distributed Virtual Router (DVR) configuration
+# Can be:
+# - ``legacy``   - No DVR functionality
+# - ``dvr_snat`` - Controller or single node DVR
+# - ``dvr``      - Compute node in multi-node DVR
+# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
+#
+Q_DVR_MODE=${Q_DVR_MODE:-legacy}
+if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
+fi
+
+# Provider Network Configurations
+# --------------------------------
+
+# The following variables control the Neutron ML2 plugins' allocation
+# of tenant networks and availability of provider networks. If these
+# are not configured in ``localrc``, tenant networks will be local to
+# the host (with no remote connectivity), and no physical resources
+# will be available for the allocation of provider networks.
+
+# To disable tunnels (GRE or VXLAN) for tenant networks,
+# set to False in ``local.conf``.
+# GRE tunnels are only supported by the openvswitch.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
+
+# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
+# specify the range of IDs from which tenant networks are
+# allocated. Can be overridden in ``localrc`` if necessary.
+TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+# To use VLANs for tenant networks, set to True in localrc. VLANs
+# are supported by the ML2 plugins, requiring additional configuration
+# described below.
+ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+# If using VLANs for tenant networks, set in ``localrc`` to specify
+# the range of VLAN VIDs from which tenant networks are
+# allocated. An external network switch must be configured to
+# trunk these VLANs between hosts for multi-host connectivity.
+#
+# Example: ``TENANT_VLAN_RANGE=1000:1999``
+TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+# If using VLANs for tenant networks, or if using flat or VLAN
+# provider networks, set in ``localrc`` to the name of the physical
+# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
+# agent, as described below.
+#
+# Example: ``PHYSICAL_NETWORK=default``
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
+
+# With the openvswitch agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the OVS bridge to use for the physical network. The
+# bridge will be created if it does not already exist, but a
+# physical interface must be manually added to the bridge as a
+# port for external connectivity.
+#
+# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
+
+# With the linuxbridge agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the network interface to use for the physical
+# network.
+#
+# Example: ``LB_PHYSICAL_INTERFACE=eth1``
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+    LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
+
+# With the openvswitch plugin, set to True in ``localrc`` to enable
+# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+#
+# Example: ``OVS_ENABLE_TUNNELING=True``
+OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+
+# Use DHCP agent for providing metadata service in the case of
+# without L3 agent (No Route Agent), set to True in localrc.
+ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
+
+# Add a static route as dhcp option, so the request to 169.254.169.254
+# will be able to reach through a route(DHCP agent)
+# This option require ENABLE_ISOLATED_METADATA = True
+ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
+# Neutron plugin specific functions
+# ---------------------------------
+
+# Please refer to ``lib/neutron_plugins/README.md`` for details.
+if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
+    source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+fi
+
+# Agent metering service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/metering
+
+# L3 Service functions
+source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
+
+# Use security group or not
+if has_neutron_plugin_security_group; then
+    Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
+else
+    Q_USE_SECGROUP=False
+fi
+
+# Save trace setting
+_XTRACE_NEUTRON=$(set +o | grep xtrace)
+set +o xtrace
+
 
 # Functions
 # ---------
@@ -136,310 +319,194 @@
 }
 
 # Test if any Neutron services are enabled
-# is_neutron_enabled
+# TODO(slaweq): this is not really needed now and we should remove it as soon
+# as it will not be called from any other Devstack plugins, like e.g. Neutron
+# plugin
 function is_neutron_legacy_enabled {
-    # first we need to remove all "neutron-" from DISABLED_SERVICES list
-    disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g')
-    [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1
-    [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
-    return 1
+    return 0
 }
 
-if is_neutron_legacy_enabled; then
-    source $TOP_DIR/lib/neutron-legacy
-fi
-
-# cleanup_neutron() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
-    if is_neutron_ovs_base_plugin; then
-        neutron_ovs_base_cleanup
+function _determine_config_server {
+    if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
+        if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
+            deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+        else
+            die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+        fi
     fi
-
-    if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-        neutron_lb_cleanup
+    if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
+        deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated.  Use neutron_server_config_add instead."
     fi
-    # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
-        sudo ip netns delete ${ns}
+    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+        _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
     done
+
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
+        opts+=" --config-file $cfg_file"
+    done
+    echo "$opts"
 }
 
-# configure_root_helper_options() - Configure agent rootwrap helper options
-function configure_root_helper_options {
-    local conffile=$1
-    iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD"
-    iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD"
+function _determine_config_l3 {
+    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
+    echo "$opts"
 }
 
-# configure_neutron() - Set config files, create data dirs, etc
-function configure_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-
-    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
-    configure_neutron_rootwrap
-
-    mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
-
-    # NOTE(yamamoto): A decomposed plugin should prepare the config file in
-    # its devstack plugin.
-    if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then
-        cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
+# For services and agents that require it, dynamically construct a list of
+# --config-file arguments that are passed to the binary.
+function determine_config_files {
+    local opts=""
+    case "$1" in
+        "neutron-server") opts="$(_determine_config_server)" ;;
+        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
+    esac
+    if [ -z "$opts" ] ; then
+        die $LINENO "Could not determine config files for $1."
     fi
+    echo "$opts"
+}
 
-    iniset $NEUTRON_CONF database connection `database_connection_url neutron`
-    iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
-    iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
-    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
-
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
+# configure_neutron()
+# Set common config for all neutron server and agents.
+function configure_neutron {
+    _configure_neutron_common
     iniset_rpc_backend neutron $NEUTRON_CONF
 
-    # Neutron API server & Neutron plugin
-    if is_service_enabled neutron-api; then
-        local policy_file=$NEUTRON_CONF_DIR/policy.json
-        # Allow neutron user to administer neutron to match neutron account
-        # NOTE(amotoki): This is required for nova works correctly with neutron.
-        if [ -f $NEUTRON_DIR/etc/policy.json ]; then
-            cp $NEUTRON_DIR/etc/policy.json $policy_file
-            sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
-        else
-            echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $policy_file
-        fi
-
-        cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
-
-        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
-
-        iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
-        iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
-
-        iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
-        configure_keystone_authtoken_middleware $NEUTRON_CONF neutron
-        configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
-        # Configure tenant network type
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE
-
-        local mech_drivers="openvswitch"
-        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-            mech_drivers+=",l2population"
-        else
-            mech_drivers+=",linuxbridge"
-        fi
-        if [[ "$mech_drivers" == *"linuxbridge"* ]]; then
-            iniset $NEUTRON_CONF experimental linuxbridge True
-        fi
-
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION
-
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
-        if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE}
-        fi
-        if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
-            neutron_ml2_extension_driver_add port_security
-        fi
-        configure_rbac_policies
+    if is_service_enabled q-metering neutron-metering; then
+        _configure_neutron_metering
+    fi
+    if is_service_enabled q-agt neutron-agent; then
+        _configure_neutron_plugin_agent
+    fi
+    if is_service_enabled q-dhcp neutron-dhcp; then
+        _configure_neutron_dhcp_agent
+    fi
+    if is_service_enabled q-l3 neutron-l3; then
+        _configure_neutron_l3_agent
+    fi
+    if is_service_enabled q-meta neutron-metadata-agent; then
+        _configure_neutron_metadata_agent
     fi
 
-    # Neutron OVS or LB agent
-    if is_service_enabled neutron-agent; then
-        iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
-        iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF
+    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+        _configure_dvr
+    fi
+    if is_service_enabled ceilometer; then
+        _configure_neutron_ceilometer_notifications
+    fi
 
-        # Configure the neutron agent
-        if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
-            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP
-        elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
-            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP
+    if [[ $Q_AGENT == "ovn" ]]; then
+        configure_ovn
+        configure_ovn_plugin
+    fi
 
-            if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True
-            fi
-        fi
-
-        if ! running_in_container; then
-            enable_kernel_bridge_firewall
+    # Configure Neutron's advanced services
+    if is_service_enabled q-placement neutron-placement; then
+        configure_placement_extension
+    fi
+    if is_service_enabled q-trunk neutron-trunk; then
+        configure_trunk_extension
+    fi
+    if is_service_enabled q-qos neutron-qos; then
+        configure_qos
+        if is_service_enabled q-l3 neutron-l3; then
+            configure_l3_agent_extension_fip_qos
+            configure_l3_agent_extension_gateway_ip_qos
         fi
     fi
 
-    # DHCP Agent
-    if is_service_enabled neutron-dhcp; then
-        cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
-
-        iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        # make it so we have working DNS from guests
-        iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
-
-        configure_root_helper_options $NEUTRON_DHCP_CONF
-        iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
+    # Finally configure Neutron server and core plugin
+    if is_service_enabled q-agt neutron-agent q-svc neutron-api; then
+        _configure_neutron_service
     fi
 
-    if is_service_enabled neutron-l3; then
-        cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
-        iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        neutron_service_plugin_class_add router
-        configure_root_helper_options $NEUTRON_L3_CONF
-        iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
+    iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
+    # devstack is not a tool for running uber scale OpenStack
+    # clouds, therefore running without a dedicated RPC worker
+    # for state reports is more than adequate.
+    iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
 
-        # Configure the neutron agent to serve external network ports
-        if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
-        else
-            iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
-        fi
-
-        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-            iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE
-        fi
-    fi
-
-    # Metadata
-    if is_service_enabled neutron-metadata-agent; then
-        cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
-
-        iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST
-        iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
-        # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
-        configure_root_helper_options $NEUTRON_META_CONF
-
-        # TODO(dtroyer): remove the v2.0 hard code below
-        iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
-        configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT
-    fi
-
-    # Format logging
-    setup_logging $NEUTRON_CONF
-
-    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
-        # Set the service port for a proxy to take the original
-        iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
-        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
-    fi
-
-    # Metering
-    if is_service_enabled neutron-metering; then
-        cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF
-        neutron_service_plugin_class_add metering
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
     fi
 }
 
-# configure_neutron_rootwrap() - configure Neutron's rootwrap
-function configure_neutron_rootwrap {
-    # Deploy new rootwrap filters files (owned by root).
-    # Wipe any existing rootwrap.d files first
-    if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then
-        sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d
+function configure_neutron_nova {
+    create_nova_conf_neutron $NOVA_CONF
+    if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            local conf
+            conf=$(conductor_conf $i)
+            create_nova_conf_neutron $conf
+        done
     fi
-
-    # Deploy filters to /etc/neutron/rootwrap.d
-    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
-    sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-
-    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
-    sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR
-    sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf
-
-    # Set up the rootwrap sudoers for Neutron
-    tempfile=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile
-    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile
-    chmod 0440 $tempfile
-    sudo chown root:root $tempfile
-    sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap
 }
 
-# Make Neutron-required changes to nova.conf
-# Takes a single optional argument which is the config file to update,
-# if not passed $NOVA_CONF is used.
-function configure_neutron_nova_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+function create_nova_conf_neutron {
     local conf=${1:-$NOVA_CONF}
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf neutron username neutron
+    iniset $conf neutron username "$Q_ADMIN_USERNAME"
     iniset $conf neutron password "$SERVICE_PASSWORD"
-    iniset $conf neutron user_domain_name "Default"
-    iniset $conf neutron project_name "$SERVICE_TENANT_NAME"
-    iniset $conf neutron project_domain_name "Default"
-    iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
+    iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
+    iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
     iniset $conf neutron region_name "$REGION_NAME"
 
     # optionally set options in nova_conf
     neutron_plugin_create_nova_conf $conf
 
-    if is_service_enabled neutron-metadata-agent; then
+    if is_service_enabled q-meta neutron-metadata-agent; then
         iniset $conf neutron service_metadata_proxy "True"
     fi
 
+    iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
+# create_neutron_accounts() - Set up common required neutron accounts
+
 # Tenant               User       Roles
 # ------------------------------------------------------------------
 # service              neutron    admin        # if enabled
 
-# create_neutron_accounts() - Create required service accounts
-function create_neutron_accounts_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+# Migrated from keystone_data.sh
+function create_neutron_accounts {
     local neutron_url
-
     if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/
     else
-        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
     fi
     if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
         neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
     fi
 
-
-    if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
+    if is_service_enabled q-svc neutron-api; then
 
         create_service_user "neutron"
 
-        neutron_service=$(get_or_create_service "neutron" \
-            "network" "Neutron Service")
-        get_or_create_endpoint $neutron_service \
+        get_or_create_service "neutron" "network" "Neutron Service"
+        get_or_create_endpoint \
+            "network" \
             "$REGION_NAME" "$neutron_url"
     fi
 }
 
 # init_neutron() - Initialize databases, etc.
-function init_neutron_new {
-
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    recreate_database neutron
-
+function init_neutron {
+    recreate_database $Q_DB_NAME
     time_start "dbsync"
     # Run Neutron db migrations
-    $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads
+    $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
     time_stop "dbsync"
 }
 
 # install_neutron() - Collect source and prepare
-function install_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
-    setup_develop $NEUTRON_DIR
-
+function install_neutron {
     # Install neutron-lib from git so we make sure we're testing
     # the latest code.
     if use_library_from_git "neutron-lib"; then
@@ -447,17 +514,12 @@
         setup_dev_lib "neutron-lib"
     fi
 
-    # L3 service requires radvd
-    if is_service_enabled neutron-l3; then
-        install_package radvd
-    fi
+    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
+    setup_develop $NEUTRON_DIR
 
-    if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then
-        #TODO(sc68cal) - kind of ugly
-        source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
-        neutron_plugin_install_agent_packages
+    if [[ $Q_AGENT == "ovn" ]]; then
+        install_ovn
     fi
-
 }
 
 # install_neutronclient() - Collect source and prepare
@@ -465,189 +527,33 @@
     if use_library_from_git "python-neutronclient"; then
         git_clone_by_name "python-neutronclient"
         setup_dev_lib "python-neutronclient"
-        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion
     fi
 }
 
-# start_neutron_api() - Start the API process ahead of other things
-function start_neutron_api {
-    local service_port=$NEUTRON_SERVICE_PORT
-    local service_protocol=$NEUTRON_SERVICE_PROTOCOL
-    local neutron_url
-    if is_service_enabled tls-proxy; then
-        service_port=$NEUTRON_SERVICE_PORT_INT
-        service_protocol="http"
+# install_neutron_agent_packages() - Collect source and prepare
+function install_neutron_agent_packages {
+    # radvd doesn't come with the OS. Install it if the l3 service is enabled.
+    if is_service_enabled q-l3 neutron-l3; then
+        install_package radvd
     fi
-
-    local opts=""
-    opts+=" --config-file $NEUTRON_CONF"
-    opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
-    local cfg_file
-    for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do
-        opts+=" --config-file $cfg_file"
-    done
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
-        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/
-        enable_service neutron-rpc-server
-        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
-    else
-        # Start the Neutron service
-        # TODO(sc68cal) Stop hard coding this
-        run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
-        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/
-        # Start proxy if enabled
-        if is_service_enabled tls-proxy; then
-            start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
-        fi
-    fi
-    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
-        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
-    fi
-
-    if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
-        die $LINENO "neutron-api did not start"
+    # install packages that are specific to plugin agent(s)
+    if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then
+        neutron_plugin_install_agent_packages
     fi
 }
 
-# start_neutron() - Start running processes
-function start_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    # Start up the neutron agents if enabled
-    # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
-    # can resolve the $NEUTRON_AGENT_BINARY
-    if is_service_enabled neutron-agent; then
-        # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files
-        run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
+# Finish neutron configuration
+function configure_neutron_after_post_config {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
+        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
     fi
-    if is_service_enabled neutron-dhcp; then
-        neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
-        run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF"
-    fi
-    if is_service_enabled neutron-l3; then
-        run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF"
-    fi
-    if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
-        # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
-        # of the code in lib/neutron_plugins/services/l3
-        if type -p neutron_plugin_create_initial_networks > /dev/null; then
-            neutron_plugin_create_initial_networks
-        else
-            # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
-            source $TOP_DIR/lib/neutron_plugins/services/l3
-            # Create the networks using servic
-            create_neutron_initial_network
-        fi
-    fi
-    if is_service_enabled neutron-metadata-agent; then
-        run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF"
-    fi
-
-    if is_service_enabled neutron-metering; then
-        run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
-    fi
-}
-
-# stop_neutron() - Stop running processes
-function stop_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    for serv in neutron-api neutron-agent neutron-l3; do
-        stop_process $serv
-    done
-
-    if is_service_enabled neutron-rpc-server; then
-        stop_process neutron-rpc-server
-    fi
-
-    if is_service_enabled neutron-dhcp; then
-        stop_process neutron-dhcp
-        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
-        [ ! -z "$pid" ] && sudo kill -9 $pid
-    fi
-
-    if is_service_enabled neutron-metadata-agent; then
-        stop_process neutron-metadata-agent
-    fi
-}
-
-# neutron_service_plugin_class_add() - add service plugin class
-function neutron_service_plugin_class_add_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    local service_plugin_class=$1
-    local plugins=""
-
-    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
-    if [ $plugins ]; then
-        plugins+=","
-    fi
-    plugins+="${service_plugin_class}"
-    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
-}
-
-function _neutron_ml2_extension_driver_add {
-    local driver=$1
-    local drivers=""
-
-    drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers)
-    if [ $drivers ]; then
-        drivers+=","
-    fi
-    drivers+="${driver}"
-    iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers
-}
-
-function neutron_server_config_add_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
-function neutron_deploy_rootwrap_filters_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    local srcdir=$1
-    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
-    sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-}
-
-# Dispatch functions
-# These are needed for compatibility between the old and new implementations
-# where there are function name overlaps.  These will be removed when
-# neutron-legacy is removed.
-# TODO(sc68cal) Remove when neutron-legacy is no more.
-function cleanup_neutron {
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        stop_process neutron-api
-        stop_process neutron-rpc-server
-        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
-        sudo rm -f $(apache_site_config_for neutron-api)
-    fi
-
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        cleanup_mutnauq "$@"
-    else
-        cleanup_neutron_new "$@"
-    fi
-}
-
-function configure_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        configure_mutnauq "$@"
-    else
-        configure_neutron_new "$@"
-    fi
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
-    fi
+    configure_rbac_policies
 }
 
 # configure_rbac_policies() - Configure Neutron to enforce new RBAC
 # policies and scopes if NEUTRON_ENFORCE_SCOPE == True
 function configure_rbac_policies {
-    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
         iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
         iniset $NEUTRON_CONF oslo_policy enforce_scope True
     else
@@ -656,120 +562,595 @@
     fi
 }
 
-
-function configure_neutron_nova {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        create_nova_conf_neutron $NOVA_CONF
-        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
-            for i in $(seq 1 $NOVA_NUM_CELLS); do
-                local conf
-                conf=$(conductor_conf $i)
-                create_nova_conf_neutron $conf
-            done
-        fi
-    else
-        configure_neutron_nova_new $NOVA_CONF
-        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
-            for i in $(seq 1 $NOVA_NUM_CELLS); do
-                local conf
-                conf=$(conductor_conf $i)
-                configure_neutron_nova_new $conf
-            done
+# Start running OVN processes
+function start_ovn_services {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        init_ovn
+        start_ovn
+        if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
+            if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
+                echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
+                echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
+            else
+                create_public_bridge
+            fi
         fi
     fi
 }
 
-function create_neutron_accounts {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        create_mutnauq_accounts "$@"
-    else
-        create_neutron_accounts_new "$@"
-    fi
-}
+# Start running processes
+function start_neutron_service_and_check {
+    local service_port=$Q_PORT
+    local service_protocol=$Q_PROTOCOL
+    local cfg_file_options
+    local neutron_url
 
-function init_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        init_mutnauq "$@"
-    else
-        init_neutron_new "$@"
-    fi
-}
+    cfg_file_options="$(determine_config_files neutron-server)"
 
-function install_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        install_mutnauq "$@"
-    else
-        install_neutron_new "$@"
+    if is_service_enabled tls-proxy; then
+        service_port=$Q_PORT_INT
+        service_protocol="http"
     fi
-}
+    # Start the Neutron service
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        enable_service neutron-api
+        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$Q_PROTOCOL://$Q_HOST/
+        enable_service neutron-rpc-server
+        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+    else
+        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+        neutron_url=$service_protocol://$Q_HOST:$service_port/
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+        fi
+    fi
+    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+    fi
+    echo "Waiting for Neutron to start..."
 
-function neutron_service_plugin_class_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_service_plugin_class_add "$@"
-    else
-        neutron_service_plugin_class_add_new "$@"
-    fi
-}
-
-function neutron_ml2_extension_driver_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_ml2_extension_driver_add_old "$@"
-    else
-        _neutron_ml2_extension_driver_add "$@"
-    fi
-}
-
-function install_neutron_agent_packages {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        install_neutron_agent_packages_mutnauq "$@"
-    else
-        :
-    fi
-}
-
-function neutron_server_config_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        mutnauq_server_config_add "$@"
-    else
-        neutron_server_config_add_new "$@"
-    fi
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
+    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
 }
 
 function start_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        start_mutnauq_l2_agent "$@"
-        start_mutnauq_other_agents "$@"
-    else
-        start_neutron_new "$@"
+    start_l2_agent "$@"
+    start_other_agents "$@"
+}
+
+# Control of the l2 agent is separated out to make it easier to test partial
+# upgrades (everything upgraded except the L2 agent)
+function start_l2_agent {
+    run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+
+    if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
+        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
+        sudo ip link set $OVS_PHYSICAL_BRIDGE up
+        sudo ip link set br-int up
+        sudo ip link set $PUBLIC_INTERFACE up
+        if is_ironic_hardware; then
+            for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
+                sudo ip addr del $IP dev $PUBLIC_INTERFACE
+                sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
+            done
+            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        fi
     fi
 }
 
+function start_other_agents {
+    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
+
+    run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
+
+    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
+    run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+}
+
+# Start running processes, including screen
+function start_neutron_agents {
+    # NOTE(slaweq): it's now just a wrapper for start_neutron function
+    start_neutron "$@"
+}
+
+function stop_l2_agent {
+    stop_process q-agt
+}
+
+# stop_other() - Stop running processes
+function stop_other {
+    if is_service_enabled q-dhcp neutron-dhcp; then
+        stop_process q-dhcp
+        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
+        [ ! -z "$pid" ] && sudo kill -9 $pid
+    fi
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-rpc-server
+        stop_process neutron-api
+    else
+        stop_process q-svc
+    fi
+
+    if is_service_enabled q-l3 neutron-l3; then
+        sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
+        stop_process q-l3
+    fi
+
+    if is_service_enabled q-meta neutron-metadata-agent; then
+        stop_process q-meta
+    fi
+
+    if is_service_enabled q-metering neutron-metering; then
+        neutron_metering_stop
+    fi
+
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || :
+    fi
+}
+
+# stop_neutron() - Stop running processes (non-screen)
 function stop_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        stop_mutnauq "$@"
-    else
-        stop_neutron_new "$@"
+    stop_other
+    stop_l2_agent
+
+    if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
+        stop_ovn
     fi
 }
 
-function neutron_deploy_rootwrap_filters {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_deploy_rootwrap_filters "$@"
+# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
+# on startup, or back to the public interface on cleanup. If no IP is
+# configured on the interface, just add it as a port to the OVS bridge.
+function _move_neutron_addresses_route {
+    local from_intf=$1
+    local to_intf=$2
+    local add_ovs_port=$3
+    local del_ovs_port=$4
+    local af=$5
+
+    if [[ -n "$from_intf" && -n "$to_intf" ]]; then
+        # Remove the primary IP address from $from_intf and add it to $to_intf,
+        # along with the default route, if it exists.  Also, when called
+        # on configure we will also add $from_intf as a port on $to_intf,
+        # assuming it is an OVS bridge.
+
+        local IP_REPLACE=""
+        local IP_DEL=""
+        local IP_UP=""
+        local DEFAULT_ROUTE_GW
+        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
+        local ADD_OVS_PORT=""
+        local DEL_OVS_PORT=""
+        local ARP_CMD=""
+
+        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
+
+        if [ "$DEFAULT_ROUTE_GW" != "" ]; then
+            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+        fi
+
+        if [[ "$add_ovs_port" == "True" ]]; then
+            ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
+        fi
+
+        if [[ "$del_ovs_port" == "True" ]]; then
+            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+        fi
+
+        if [[ "$IP_BRD" != "" ]]; then
+            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
+            IP_UP="sudo ip link set $to_intf up"
+            if [[ "$af" == "inet" ]]; then
+                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
+                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
+            fi
+        fi
+
+        # The add/del OVS port calls have to happen either before or
+        # after the address is moved in order to not leave it orphaned.
+        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
+    fi
+}
+
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+    # If we've given a PUBLIC_INTERFACE to take over, then we assume
+    # that we can own the whole thing, and privot it into the OVS
+    # bridge. If we are not, we're probably on a single interface
+    # machine, and we just setup NAT so that fixed guests can get out.
+    if [[ -n "$PUBLIC_INTERFACE" ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        fi
     else
-        neutron_deploy_rootwrap_filters_new "$@"
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
+    fi
+}
+
+# cleanup_neutron() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_neutron {
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-api
+        stop_process neutron-rpc-server
+        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+        sudo rm -f $(apache_site_config_for neutron-api)
+    fi
+
+    if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
+
+        if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+            # ip(8) wants the prefix length when deleting
+            local v6_gateway
+            v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
+            sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
+            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
+        fi
+
+        if is_provider_network && is_ironic_hardware; then
+            for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+                sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+                sudo ip addr add $IP dev $PUBLIC_INTERFACE
+            done
+            sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        fi
+    fi
+
+    if is_neutron_ovs_base_plugin; then
+        neutron_ovs_base_cleanup
+    fi
+
+    if [[ $Q_AGENT == "linuxbridge" ]]; then
+        neutron_lb_cleanup
+    fi
+
+    # delete all namespaces created by neutron
+    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
+        sudo ip netns delete ${ns}
+    done
+
+    if [[ $Q_AGENT == "ovn" ]]; then
+        cleanup_ovn
+    fi
+}
+
+
+function _create_neutron_conf_dir {
+    # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
+    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
+}
+
+# _configure_neutron_common()
+# Set common config for all neutron server and agents.
+# This MUST be called before other ``_configure_neutron_*`` functions.
+function _configure_neutron_common {
+    _create_neutron_conf_dir
+
+    # Uses oslo config generator to generate core sample configuration files
+    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
+
+    Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
+
+    # allow neutron user to administer neutron to match neutron account
+    # NOTE(amotoki): This is required for nova works correctly with neutron.
+    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    else
+        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+    fi
+
+    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
+    # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
+    neutron_plugin_configure_common
+
+    if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
+        die $LINENO "Neutron plugin not set.. exiting"
+    fi
+
+    # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
+    mkdir -p /$Q_PLUGIN_CONF_PATH
+    Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+    # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository,
+    # it was previously defined in the lib/neutron module which is now deleted.
+    NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE
+    # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
+    # there is no config file in Neutron tree. They should prepare the file in each plugin.
+    if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
+        cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
+    elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
+        cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+    fi
+
+    iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
+    iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
+    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
+    iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
+    iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
+
+    # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
+    iniset $NEUTRON_CONF nova region_name $REGION_NAME
+
+    if [ "$VIRT_DRIVER" = 'fake' ]; then
+        # Disable arbitrary limits
+        iniset $NEUTRON_CONF quotas quota_network -1
+        iniset $NEUTRON_CONF quotas quota_subnet -1
+        iniset $NEUTRON_CONF quotas quota_port -1
+        iniset $NEUTRON_CONF quotas quota_security_group -1
+        iniset $NEUTRON_CONF quotas quota_security_group_rule -1
+    fi
+
+    # Format logging
+    setup_logging $NEUTRON_CONF
+
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
+        # Set the service port for a proxy to take the original
+        iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
+    fi
+
+    _neutron_setup_rootwrap
+}
+
+function _configure_neutron_dhcp_agent {
+
+    cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
+
+    iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    # make it so we have working DNS from guests
+    iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
+    configure_root_helper_options $Q_DHCP_CONF_FILE
+
+    if ! is_service_enabled q-l3 neutron-l3; then
+        if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
+            iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
+            iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
+        else
+            if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
+                die "$LINENO" "Enable isolated metadata is a must for metadata network"
+            fi
+        fi
+    fi
+
+    _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
+
+    neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
+}
+
+
+function _configure_neutron_metadata_agent {
+    cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
+
+    iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
+    iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
+    configure_root_helper_options $Q_META_CONF_FILE
+}
+
+function _configure_neutron_ceilometer_notifications {
+    iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
+}
+
+function _configure_neutron_metering {
+    neutron_agent_metering_configure_common
+    neutron_agent_metering_configure_agent
+}
+
+function _configure_dvr {
+    iniset $NEUTRON_CONF DEFAULT router_distributed True
+    iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
+}
+
+
+# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
+# It is called when q-agt is enabled.
+function _configure_neutron_plugin_agent {
+    # Specify the default root helper prior to agent configuration to
+    # ensure that an agent's configuration can override the default
+    configure_root_helper_options /$Q_PLUGIN_CONF_FILE
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+
+    # Configure agent for plugin
+    neutron_plugin_configure_plugin_agent
+}
+
+function _replace_api_paste_composite {
+    local sep
+    sep=$(echo -ne "\x01")
+    # Replace it
+    $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE"
+    $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE"
+    $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE"
+}
+
+# _configure_neutron_service() - Set config files for neutron service
+# It is called when q-svc is enabled.
+function _configure_neutron_service {
+    Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
+    cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+
+    if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+        _replace_api_paste_composite
+    fi
+
+    # Update either configuration file with plugin
+    iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
+
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
+
+    iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
+    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
+
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
+
+    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
+
+    # Configuration for placement client
+    configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
+
+    # Configure plugin
+    neutron_plugin_configure_service
+}
+
+# Utility Functions
+#------------------
+
+# neutron_service_plugin_class_add() - add service plugin class
+function neutron_service_plugin_class_add {
+    local service_plugin_class=$1
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
+    elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
+    fi
+}
+
+# neutron_ml2_extension_driver_add() - add ML2 extension driver
+function neutron_ml2_extension_driver_add {
+    local extension=$1
+    if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
+        Q_ML2_PLUGIN_EXT_DRIVERS=$extension
+    elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
+        Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
+    fi
+}
+
+# neutron_server_config_add() - add server config file
+function neutron_server_config_add {
+    _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
+}
+
+# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
+function neutron_deploy_rootwrap_filters {
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        return
+    fi
+    local srcdir=$1
+    sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
+    sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
+}
+
+# _neutron_setup_rootwrap() - configure Neutron's rootwrap
+function _neutron_setup_rootwrap {
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        return
+    fi
+    # Wipe any existing ``rootwrap.d`` files first
+    Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
+    if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
+        sudo rm -rf $Q_CONF_ROOTWRAP_D
+    fi
+
+    neutron_deploy_rootwrap_filters $NEUTRON_DIR
+
+    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
+    # location moved in newer versions, prefer new location
+    if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
+        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
+    else
+        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+    fi
+    sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
+    sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
+
+    # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
+    ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
+    ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+
+    # Set up the rootwrap sudoers for neutron
+    TEMPFILE=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
+    chmod 0440 $TEMPFILE
+    sudo chown root:root $TEMPFILE
+    sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
+
+    # Update the root_helper
+    configure_root_helper_options $NEUTRON_CONF
+}
+
+function configure_root_helper_options {
+    local conffile=$1
+    iniset $conffile agent root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
+}
+
+function _neutron_setup_interface_driver {
+
+    # ovs_use_veth needs to be set before the plugin configuration
+    # occurs to allow plugins to override the setting.
+    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+    neutron_plugin_setup_interface_driver $1
+}
+# Functions for Neutron Exercises
+#--------------------------------
+
+function delete_probe {
+    local from_net="$1"
+    net_id=`_get_net_id $from_net`
+    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
+    neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
+}
+
+function _get_net_id {
+    openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
+}
+
+function _get_probe_cmd_prefix {
+    local from_net="$1"
+    net_id=`_get_net_id $from_net`
+    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
+    echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
+}
+
+# ssh check
+function _ssh_check_neutron {
+    local from_net=$1
+    local key_file=$2
+    local ip=$3
+    local user=$4
+    local timeout_sec=$5
+    local probe_cmd = ""
+    probe_cmd=`_get_probe_cmd_prefix $from_net`
+    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
+}
+
+function plugin_agent_add_l2_agent_extension {
+    local l2_agent_extension=$1
+    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+        L2_AGENT_EXTENSIONS=$l2_agent_extension
+    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
     fi
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_NEUTRON
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index baf67f2..e90400f 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -1,1097 +1,6 @@
 #!/bin/bash
-#
-# lib/neutron
-# functions - functions specific to neutron
 
-# Dependencies:
-# ``functions`` file
-# ``DEST`` must be defined
-# ``STACK_USER`` must be defined
+# TODO(slaweq): remove that file when other projects, like e.g. Grenade will
+# be using lib/neutron
 
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_neutron_agent_packages
-# - install_neutronclient
-# - install_neutron
-# - install_neutron_third_party
-# - configure_neutron
-# - init_neutron
-# - configure_neutron_third_party
-# - init_neutron_third_party
-# - start_neutron_third_party
-# - create_nova_conf_neutron
-# - configure_neutron_after_post_config
-# - start_neutron_service_and_check
-# - check_neutron_third_party_integration
-# - start_neutron_agents
-# - create_neutron_initial_network
-#
-# ``unstack.sh`` calls the entry points in this order:
-#
-# - stop_neutron
-# - stop_neutron_third_party
-# - cleanup_neutron
-
-# Functions in lib/neutron are classified into the following categories:
-#
-# - entry points (called from stack.sh or unstack.sh)
-# - internal functions
-# - neutron exercises
-# - 3rd party programs
-
-
-# Neutron Networking
-# ------------------
-
-# Make sure that neutron is enabled in ``ENABLED_SERVICES``.  If you want
-# to run Neutron on this host, make sure that q-svc is also in
-# ``ENABLED_SERVICES``.
-#
-# See "Neutron Network Configuration" below for additional variables
-# that must be set in localrc for connectivity across hosts with
-# Neutron.
-
-# Settings
-# --------
-
-
-# Neutron Network Configuration
-# -----------------------------
-
-if is_service_enabled tls-proxy; then
-    Q_PROTOCOL="https"
-fi
-
-
-# Set up default directories
-GITDIR["python-neutronclient"]=$DEST/python-neutronclient
-
-
-NEUTRON_DIR=$DEST/neutron
-NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-
-# Support entry points installation of console scripts
-if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
-    NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
-else
-    NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
-
-# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
-# - False (default) : Run neutron under Eventlet
-# - True : Run neutron under uwsgi
-# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
-# enough
-NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
-# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
-
-# Agent binaries.  Note, binary paths for other agents are set in per-service
-# scripts in lib/neutron_plugins/services/
-AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
-AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
-AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
-
-# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
-# loaded from per-plugin  scripts in lib/neutron_plugins/
-Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
-Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
-
-# Default name for Neutron database
-Q_DB_NAME=${Q_DB_NAME:-neutron}
-# Default Neutron Plugin
-Q_PLUGIN=${Q_PLUGIN:-ml2}
-# Default Neutron Port
-Q_PORT=${Q_PORT:-9696}
-# Default Neutron Internal Port when using TLS proxy
-Q_PORT_INT=${Q_PORT_INT:-19696}
-# Default Neutron Host
-Q_HOST=${Q_HOST:-$SERVICE_HOST}
-# Default protocol
-Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
-# Default listen address
-Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
-# Default admin username
-Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
-# Default auth strategy
-Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# RHEL's support for namespaces requires using veths with ovs
-Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
-Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
-Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
-# Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
-# Allow Overlapping IP among subnets
-Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
-Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
-VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
-VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
-
-# Allow to skip stopping of OVN services
-SKIP_STOP_OVN=${SKIP_STOP_OVN:-False}
-
-# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
-# /etc/neutron is assumed by many of devstack plugins.  Do not change.
-_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
-
-# The name of the service in the endpoint URL
-NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
-if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
-    NEUTRON_ENDPOINT_SERVICE_NAME="networking"
-fi
-
-# List of config file names in addition to the main plugin config file
-# To add additional plugin config files, use ``neutron_server_config_add``
-# utility function.  For example:
-#
-#    ``neutron_server_config_add file1``
-#
-# These config files are relative to ``/etc/neutron``.  The above
-# example would specify ``--config-file /etc/neutron/file1`` for
-# neutron server.
-declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
-
-# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
-declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
-
-
-Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-    Q_RR_COMMAND="sudo"
-else
-    NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-    Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
-    fi
-fi
-
-
-# Distributed Virtual Router (DVR) configuration
-# Can be:
-# - ``legacy``   - No DVR functionality
-# - ``dvr_snat`` - Controller or single node DVR
-# - ``dvr``      - Compute node in multi-node DVR
-#
-Q_DVR_MODE=${Q_DVR_MODE:-legacy}
-if [[ "$Q_DVR_MODE" != "legacy" ]]; then
-    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
-fi
-
-# Provider Network Configurations
-# --------------------------------
-
-# The following variables control the Neutron ML2 plugins' allocation
-# of tenant networks and availability of provider networks. If these
-# are not configured in ``localrc``, tenant networks will be local to
-# the host (with no remote connectivity), and no physical resources
-# will be available for the allocation of provider networks.
-
-# To disable tunnels (GRE or VXLAN) for tenant networks,
-# set to False in ``local.conf``.
-# GRE tunnels are only supported by the openvswitch.
-ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
-
-# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
-# specify the range of IDs from which tenant networks are
-# allocated. Can be overridden in ``localrc`` if necessary.
-TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
-
-# To use VLANs for tenant networks, set to True in localrc. VLANs
-# are supported by the ML2 plugins, requiring additional configuration
-# described below.
-ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
-# If using VLANs for tenant networks, set in ``localrc`` to specify
-# the range of VLAN VIDs from which tenant networks are
-# allocated. An external network switch must be configured to
-# trunk these VLANs between hosts for multi-host connectivity.
-#
-# Example: ``TENANT_VLAN_RANGE=1000:1999``
-TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
-# If using VLANs for tenant networks, or if using flat or VLAN
-# provider networks, set in ``localrc`` to the name of the physical
-# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
-# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
-# agent, as described below.
-#
-# Example: ``PHYSICAL_NETWORK=default``
-PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
-
-# With the openvswitch agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the OVS bridge to use for the physical network. The
-# bridge will be created if it does not already exist, but a
-# physical interface must be manually added to the bridge as a
-# port for external connectivity.
-#
-# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
-
-# With the linuxbridge agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the network interface to use for the physical
-# network.
-#
-# Example: ``LB_PHYSICAL_INTERFACE=eth1``
-if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
-    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
-    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
-    LB_PHYSICAL_INTERFACE=$default_route_dev
-fi
-
-# With the openvswitch plugin, set to True in ``localrc`` to enable
-# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
-#
-# Example: ``OVS_ENABLE_TUNNELING=True``
-OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
-
-# Use DHCP agent for providing metadata service in the case of
-# without L3 agent (No Route Agent), set to True in localrc.
-ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
-
-# Add a static route as dhcp option, so the request to 169.254.169.254
-# will be able to reach through a route(DHCP agent)
-# This option require ENABLE_ISOLATED_METADATA = True
-ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
-# Neutron plugin specific functions
-# ---------------------------------
-
-# Please refer to ``lib/neutron_plugins/README.md`` for details.
-if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
-    source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
-fi
-
-# Agent metering service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/metering
-
-# L3 Service functions
-source $TOP_DIR/lib/neutron_plugins/services/l3
-
-# Additional Neutron service plugins
-source $TOP_DIR/lib/neutron_plugins/services/placement
-source $TOP_DIR/lib/neutron_plugins/services/trunk
-source $TOP_DIR/lib/neutron_plugins/services/qos
-
-# Use security group or not
-if has_neutron_plugin_security_group; then
-    Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
-else
-    Q_USE_SECGROUP=False
-fi
-
-# Save trace setting
-_XTRACE_NEUTRON=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Functions
-# ---------
-
-function _determine_config_server {
-    if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
-        if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
-            deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
-        else
-            die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
-        fi
-    fi
-    if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
-        deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated.  Use neutron_server_config_add instead."
-    fi
-    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
-        _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
-    done
-
-    local cfg_file
-    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
-        opts+=" --config-file $cfg_file"
-    done
-    echo "$opts"
-}
-
-function _determine_config_l3 {
-    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
-    echo "$opts"
-}
-
-# For services and agents that require it, dynamically construct a list of
-# --config-file arguments that are passed to the binary.
-function determine_config_files {
-    local opts=""
-    case "$1" in
-        "neutron-server") opts="$(_determine_config_server)" ;;
-        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
-    esac
-    if [ -z "$opts" ] ; then
-        die $LINENO "Could not determine config files for $1."
-    fi
-    echo "$opts"
-}
-
-# configure_mutnauq()
-# Set common config for all neutron server and agents.
-function configure_mutnauq {
-    _configure_neutron_common
-    iniset_rpc_backend neutron $NEUTRON_CONF
-
-    if is_service_enabled q-metering; then
-        _configure_neutron_metering
-    fi
-    if is_service_enabled q-agt q-svc; then
-        _configure_neutron_service
-    fi
-    if is_service_enabled q-agt; then
-        _configure_neutron_plugin_agent
-    fi
-    if is_service_enabled q-dhcp; then
-        _configure_neutron_dhcp_agent
-    fi
-    if is_service_enabled q-l3; then
-        _configure_neutron_l3_agent
-    fi
-    if is_service_enabled q-meta; then
-        _configure_neutron_metadata_agent
-    fi
-
-    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
-        _configure_dvr
-    fi
-    if is_service_enabled ceilometer; then
-        _configure_neutron_ceilometer_notifications
-    fi
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        configure_ovn
-        configure_ovn_plugin
-    fi
-
-    # Configure Neutron's advanced services
-    if is_service_enabled q-placement neutron-placement; then
-        configure_placement_extension
-    fi
-    if is_service_enabled q-trunk neutron-trunk; then
-        configure_trunk_extension
-    fi
-    if is_service_enabled q-qos neutron-qos; then
-        configure_qos
-        if is_service_enabled q-l3 neutron-l3; then
-            configure_l3_agent_extension_fip_qos
-            configure_l3_agent_extension_gateway_ip_qos
-        fi
-    fi
-
-    iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
-    # devstack is not a tool for running uber scale OpenStack
-    # clouds, therefore running without a dedicated RPC worker
-    # for state reports is more than adequate.
-    iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
-}
-
-function create_nova_conf_neutron {
-    local conf=${1:-$NOVA_CONF}
-    iniset $conf neutron auth_type "password"
-    iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf neutron username "$Q_ADMIN_USERNAME"
-    iniset $conf neutron password "$SERVICE_PASSWORD"
-    iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
-    iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
-    iniset $conf neutron region_name "$REGION_NAME"
-
-    # optionally set options in nova_conf
-    neutron_plugin_create_nova_conf $conf
-
-    if is_service_enabled q-meta; then
-        iniset $conf neutron service_metadata_proxy "True"
-    fi
-
-    iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
-    iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
-}
-
-# create_mutnauq_accounts() - Set up common required neutron accounts
-
-# Tenant               User       Roles
-# ------------------------------------------------------------------
-# service              neutron    admin        # if enabled
-
-# Migrated from keystone_data.sh
-function create_mutnauq_accounts {
-    local neutron_url
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/
-    else
-        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
-    fi
-    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
-        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
-    fi
-
-    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-
-        create_service_user "neutron"
-
-        get_or_create_service "neutron" "network" "Neutron Service"
-        get_or_create_endpoint \
-            "network" \
-            "$REGION_NAME" "$neutron_url"
-    fi
-}
-
-# init_mutnauq() - Initialize databases, etc.
-function init_mutnauq {
-    recreate_database $Q_DB_NAME
-    time_start "dbsync"
-    # Run Neutron db migrations
-    $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-    time_stop "dbsync"
-}
-
-# install_mutnauq() - Collect source and prepare
-function install_mutnauq {
-    # Install neutron-lib from git so we make sure we're testing
-    # the latest code.
-    if use_library_from_git "neutron-lib"; then
-        git_clone_by_name "neutron-lib"
-        setup_dev_lib "neutron-lib"
-    fi
-
-    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
-    setup_develop $NEUTRON_DIR
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        install_ovn
-    fi
-}
-
-# install_neutron_agent_packages() - Collect source and prepare
-function install_neutron_agent_packages_mutnauq {
-    # radvd doesn't come with the OS. Install it if the l3 service is enabled.
-    if is_service_enabled q-l3; then
-        install_package radvd
-    fi
-    # install packages that are specific to plugin agent(s)
-    if is_service_enabled q-agt q-dhcp q-l3; then
-        neutron_plugin_install_agent_packages
-    fi
-}
-
-# Finish neutron configuration
-function configure_neutron_after_post_config {
-    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
-        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
-    fi
-    configure_rbac_policies
-}
-
-# configure_rbac_policies() - Configure Neutron to enforce new RBAC
-# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
-function configure_rbac_policies {
-    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
-        iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
-        iniset $NEUTRON_CONF oslo_policy enforce_scope True
-    else
-        iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
-        iniset $NEUTRON_CONF oslo_policy enforce_scope False
-    fi
-}
-
-# Start running OVN processes
-function start_ovn_services {
-    if [[ $Q_AGENT == "ovn" ]]; then
-        init_ovn
-        start_ovn
-        if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
-            if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
-                echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
-                echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
-            else
-                create_public_bridge
-            fi
-        fi
-    fi
-}
-
-# Start running processes
-function start_neutron_service_and_check {
-    local service_port=$Q_PORT
-    local service_protocol=$Q_PROTOCOL
-    local cfg_file_options
-    local neutron_url
-
-    cfg_file_options="$(determine_config_files neutron-server)"
-
-    if is_service_enabled tls-proxy; then
-        service_port=$Q_PORT_INT
-        service_protocol="http"
-    fi
-    # Start the Neutron service
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        enable_service neutron-api
-        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
-        neutron_url=$Q_PROTOCOL://$Q_HOST/
-        enable_service neutron-rpc-server
-        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
-    else
-        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
-        neutron_url=$service_protocol://$Q_HOST:$service_port/
-        # Start proxy if enabled
-        if is_service_enabled tls-proxy; then
-            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
-        fi
-    fi
-    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
-        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
-    fi
-    echo "Waiting for Neutron to start..."
-
-    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
-    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-}
-
-# Control of the l2 agent is separated out to make it easier to test partial
-# upgrades (everything upgraded except the L2 agent)
-function start_mutnauq_l2_agent {
-    run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-
-    if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
-        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
-        sudo ip link set $OVS_PHYSICAL_BRIDGE up
-        sudo ip link set br-int up
-        sudo ip link set $PUBLIC_INTERFACE up
-        if is_ironic_hardware; then
-            for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
-                sudo ip addr del $IP dev $PUBLIC_INTERFACE
-                sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
-            done
-            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
-        fi
-    fi
-}
-
-function start_mutnauq_other_agents {
-    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
-
-    run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
-
-    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
-    run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
-}
-
-# Start running processes, including screen
-function start_neutron_agents {
-    # Start up the neutron agents if enabled
-    start_mutnauq_l2_agent
-    start_mutnauq_other_agents
-}
-
-function stop_mutnauq_l2_agent {
-    stop_process q-agt
-}
-
-# stop_mutnauq_other() - Stop running processes
-function stop_mutnauq_other {
-    if is_service_enabled q-dhcp; then
-        stop_process q-dhcp
-        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
-        [ ! -z "$pid" ] && sudo kill -9 $pid
-    fi
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        stop_process neutron-rpc-server
-        stop_process neutron-api
-    else
-        stop_process q-svc
-    fi
-
-    if is_service_enabled q-l3; then
-        sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
-        stop_process q-l3
-    fi
-
-    if is_service_enabled q-meta; then
-        stop_process q-meta
-    fi
-
-    if is_service_enabled q-metering; then
-        neutron_metering_stop
-    fi
-
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || :
-    fi
-}
-
-# stop_neutron() - Stop running processes (non-screen)
-function stop_mutnauq {
-    stop_mutnauq_other
-    stop_mutnauq_l2_agent
-
-    if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
-        stop_ovn
-    fi
-}
-
-# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
-# on startup, or back to the public interface on cleanup. If no IP is
-# configured on the interface, just add it as a port to the OVS bridge.
-function _move_neutron_addresses_route {
-    local from_intf=$1
-    local to_intf=$2
-    local add_ovs_port=$3
-    local del_ovs_port=$4
-    local af=$5
-
-    if [[ -n "$from_intf" && -n "$to_intf" ]]; then
-        # Remove the primary IP address from $from_intf and add it to $to_intf,
-        # along with the default route, if it exists.  Also, when called
-        # on configure we will also add $from_intf as a port on $to_intf,
-        # assuming it is an OVS bridge.
-
-        local IP_REPLACE=""
-        local IP_DEL=""
-        local IP_UP=""
-        local DEFAULT_ROUTE_GW
-        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
-        local ADD_OVS_PORT=""
-        local DEL_OVS_PORT=""
-        local ARP_CMD=""
-
-        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
-
-        if [ "$DEFAULT_ROUTE_GW" != "" ]; then
-            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
-        fi
-
-        if [[ "$add_ovs_port" == "True" ]]; then
-            ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
-        fi
-
-        if [[ "$del_ovs_port" == "True" ]]; then
-            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
-        fi
-
-        if [[ "$IP_BRD" != "" ]]; then
-            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
-            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
-            IP_UP="sudo ip link set $to_intf up"
-            if [[ "$af" == "inet" ]]; then
-                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
-                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
-            fi
-        fi
-
-        # The add/del OVS port calls have to happen either before or
-        # after the address is moved in order to not leave it orphaned.
-        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
-    fi
-}
-
-# _configure_public_network_connectivity() - Configures connectivity to the
-# external network using $PUBLIC_INTERFACE or NAT on the single interface
-# machines
-function _configure_public_network_connectivity {
-    # If we've given a PUBLIC_INTERFACE to take over, then we assume
-    # that we can own the whole thing, and privot it into the OVS
-    # bridge. If we are not, we're probably on a single interface
-    # machine, and we just setup NAT so that fixed guests can get out.
-    if [[ -n "$PUBLIC_INTERFACE" ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
-        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
-        fi
-    else
-        for d in $default_v4_route_devs; do
-            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
-        done
-    fi
-}
-
-# cleanup_mutnauq() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_mutnauq {
-
-    if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
-        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
-
-        if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
-            # ip(8) wants the prefix length when deleting
-            local v6_gateway
-            v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
-            sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
-            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
-        fi
-
-        if is_provider_network && is_ironic_hardware; then
-            for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
-                sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
-                sudo ip addr add $IP dev $PUBLIC_INTERFACE
-            done
-            sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
-        fi
-    fi
-
-    if is_neutron_ovs_base_plugin; then
-        neutron_ovs_base_cleanup
-    fi
-
-    if [[ $Q_AGENT == "linuxbridge" ]]; then
-        neutron_lb_cleanup
-    fi
-
-    # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
-        sudo ip netns delete ${ns}
-    done
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        cleanup_ovn
-    fi
-}
-
-
-function _create_neutron_conf_dir {
-    # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
-    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-}
-
-# _configure_neutron_common()
-# Set common config for all neutron server and agents.
-# This MUST be called before other ``_configure_neutron_*`` functions.
-function _configure_neutron_common {
-    _create_neutron_conf_dir
-
-    # Uses oslo config generator to generate core sample configuration files
-    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
-    Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
-
-    # allow neutron user to administer neutron to match neutron account
-    # NOTE(amotoki): This is required for nova works correctly with neutron.
-    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
-        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
-        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
-    else
-        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
-    fi
-
-    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
-    # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
-    neutron_plugin_configure_common
-
-    if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
-        die $LINENO "Neutron plugin not set.. exiting"
-    fi
-
-    # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
-    mkdir -p /$Q_PLUGIN_CONF_PATH
-    Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
-    # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
-    # there is no config file in Neutron tree. They should prepare the file in each plugin.
-    if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
-        cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
-    elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
-        cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
-    fi
-
-    iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
-    iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
-    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
-    iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
-    iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
-
-    # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
-    iniset $NEUTRON_CONF nova region_name $REGION_NAME
-
-    if [ "$VIRT_DRIVER" = 'fake' ]; then
-        # Disable arbitrary limits
-        iniset $NEUTRON_CONF quotas quota_network -1
-        iniset $NEUTRON_CONF quotas quota_subnet -1
-        iniset $NEUTRON_CONF quotas quota_port -1
-        iniset $NEUTRON_CONF quotas quota_security_group -1
-        iniset $NEUTRON_CONF quotas quota_security_group_rule -1
-    fi
-
-    # Format logging
-    setup_logging $NEUTRON_CONF
-
-    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
-        # Set the service port for a proxy to take the original
-        iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
-        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
-    fi
-
-    _neutron_setup_rootwrap
-}
-
-function _configure_neutron_dhcp_agent {
-
-    cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
-
-    iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    # make it so we have working DNS from guests
-    iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
-    iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-
-    if ! is_service_enabled q-l3; then
-        if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
-            iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
-            iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
-        else
-            if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
-                die "$LINENO" "Enable isolated metadata is a must for metadata network"
-            fi
-        fi
-    fi
-
-    _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
-
-    neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
-}
-
-
-function _configure_neutron_metadata_agent {
-    cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
-
-    iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
-    iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
-    iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-}
-
-function _configure_neutron_ceilometer_notifications {
-    iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
-}
-
-function _configure_neutron_metering {
-    neutron_agent_metering_configure_common
-    neutron_agent_metering_configure_agent
-}
-
-function _configure_dvr {
-    iniset $NEUTRON_CONF DEFAULT router_distributed True
-    iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
-}
-
-
-# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
-# It is called when q-agt is enabled.
-function _configure_neutron_plugin_agent {
-    # Specify the default root helper prior to agent configuration to
-    # ensure that an agent's configuration can override the default
-    iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE  agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
-    # Configure agent for plugin
-    neutron_plugin_configure_plugin_agent
-}
-
-function _replace_api_paste_composite {
-    local sep
-    sep=$(echo -ne "\x01")
-    # Replace it
-    $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE"
-    $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE"
-    $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE"
-}
-
-# _configure_neutron_service() - Set config files for neutron service
-# It is called when q-svc is enabled.
-function _configure_neutron_service {
-    Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
-    cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
-
-    if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
-        _replace_api_paste_composite
-    fi
-
-    # Update either configuration file with plugin
-    iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
-
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
-
-    iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
-    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
-
-    # Configuration for neutron notifications to nova.
-    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
-    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
-    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
-    # Configuration for placement client
-    configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
-
-    # Configure plugin
-    neutron_plugin_configure_service
-}
-
-# Utility Functions
-#------------------
-
-# _neutron_service_plugin_class_add() - add service plugin class
-function _neutron_service_plugin_class_add {
-    local service_plugin_class=$1
-    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
-        Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
-    elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
-        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
-    fi
-}
-
-# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver
-function _neutron_ml2_extension_driver_add_old {
-    local extension=$1
-    if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS=$extension
-    elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
-    fi
-}
-
-# mutnauq_server_config_add() - add server config file
-function mutnauq_server_config_add {
-    _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
-function _neutron_deploy_rootwrap_filters {
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        return
-    fi
-    local srcdir=$1
-    sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
-    sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
-}
-
-# _neutron_setup_rootwrap() - configure Neutron's rootwrap
-function _neutron_setup_rootwrap {
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        return
-    fi
-    # Wipe any existing ``rootwrap.d`` files first
-    Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
-    if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
-        sudo rm -rf $Q_CONF_ROOTWRAP_D
-    fi
-
-    _neutron_deploy_rootwrap_filters $NEUTRON_DIR
-
-    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
-    # location moved in newer versions, prefer new location
-    if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
-        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
-    else
-        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
-    fi
-    sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
-    sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
-
-    # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
-    ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
-    ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
-
-    # Set up the rootwrap sudoers for neutron
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
-
-    # Update the root_helper
-    iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-}
-
-function _neutron_setup_interface_driver {
-
-    # ovs_use_veth needs to be set before the plugin configuration
-    # occurs to allow plugins to override the setting.
-    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
-    neutron_plugin_setup_interface_driver $1
-}
-# Functions for Neutron Exercises
-#--------------------------------
-
-function delete_probe {
-    local from_net="$1"
-    net_id=`_get_net_id $from_net`
-    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
-    neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
-function _get_net_id {
-    openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
-}
-
-function _get_probe_cmd_prefix {
-    local from_net="$1"
-    net_id=`_get_net_id $from_net`
-    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
-    echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
-}
-
-# ssh check
-function _ssh_check_neutron {
-    local from_net=$1
-    local key_file=$2
-    local ip=$3
-    local user=$4
-    local timeout_sec=$5
-    local probe_cmd = ""
-    probe_cmd=`_get_probe_cmd_prefix $from_net`
-    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
-    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
-}
-
-function plugin_agent_add_l2_agent_extension {
-    local l2_agent_extension=$1
-    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
-        L2_AGENT_EXTENSIONS=$l2_agent_extension
-    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
-        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
-    fi
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
+source $TOP_DIR/lib/neutron
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index ed40886..728aaee 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -13,7 +13,7 @@
 
 functions
 ---------
-``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled
+``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled
 
 * ``neutron_plugin_create_nova_conf`` :
   optionally set options in nova_conf
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index d3f5bd5..84ca7ec 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -67,7 +67,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 310b72e..9640063 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -72,7 +72,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index bdeaf0f..a392bd0 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -97,7 +97,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index fa61f1e..c2e78c6 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -67,7 +67,7 @@
     Q_PLUGIN_CLASS="ml2"
     # The ML2 plugin delegates L3 routing/NAT functionality to
     # the L3 service plugin which must therefore be specified.
-    _neutron_service_plugin_class_add $ML2_L3_PLUGIN
+    neutron_service_plugin_class_add $ML2_L3_PLUGIN
 }
 
 function neutron_plugin_configure_service {
@@ -111,18 +111,7 @@
             fi
         fi
     fi
-    # REVISIT(rkukura): Setting firewall_driver here for
-    # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
-    # used in the server, in case no L2 agent is configured on the
-    # server's node. If an L2 agent is configured, this will get
-    # overridden with the correct driver. The ml2 plugin should
-    # instead use its own config variable to indicate whether security
-    # groups is enabled, and that will need to be set here instead.
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
-    else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-    fi
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
     if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 7fed8bf..6e79984 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -68,7 +68,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 8eb2993..3526ccd 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -244,11 +244,12 @@
     local cmd="$2"
     local stop_cmd="$3"
     local group=$4
-    local user=${5:-$STACK_USER}
+    local user=$5
+    local rundir=${6:-$OVS_RUNDIR}
 
     local systemd_service="devstack@$service.service"
     local unit_file="$SYSTEMD_DIR/$systemd_service"
-    local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
+    local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
 
     echo "Starting $service executed command": $cmd
 
@@ -264,14 +265,14 @@
 
     _start_process $systemd_service
 
-    local testcmd="test -e $OVS_RUNDIR/$service.pid"
+    local testcmd="test -e $rundir/$service.pid"
     test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
     local service_ctl_file
-    service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+    service_ctl_file=$(ls $rundir | grep $service | grep ctl)
     if [ -z "$service_ctl_file" ]; then
         die $LINENO "ctl file for service $service is not present."
     fi
-    sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
+    sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info
 }
 
 function clone_repository {
@@ -347,7 +348,7 @@
 
 # OVN service sanity check
 function ovn_sanity_check {
-    if is_service_enabled q-agt neutron-agt; then
+    if is_service_enabled q-agt neutron-agent; then
         die $LINENO "The q-agt/neutron-agt service must be disabled with OVN."
     elif is_service_enabled q-l3 neutron-l3; then
         die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
@@ -370,10 +371,6 @@
 
     sudo mkdir -p $OVS_RUNDIR
     sudo chown $(whoami) $OVS_RUNDIR
-    # NOTE(lucasagomes): To keep things simpler, let's reuse the same
-    # RUNDIR for both OVS and OVN. This way we avoid having to specify the
-    # --db option in the ovn-{n,s}bctl commands while playing with DevStack
-    sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
 
     if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
         # If OVS is already installed, remove it, because we're about to
@@ -464,7 +461,7 @@
 function configure_ovn_plugin {
     echo "Configuring Neutron for OVN"
 
-    if is_service_enabled q-svc ; then
+    if is_service_enabled q-svc neutron-api; then
         filter_network_api_extensions
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
@@ -488,7 +485,7 @@
             inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
         fi
 
-        if is_service_enabled q-ovn-metadata-agent; then
+        if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
         else
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
@@ -509,7 +506,7 @@
     fi
 
     if is_service_enabled n-api-meta ; then
-        if is_service_enabled q-ovn-metadata-agent ; then
+        if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
             iniset $NOVA_CONF neutron service_metadata_proxy True
         fi
     fi
@@ -542,7 +539,7 @@
     fi
 
     # Metadata
-    if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
         sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
 
         mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
@@ -554,7 +551,7 @@
         iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
         iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
-        iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
+        iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron
         iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
         iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
         if is_service_enabled tls-proxy; then
@@ -590,6 +587,7 @@
     rm -f $OVS_DATADIR/.*.db.~lock~
     sudo rm -f $OVN_DATADIR/*.db
     sudo rm -f $OVN_DATADIR/.*.db.~lock~
+    sudo rm -f $OVN_RUNDIR/*.sock
 }
 
 function _start_ovs {
@@ -616,12 +614,12 @@
                 dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
             fi
             dbcmd+=" $OVS_DATADIR/conf.db"
-            _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root"
+            _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
 
             # Note: ovn-controller will create and configure br-int once it is started.
             # So, no need to create it now because nothing depends on that bridge here.
             local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
-            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
         else
             _start_process "$OVSDB_SERVER_SERVICE"
             _start_process "$OVS_VSWITCHD_SERVICE"
@@ -660,7 +658,7 @@
 
             enable_service ovs-vtep
             local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
-            _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root"
+            _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
 
             vtep-ctl set-manager tcp:$HOST_IP:6640
         fi
@@ -683,7 +681,7 @@
     if is_service_enabled ovs-vtep ; then
         _start_process "devstack@ovs-vtep.service"
     fi
-    if is_service_enabled q-ovn-metadata-agent; then
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then
         _start_process "devstack@q-ovn-metadata-agent.service"
     fi
 }
@@ -704,26 +702,26 @@
             local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
             local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
 
-            _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+            _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_NORTHD_SERVICE"
         fi
 
         # Wait for the service to be ready
         # Check for socket and db files for both OVN NB and SB
-        wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
-        wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+        wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock
+        wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock
         wait_for_db_file $OVN_DATADIR/ovnnb_db.db
         wait_for_db_file $OVN_DATADIR/ovnsb_db.db
 
         if is_service_enabled tls-proxy; then
-            sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
-            sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
         fi
-        sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
-        sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
-        sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
-        sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+        sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+        sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
     fi
 
     if is_service_enabled ovn-controller ; then
@@ -731,7 +729,7 @@
             local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
             local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
 
-            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_CONTROLLER_SERVICE"
         fi
@@ -740,13 +738,13 @@
     if is_service_enabled ovn-controller-vtep ; then
         if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
             local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
-            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
         fi
     fi
 
-    if is_service_enabled q-ovn-metadata-agent; then
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
         run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
         # Format logging
         setup_logging $OVN_META_CONF
@@ -770,8 +768,10 @@
 }
 
 function stop_ovn {
-    if is_service_enabled q-ovn-metadata-agent; then
-        sudo pkill -9 -f haproxy || :
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
         _stop_process "devstack@q-ovn-metadata-agent.service"
     fi
     if is_service_enabled ovn-controller-vtep ; then
@@ -816,5 +816,5 @@
         _cleanup $ovs_path
     fi
 
-    sudo rm -f $OVN_RUNDIR
+    sudo rm -rf $OVN_RUNDIR
 }
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 164d574..ea71e60 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -33,9 +33,9 @@
     local fatal=$2
 
     if [ "$(trueorfalse True fatal)" == "True" ]; then
-        sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module")
+        sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module")
     else
-        sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg)
+        sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg)
     fi
 }
 
@@ -103,7 +103,7 @@
 function load_ovs_kernel_modules {
     load_module openvswitch
     load_module vport-geneve False
-    dmesg | tail
+    sudo dmesg | tail
 }
 
 # reload_ovs_kernel_modules() - reload openvswitch kernel module
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 3dffc33..2bf884a 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -323,7 +323,7 @@
     openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
 
     # This logic is specific to using OVN or the l3-agent for layer 3
-    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
         # Configure and enable public bridge
         local ext_gw_interface="none"
         if is_neutron_ovs_base_plugin; then
@@ -372,7 +372,7 @@
     fi
 
     # This logic is specific to using OVN or the l3-agent for layer 3
-    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
         # if the Linux host considers itself to be a router then it will
         # ignore all router advertisements
         # Ensure IPv6 RAs are accepted on interfaces with a default route.
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 5b32468..757a562 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -12,7 +12,7 @@
 METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
 
 function neutron_agent_metering_configure_common {
-    _neutron_service_plugin_class_add $METERING_PLUGIN
+    neutron_service_plugin_class_add $METERING_PLUGIN
 }
 
 function neutron_agent_metering_configure_agent {
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
index af9eb3d..c11c315 100644
--- a/lib/neutron_plugins/services/qos
+++ b/lib/neutron_plugins/services/qos
@@ -6,7 +6,7 @@
 
 
 function configure_qos_core_plugin {
-    configure_qos_$NEUTRON_CORE_PLUGIN
+    configure_qos_$Q_PLUGIN
 }
 
 
diff --git a/lib/nova b/lib/nova
index 6de1d33..3aa6b9e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -97,6 +97,18 @@
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
 NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
 
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This is used to switch the compute API policies to enable scope and new defaults.
+# By default, these flags are False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE)
+
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+    NOVA_MY_IP="$HOST_IPV6"
+else
+    NOVA_MY_IP="$HOST_IP"
+fi
+
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
@@ -205,6 +217,9 @@
         done
         sudo iscsiadm --mode node --op delete || true
 
+        # Disconnect all nvmeof connections
+        sudo nvme disconnect-all || true
+
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
@@ -292,6 +307,7 @@
             fi
         fi
 
+        # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM
         # Ensure each compute host uses a unique iSCSI initiator
         echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
 
@@ -312,8 +328,28 @@
         # not work under FIPS.
         iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
 
-        # ensure that iscsid is started, even when disabled by default
-        restart_service iscsid
+        if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then
+            # ensure that iscsid is started, even when disabled by default
+            restart_service iscsid
+
+        # For NVMe-oF we need different packages that may not be present
+        else
+            install_package nvme-cli
+            sudo modprobe nvme-fabrics
+
+            # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface
+            if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+                sudo modprobe nvme-rdma
+                iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`}
+                if ! sudo rdma link | grep $iface ; then
+                    sudo rdma link add rxe_$iface type rxe netdev $iface
+                fi
+            elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+                sudo modprobe nvme-tcp
+            else  # 'nvmet_fc'
+                sudo modprobe nvme-fc
+            fi
+        fi
     fi
 
     # Rebuild the config file from scratch
@@ -404,11 +440,7 @@
     iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
     iniset $NOVA_CONF scheduler workers "$API_WORKERS"
     iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
-    if [[ $SERVICE_IP_VERSION == 6 ]]; then
-        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
-    else
-        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
-    fi
+    iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP"
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
@@ -455,6 +487,13 @@
             NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
         fi
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
+        if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+            iniset $NOVA_CONF oslo_policy enforce_new_defaults True
+            iniset $NOVA_CONF oslo_policy enforce_scope True
+        else
+            iniset $NOVA_CONF oslo_policy enforce_new_defaults False
+            iniset $NOVA_CONF oslo_policy enforce_scope False
+        fi
         if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
             # Set the service port for a proxy to take the original
             iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
@@ -885,8 +924,23 @@
         # a websockets/html5 or flash powered VNC console for vm instances
         NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
         if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
+            # Installing novnc on Debian bullseye breaks the global pip
+            # install. This happens because novnc pulls in distro cryptography
+            # which will be preferred by distro pip, but if anything has
+            # installed pyOpenSSL from pypi (keystone) that is not compatible
+            # with distro cryptography. Fix this by installing
+            # python3-openssl (pyOpenSSL) from the distro which pip will prefer
+            # on Debian. Ubuntu has inverse problems so we only do this for
+            # Debian.
+            local novnc_packages
+            novnc_packages="novnc"
+            GetOSVersion
+            if [[ "$os_VENDOR" = "Debian" ]] ; then
+                novnc_packages="$novnc_packages python3-openssl"
+            fi
+
             NOVNC_WEB_DIR=/usr/share/novnc
-            install_package novnc
+            install_package $novnc_packages
         else
             NOVNC_WEB_DIR=$DEST/novnc
             git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 3e7d280..c0e45eb 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -82,11 +82,17 @@
             sudo dnf copr enable -y @virtmaint-sig/virt-preview
         fi
 
+        if is_openeuler; then
+            qemu_package=qemu
+        else
+            qemu_package=qemu-kvm
+        fi
+
         # Note that in CentOS/RHEL this needs to come from the RDO
         # repositories (qemu-kvm-ev ... which provides this package)
         # as the base system version is too old.  We should have
         # pre-installed these
-        install_package qemu-kvm
+        install_package $qemu_package
         install_package libvirt libvirt-devel python3-libvirt
 
         if is_arch "aarch64"; then
diff --git a/lib/os-vif b/lib/os-vif
index 865645c..7c8bee3 100644
--- a/lib/os-vif
+++ b/lib/os-vif
@@ -1,10 +1,5 @@
 #!/bin/bash
 
-# support vsctl or native.
-# until bug #1929446 is resolved we override the os-vif default
-# and fall back to the legacy "vsctl" driver.
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"}
-
 function is_ml2_ovs {
     if [[ "${Q_AGENT}" == "openvswitch" ]]; then
         echo "True"
@@ -19,11 +14,9 @@
 
 function configure_os_vif {
     if [[ -e ${NOVA_CONF} ]]; then
-        iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
         iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
     fi
     if [[ -e ${NEUTRON_CONF} ]]; then
-        iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
         iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
     fi
 }
diff --git a/lib/placement b/lib/placement
index b779866..c6bf99f 100644
--- a/lib/placement
+++ b/lib/placement
@@ -48,6 +48,12 @@
 PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
 
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This is used to switch the Placement API policies scope and new defaults.
+# By default, these flags are False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE)
+
 # Functions
 # ---------
 
@@ -111,6 +117,13 @@
     else
         _config_placement_apache_wsgi
     fi
+    if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+        iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True
+        iniset $PLACEMENT_CONF oslo_policy enforce_scope True
+    else
+        iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False
+        iniset $PLACEMENT_CONF oslo_policy enforce_scope False
+    fi
 }
 
 # create_placement_accounts() - Set up required placement accounts
diff --git a/lib/tempest b/lib/tempest
index 87a2244..c3d3e9a 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -128,6 +128,13 @@
         (cd $REQUIREMENTS_DIR &&
             git show master:upper-constraints.txt 2>/dev/null ||
             git show origin/master:upper-constraints.txt) > $tmp_c
+        # NOTE(gmann): we need to set the env vars below to point at master
+        # constraints even though that is the default in tox.ini. Otherwise it
+        # can cause issues for grenade runs where the old and new devstack can
+        # have different tempest versions (old and master) to install. For
+        # details, refer to https://bugs.launchpad.net/devstack/+bug/2003993
+        export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
+        export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
     else
         echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
         cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -674,6 +681,14 @@
         iniset $TEMPEST_CONFIG auth admin_project_name ''
     fi
 
+    if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope nova true
+    fi
+
+    if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope placement true
+    fi
+
     if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $TEMPEST_CONFIG enforce_scope glance true
     fi
@@ -729,12 +744,12 @@
     # Neutron API Extensions
 
     # disable metering if we didn't enable the service
-    if ! is_service_enabled q-metering; then
+    if ! is_service_enabled q-metering neutron-metering; then
         DISABLE_NETWORK_API_EXTENSIONS+=", metering"
     fi
 
     # disable l3_agent_scheduler if we didn't enable L3 agent
-    if ! is_service_enabled q-l3; then
+    if ! is_service_enabled q-l3 neutron-l3; then
         DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
     fi
 
@@ -775,7 +790,12 @@
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install 'tox!=2.8.0'
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # released after zed was released and has some incompatible changes
+    # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+    # branch jobs. We can continue testing the stable/zed and lower
+    # branches with tox<4.0.0
+    pip_install 'tox!=2.8.0,<4.0.0'
     pushd $TEMPEST_DIR
     # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
     # is tag name not master. git_clone would not checkout tag because
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
index d7e4670..68d5254 100644
--- a/playbooks/tox/pre.yaml
+++ b/playbooks/tox/pre.yaml
@@ -5,4 +5,10 @@
       bindep_profile: test
       bindep_dir: "{{ zuul_work_dir }}"
     - test-setup
-    - ensure-tox
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+    # released after zed was released and has some incompatible changes
+    # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+    # branch jobs. We can continue testing the stable/zed and lower
+    # branches with tox<4.0.0
+    - role: ensure-tox
+      ensure_tox_version: "<4"
diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml
index bd64574..6b7ea37 100644
--- a/roles/apache-logs-conf/tasks/main.yaml
+++ b/roles/apache-logs-conf/tasks/main.yaml
@@ -64,6 +64,7 @@
       'Debian': '/etc/apache2/sites-enabled/'
       'Suse': '/etc/apache2/conf.d/'
       'RedHat': '/etc/httpd/conf.d/'
+      'openEuler': '/etc/httpd/conf.d/'
 
 - name: Discover configurations
   find:
diff --git a/stack.sh b/stack.sh
index c99189e..ccd2d16 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
 # a multi-node developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
-# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL**
 # (7 or newer) machine. (It may work on other platforms but support for those
 # platforms is left to those who added them to DevStack.) It should work in
 # a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -229,7 +229,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9"
+SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03"
 
 if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
@@ -394,6 +394,22 @@
     sudo dnf config-manager --set-enabled crb
     # rabbitmq and other packages are provided by RDO repositories.
     _install_rdo
+
+    # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl,
+    # which triggers a conflict when devstack wants to install "curl".
+    # Swap curl-minimal with curl.
+    if is_package_installed curl-minimal; then
+        sudo dnf swap -y curl-minimal curl
+    fi
+elif [[ $DISTRO == "openEuler-22.03" ]]; then
+    # There are some problems in openEuler that we should fix first. Some
+    # required packages/actions run before the fixup script, so we can't fix
+    # them there.
+    # 1. the hostname package is not installed by default
+    # 2. Some necessary packages are in the openstack repo, e.g. liberasurecode-devel
+    # 3. python3-pip can be uninstalled by `get_pip.py` automatically.
+    install_package hostname openstack-release-wallaby
+    uninstall_package python3-pip
 fi
 
 # Ensure python is installed
diff --git a/stackrc b/stackrc
index b3130e5..442e9a0 100644
--- a/stackrc
+++ b/stackrc
@@ -201,6 +201,11 @@
 # performance_schema that are of interest to us
 MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
 
+# This can be used to reduce the amount of memory mysqld uses while running.
+# These are unscientifically determined, and could reduce performance or
+# cause other issues.
+MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY)
+
 # Set a timeout for git operations.  If git is still running when the
 # timeout expires, the command will be retried up to 3 times.  This is
 # in the format for timeout(1);
@@ -243,7 +248,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="zed"
+DEVSTACK_SERIES="2023.1"
 
 ##############
 #
diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh
new file mode 100755
index 0000000..9c31b30
--- /dev/null
+++ b/tools/file_tracker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+# time to sleep between checks
+SLEEP_TIME=20
+
+function tracker {
+    echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened"
+    while true; do
+        cat /proc/sys/fs/file-nr
+        sleep $SLEEP_TIME
+    done
+}
+
+function usage {
+    echo "Usage: $0 [-x] [-s N]" 1>&2
+    exit 1
+}
+
+while getopts ":s:x" opt; do
+    case $opt in
+        s)
+            SLEEP_TIME=$OPTARG
+            ;;
+        x)
+            set -o xtrace
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+tracker
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 7c5d4c6..91b180c 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -139,7 +139,7 @@
     # recent enough anyway.  This is included via rpms/general
     : # Simply fall through
 elif is_ubuntu; then
-    # pip on Ubuntu 20.04 is new enough, too
+    # pip on Ubuntu 20.04 and higher is new enough, too
     # drop setuptools from u-c
     sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt
 else