Merge "Fix deployment of the neutron with uwsgi"
diff --git a/.gitignore b/.gitignore
index 8fe56ad..ad153f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,3 +38,5 @@
 userrc_early
 AUTHORS
 ChangeLog
+tools/dbcounter/build/
+tools/dbcounter/dbcounter.egg-info/
diff --git a/.zuul.yaml b/.zuul.yaml
index 5a7edd6..294dd48 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,14 +1,4 @@
 - nodeset:
-    name: openstack-single-node
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
     name: openstack-single-node-jammy
     nodes:
       - name: controller
@@ -39,26 +29,6 @@
           - controller
 
 - nodeset:
-    name: openstack-single-node-xenial
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
-    name: devstack-single-node-centos-7
-    nodes:
-      - name: controller
-        label: centos-7
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
     name: devstack-single-node-centos-9-stream
     nodes:
       - name: controller
@@ -119,36 +89,6 @@
           - controller
 
 - nodeset:
-    name: openstack-two-node
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-      - name: compute1
-        label: ubuntu-xenial
-    groups:
-      # Node where tests are executed and test results collected
-      - name: tempest
-        nodes:
-          - controller
-      # Nodes running the compute service
-      - name: compute
-        nodes:
-          - controller
-          - compute1
-      # Nodes that are not the controller
-      - name: subnode
-        nodes:
-          - compute1
-      # Switch node for multinode networking setup
-      - name: switch
-        nodes:
-          - controller
-      # Peer nodes for multinode networking setup
-      - name: peers
-        nodes:
-          - compute1
-
-- nodeset:
     name: openstack-two-node-centos-9-stream
     nodes:
       - name: controller
@@ -269,36 +209,6 @@
           - compute1
 
 - nodeset:
-    name: openstack-two-node-xenial
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-      - name: compute1
-        label: ubuntu-xenial
-    groups:
-      # Node where tests are executed and test results collected
-      - name: tempest
-        nodes:
-          - controller
-      # Nodes running the compute service
-      - name: compute
-        nodes:
-          - controller
-          - compute1
-      # Nodes that are not the controller
-      - name: subnode
-        nodes:
-          - compute1
-      # Switch node for multinode networking setup
-      - name: switch
-        nodes:
-          - controller
-      # Peer nodes for multinode networking setup
-      - name: peers
-        nodes:
-          - compute1
-
-- nodeset:
     name: openstack-three-node-focal
     nodes:
       - name: controller
@@ -716,12 +626,8 @@
     description: Debian Bookworm platform test
     nodeset: devstack-single-node-debian-bookworm
     timeout: 9000
-    voting: false
     vars:
       configure_swap_size: 4096
-      devstack_localrc:
-        # TODO(frickler): drop this once wheel build is fixed
-        MYSQL_GATHER_PERFORMANCE: false
 
 - job:
     name: devstack-platform-debian-bullseye
@@ -731,9 +637,6 @@
     timeout: 9000
     vars:
       configure_swap_size: 4096
-      devstack_localrc:
-        # TODO(frickler): drop this once wheel build is fixed
-        MYSQL_GATHER_PERFORMANCE: false
 
 - job:
     name: devstack-platform-rocky-blue-onyx
@@ -741,6 +644,11 @@
     description: Rocky Linux 9 Blue Onyx platform test
     nodeset: devstack-single-node-rockylinux-9
     timeout: 9000
+    # NOTE(danms): This has been failing lately with some repository metadata
+    # errors. We're marking this as non-voting until it appears to have
+    # stabilized:
+    # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0
+    voting: false
     vars:
       configure_swap_size: 4096
 
@@ -941,6 +849,8 @@
         - devstack-platform-rocky-blue-onyx
         - devstack-platform-ubuntu-jammy-ovn-source
         - devstack-platform-ubuntu-jammy-ovs
+        - devstack-platform-openEuler-22.03-ovn-source
+        - devstack-platform-openEuler-22.03-ovs
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
@@ -954,10 +864,6 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-linuxbridge-tempest:
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
         - neutron-ovn-tempest-ovs-release:
             voting: false
             irrelevant-files:
@@ -984,8 +890,11 @@
       jobs:
         - devstack
         - devstack-ipv6
+        - devstack-platform-debian-bookworm
         - devstack-platform-debian-bullseye
-        - devstack-platform-rocky-blue-onyx
+        # NOTE(danms): Disabled due to instability, see comment in the job
+        # definition above.
+        # - devstack-platform-rocky-blue-onyx
         - devstack-enforce-scope
         - devstack-multinode
         - devstack-unit-tests
@@ -994,10 +903,6 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-linuxbridge-tempest:
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
         - ironic-tempest-bios-ipmi-direct-tinyipa
         - swift-dsvm-functional
         - grenade:
@@ -1065,3 +970,13 @@
     periodic:
       jobs:
         - devstack-no-tls-proxy
+    periodic-weekly:
+      jobs:
+        - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bookworm
+        - devstack-platform-debian-bullseye
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-ubuntu-jammy-ovn-source
+        - devstack-platform-ubuntu-jammy-ovs
+        - devstack-platform-openEuler-22.03-ovn-source
+        - devstack-platform-openEuler-22.03-ovs
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 03c7469..21cf52c 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -28,8 +28,6 @@
 openstack/barbican                       `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
 openstack/blazar                         `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
 openstack/ceilometer                     `https://opendev.org/openstack/ceilometer <https://opendev.org/openstack/ceilometer>`__
-openstack/ceilometer-powervm             `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
-openstack/cinderlib                      `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
 openstack/cloudkitty                     `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
 openstack/cyborg                         `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
 openstack/designate                      `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
@@ -39,7 +37,6 @@
 openstack/devstack-plugin-kafka          `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
 openstack/devstack-plugin-nfs            `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
 openstack/devstack-plugin-open-cas       `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
-openstack/ec2-api                        `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
 openstack/freezer                        `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
 openstack/freezer-api                    `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
 openstack/freezer-tempest-plugin         `https://opendev.org/openstack/freezer-tempest-plugin <https://opendev.org/openstack/freezer-tempest-plugin>`__
@@ -64,12 +61,10 @@
 openstack/monasca-api                    `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
 openstack/monasca-events-api             `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
 openstack/monasca-tempest-plugin         `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
-openstack/murano                         `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
 openstack/networking-bagpipe             `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
 openstack/networking-baremetal           `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
 openstack/networking-bgpvpn              `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
 openstack/networking-generic-switch      `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
-openstack/networking-powervm             `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
 openstack/networking-sfc                 `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
 openstack/neutron                        `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
 openstack/neutron-dynamic-routing        `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
@@ -78,20 +73,17 @@
 openstack/neutron-tempest-plugin         `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
 openstack/neutron-vpnaas                 `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
 openstack/neutron-vpnaas-dashboard       `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
-openstack/nova-powervm                   `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
+openstack/nova                           `https://opendev.org/openstack/nova <https://opendev.org/openstack/nova>`__
 openstack/octavia                        `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
 openstack/octavia-dashboard              `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
 openstack/octavia-tempest-plugin         `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
 openstack/openstacksdk                   `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
 openstack/osprofiler                     `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
+openstack/ovn-bgp-agent                  `https://opendev.org/openstack/ovn-bgp-agent <https://opendev.org/openstack/ovn-bgp-agent>`__
 openstack/ovn-octavia-provider           `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
 openstack/rally-openstack                `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
-openstack/sahara                         `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
-openstack/sahara-dashboard               `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
-openstack/senlin                         `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
 openstack/shade                          `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
 openstack/skyline-apiserver              `https://opendev.org/openstack/skyline-apiserver <https://opendev.org/openstack/skyline-apiserver>`__
-openstack/solum                          `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
 openstack/storlets                       `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
 openstack/tacker                         `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
 openstack/tap-as-a-service               `https://opendev.org/openstack/tap-as-a-service <https://opendev.org/openstack/tap-as-a-service>`__
@@ -184,6 +176,7 @@
 x/valet                                  `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
 x/vmware-nsx                             `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
 x/vmware-vspc                            `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+x/whitebox-neutron-tempest-plugin        `https://opendev.org/x/whitebox-neutron-tempest-plugin <https://opendev.org/x/whitebox-neutron-tempest-plugin>`__
 ======================================== ===
 
 
diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack
new file mode 100755
index 0000000..ef05f1b
--- /dev/null
+++ b/files/openstack-cli-server/openstack
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import os.path
+import json
+
+server_address = "/tmp/openstack.sock"
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+try:
+    sock.connect(server_address)
+except socket.error as msg:
+    print(msg, file=sys.stderr)
+    sys.exit(1)
+
+
+def send(sock, doc):
+    jdoc = json.dumps(doc)
+    sock.send(b'%d\n' % len(jdoc))
+    sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+    length_str = b''
+
+    char = sock.recv(1)
+    if len(char) == 0:
+        print("Unexpected end of file", file=sys.stderr)
+        sys.exit(1)
+
+    while char != b'\n':
+        length_str += char
+        char = sock.recv(1)
+        if len(char) == 0:
+            print("Unexpected end of file", file=sys.stderr)
+            sys.exit(1)
+
+    total = int(length_str)
+
+    # use a memoryview to receive the data chunk by chunk efficiently
+    jdoc = memoryview(bytearray(total))
+    next_offset = 0
+    while total - next_offset > 0:
+        recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+        next_offset += recv_size
+    try:
+        doc = json.loads(jdoc.tobytes())
+    except (TypeError, ValueError) as e:
+        raise Exception('Data received was not in JSON format')
+    return doc
+
+try:
+    env = {}
+    passenv = ["CINDER_VERSION",
+               "OS_AUTH_URL",
+               "OS_IDENTITY_API_VERSION",
+               "OS_NO_CACHE",
+               "OS_PASSWORD",
+               "OS_PROJECT_NAME",
+               "OS_REGION_NAME",
+               "OS_TENANT_NAME",
+               "OS_USERNAME",
+               "OS_VOLUME_API_VERSION",
+               "OS_CLOUD"]
+    for name in passenv:
+        if name in os.environ:
+            env[name] = os.environ[name]
+
+    cmd = {
+        "app": os.path.basename(sys.argv[0]),
+        "env": env,
+        "argv": sys.argv[1:]
+    }
+    try:
+        image_idx = sys.argv.index('image')
+        create_idx = sys.argv.index('create')
+        missing_file = image_idx < create_idx and \
+                not any(x.startswith('--file') for x in sys.argv)
+    except ValueError:
+        missing_file = False
+
+    if missing_file:
+        # This means we were called with an image create command, but were
+        # not provided a --file option. That likely means we're being passed
+        # the image data on stdin, which won't work because we do not proxy
+        # stdin to the server. So, we just reject the operation and ask the
+        # caller to provide the file with --file instead.
+        # We've already connected to the server, so we need to send it some
+        # dummy data so it doesn't wait forever.
+        send(sock, {})
+        print('Image create without --file is not allowed in server mode',
+              file=sys.stderr)
+        sys.exit(1)
+    else:
+        send(sock, cmd)
+
+    doc = recv(sock)
+    if doc["stdout"]:
+        print(doc["stdout"], end='')
+    if doc["stderr"]:
+        print(doc["stderr"], file=sys.stderr)
+    sys.exit(doc["status"])
+finally:
+    sock.close()
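Both scripts in this directory use the same simple wire format: the byte length of a JSON document, a newline, then the JSON itself, in each direction. A minimal sketch of exercising that framing by hand, assuming socat is installed and the server below is already listening on /tmp/openstack.sock (illustrative only, not part of this change):

    # Build one length-prefixed request and print the length-prefixed JSON reply.
    req='{"app": "openstack", "env": {}, "argv": ["--version"]}'
    printf '%d\n%s' "${#req}" "$req" | socat -t 30 - UNIX-CONNECT:/tmp/openstack.sock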
diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server
new file mode 100755
index 0000000..f3d2747
--- /dev/null
+++ b/files/openstack-cli-server/openstack-cli-server
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import json
+
+from openstackclient import shell as osc_shell
+from io import StringIO
+
+server_address = "/tmp/openstack.sock"
+
+try:
+    os.unlink(server_address)
+except OSError:
+    if os.path.exists(server_address):
+        raise
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+print('starting up on %s' % server_address, file=sys.stderr)
+sock.bind(server_address)
+
+# Listen for incoming connections
+sock.listen(1)
+
+def send(sock, doc):
+    jdoc = json.dumps(doc)
+    sock.send(b'%d\n' % len(jdoc))
+    sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+    length_str = b''
+    char = sock.recv(1)
+    while char != b'\n':
+        length_str += char
+        char = sock.recv(1)
+
+    total = int(length_str)
+
+    # use a memoryview to receive the data chunk by chunk efficiently
+    jdoc = memoryview(bytearray(total))
+    next_offset = 0
+    while total - next_offset > 0:
+        recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+        next_offset += recv_size
+    try:
+        doc = json.loads(jdoc.tobytes())
+    except (TypeError, ValueError) as e:
+        raise Exception('Data received was not in JSON format')
+    return doc
+
+while True:
+    csock, client_address = sock.accept()
+    try:
+        doc = recv(csock)
+
+        print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr)
+        oldenv = {}
+        for name in doc["env"].keys():
+            oldenv[name] = os.environ.get(name, None)
+            os.environ[name] = doc["env"][name]
+
+        try:
+            old_stdout = sys.stdout
+            old_stderr = sys.stderr
+            my_stdout = sys.stdout = StringIO()
+            my_stderr = sys.stderr = StringIO()
+
+            class Exit(BaseException):
+                def __init__(self, status):
+                    self.status = status
+
+            def noexit(stat):
+                raise Exit(stat)
+
+            sys.exit = noexit
+
+            if doc["app"] == "openstack":
+                sh = osc_shell.OpenStackShell()
+                ret = sh.run(doc["argv"])
+            else:
+                print("Unknown application %s" % doc["app"], file=sys.stderr)
+                ret = 1
+        except Exit as e:
+            ret = e.status
+        finally:
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+
+            for name in oldenv.keys():
+                if oldenv[name] is None:
+                    del os.environ[name]
+                else:
+                    os.environ[name] = oldenv[name]
+
+        send(csock, {
+            "stdout": my_stdout.getvalue(),
+            "stderr": my_stderr.getvalue(),
+            "status": ret,
+        })
+
+    except BaseException as e:
+        print(e, file=sys.stderr)
+    finally:
+        csock.close()
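The server is only started when the matching DevStack service is enabled (see the install_openstack_cli_server hook in functions-common and the stack.sh hunk further down), which also prepends this directory to PATH so that plain openstack invocations go through the UNIX socket instead of paying the CLI start-up cost on every call. A minimal local.conf sketch to opt in:

    [[local|localrc]]
    enable_service openstack-cli-server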
diff --git a/functions b/functions
index 7ada0fe..f81e8f0 100644
--- a/functions
+++ b/functions
@@ -118,7 +118,7 @@
         useimport="--import"
     fi
 
-    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
+    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}")
 }
 
 # Retrieve an image from a URL and upload into Glance.
@@ -133,17 +133,28 @@
 
     local image image_fname image_name
 
+    local max_attempts=5
+
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
     image_fname=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
         # Downloads the image (uec ami+akistyle), then extracts it.
         if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
-            wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
-            if [[ $? -ne 0 ]]; then
-                echo "Not found: $image_url"
-                return
-            fi
+            for attempt in `seq $max_attempts`; do
+                local rc=0
+                wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$?
+                if [[ $rc -ne 0 ]]; then
+                    if [[ "$attempt" -eq "$max_attempts" ]]; then
+                        echo "Not found: $image_url"
+                        return
+                    fi
+                    echo "Download failed, retrying in $attempt seconds (attempt $attempt of $max_attempts)"
+                    sleep $attempt
+                else
+                    break
+                fi
+            done
         fi
         image="$FILES/${image_fname}"
     else
@@ -414,10 +425,10 @@
         # kernel for use when uploading the root filesystem.
         local kernel_id="" ramdisk_id="";
         if [ -n "$kernel" ]; then
-            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id)
+            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id)
         fi
         if [ -n "$ramdisk" ]; then
-            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id)
+            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id)
         fi
         _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
     fi
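The move from feeding the image over stdin to passing --file matters for the cli-server wrapper above, which rejects stdin-based image uploads, and readlink -f is presumably there so the path still resolves if the command ends up executed by a process with a different working directory. A stand-alone equivalent, with a hypothetical local image path:

    openstack image create cirros-test --public \
        --container-format bare --disk-format qcow2 \
        --file "$(readlink -f ./cirros.img)"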
diff --git a/functions-common b/functions-common
index c57c4cc..84d281b 100644
--- a/functions-common
+++ b/functions-common
@@ -236,6 +236,27 @@
     $xtrace
 }
 
+# bool_to_int <True|False>
+#
+# Convert True|False to int 1 or 0
+# This function can be used to convert the output of trueorfalse
+# to an int following C conventions, where false is 0 and true is 1.
+function bool_to_int {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    if [ -z $1 ]; then
+        die $LINENO "Bool value required"
+    fi
+    if [[ $1 == "True" ]] ; then
+        echo '1'
+    else
+        echo '0'
+    fi
+    $xtrace
+}
+
+
 function isset {
     [[ -v "$1" ]]
 }
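A usage sketch for the new helper, pairing it with trueorfalse the way the new lib/host file does (MY_FLAG is a hypothetical setting):

    MY_FLAG=$(trueorfalse True MY_FLAG)
    # prints 1 for "True", 0 for anything else
    bool_to_int "$MY_FLAG"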
@@ -380,9 +401,9 @@
 # such as "install_package" further abstract things in better ways.
 #
 # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
-# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora)
+# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora)
 # ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
-# ``os_CODENAME`` - vendor's codename for release: ``xenial``
+# ``os_CODENAME`` - vendor's codename for release: ``jammy``
 
 declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
 
@@ -2417,6 +2438,11 @@
     _TIME_TOTAL[$name]=$(($total + $elapsed_time))
 }
 
+function install_openstack_cli_server {
+    export PATH=$TOP_DIR/files/openstack-cli-server:$PATH
+    run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server"
+}
+
 function oscwrap {
     local xtrace
     xtrace=$(set +o | grep xtrace)
diff --git a/inc/python b/inc/python
index cc6e01f..43b06eb 100644
--- a/inc/python
+++ b/inc/python
@@ -405,6 +405,9 @@
         # source we are about to do.
         local name
         name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        if [ -z $name ]; then
+            name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml)
+        fi
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
             $REQUIREMENTS_DIR/upper-constraints.txt -- $name
     fi
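A quick illustration of what the new fallback extracts, assuming a PEP 621-style pyproject.toml whose [project] table spells the field exactly as name = "package-name":

    # prints "package-name" for a line of the form: name = "package-name"
    awk '/^name =/ {gsub(/"/, "", $3); print $3}' pyproject.toml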
diff --git a/lib/apache b/lib/apache
index cf7215b..1420f76 100644
--- a/lib/apache
+++ b/lib/apache
@@ -237,13 +237,17 @@
     restart_service $APACHE_NAME
 }
 
+# write_uwsgi_config() - Create a new uWSGI config file
 function write_uwsgi_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local url=$3
     local http=$4
-    local name=""
-    name=$(basename $wsgi)
+    local name=$5
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
 
     # create a home for the sockets; note don't use /tmp -- apache has
     # a private view of it on some platforms.
@@ -258,38 +262,46 @@
     local socket="$socket_dir/${name}.socket"
 
     # always cleanup given that we are using iniset here
-    rm -rf $file
-    iniset "$file" uwsgi wsgi-file "$wsgi"
-    iniset "$file" uwsgi processes $API_WORKERS
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given. Note that the regex isn't exhaustive - neither Python modules
+    # nor Python variables can start with a number - but it's "good enough"
+    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+        iniset "$conf" uwsgi module "$wsgi"
+    else
+        deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+        iniset "$conf" uwsgi wsgi-file "$wsgi"
+    fi
+    iniset "$conf" uwsgi processes $API_WORKERS
     # This is running standalone
-    iniset "$file" uwsgi master true
+    iniset "$conf" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
-    iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload false
+    iniset "$conf" uwsgi die-on-term true
+    iniset "$conf" uwsgi exit-on-reload false
     # Set worker-reload-mercy so that worker will not exit till the time
     # configured after graceful shutdown
-    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
-    iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins http,python3
+    iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi enable-threads true
+    iniset "$conf" uwsgi plugins http,python3
     # uwsgi recommends this to prevent thundering herd on accept.
-    iniset "$file" uwsgi thunder-lock true
+    iniset "$conf" uwsgi thunder-lock true
     # Set hook to trigger graceful shutdown on SIGTERM
-    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
     # Override the default size for headers from the 4k default.
-    iniset "$file" uwsgi buffer-size 65535
+    iniset "$conf" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
-    iniset "$file" uwsgi add-header "Connection: close"
+    iniset "$conf" uwsgi add-header "Connection: close"
     # This ensures that file descriptors aren't shared between processes.
-    iniset "$file" uwsgi lazy-apps true
+    iniset "$conf" uwsgi lazy-apps true
 
     # If we said bind directly to http, then do that and don't start the apache proxy
     if [[ -n "$http" ]]; then
-        iniset "$file" uwsgi http $http
+        iniset "$conf" uwsgi http $http
     else
         local apache_conf=""
         apache_conf=$(apache_site_config_for $name)
-        iniset "$file" uwsgi socket "$socket"
-        iniset "$file" uwsgi chmod-socket 666
+        iniset "$conf" uwsgi socket "$socket"
+        iniset "$conf" uwsgi chmod-socket 666
         echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf
         enable_apache_site $name
         restart_apache_server
@@ -303,47 +315,58 @@
 # but that involves having apache buffer the request before sending it to
 # uwsgi.
 function write_local_uwsgi_http_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local url=$3
-    name=$(basename $wsgi)
+    local name=$4
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
 
     # create a home for the sockets; note don't use /tmp -- apache has
     # a private view of it on some platforms.
 
     # always cleanup given that we are using iniset here
-    rm -rf $file
-    iniset "$file" uwsgi wsgi-file "$wsgi"
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given
+    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+        iniset "$conf" uwsgi module "$wsgi"
+    else
+        deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+        iniset "$conf" uwsgi wsgi-file "$wsgi"
+    fi
     port=$(get_random_port)
-    iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
-    iniset "$file" uwsgi processes $API_WORKERS
+    iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
+    iniset "$conf" uwsgi processes $API_WORKERS
     # This is running standalone
-    iniset "$file" uwsgi master true
+    iniset "$conf" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
-    iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload false
-    iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins http,python3
-    # uwsgi recommends this to prevent thundering herd on accept.
-    iniset "$file" uwsgi thunder-lock true
-    # Set hook to trigger graceful shutdown on SIGTERM
-    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    iniset "$conf" uwsgi die-on-term true
+    iniset "$conf" uwsgi exit-on-reload false
     # Set worker-reload-mercy so that worker will not exit till the time
     # configured after graceful shutdown
-    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi enable-threads true
+    iniset "$conf" uwsgi plugins http,python3
+    # uwsgi recommends this to prevent thundering herd on accept.
+    iniset "$conf" uwsgi thunder-lock true
+    # Set hook to trigger graceful shutdown on SIGTERM
+    iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
     # Override the default size for headers from the 4k default.
-    iniset "$file" uwsgi buffer-size 65535
+    iniset "$conf" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
-    iniset "$file" uwsgi add-header "Connection: close"
+    iniset "$conf" uwsgi add-header "Connection: close"
     # This ensures that file descriptors aren't shared between processes.
-    iniset "$file" uwsgi lazy-apps true
-    iniset "$file" uwsgi chmod-socket 666
-    iniset "$file" uwsgi http-raw-body true
-    iniset "$file" uwsgi http-chunked-input true
-    iniset "$file" uwsgi http-auto-chunked true
-    iniset "$file" uwsgi http-keepalive false
+    iniset "$conf" uwsgi lazy-apps true
+    iniset "$conf" uwsgi chmod-socket 666
+    iniset "$conf" uwsgi http-raw-body true
+    iniset "$conf" uwsgi http-chunked-input true
+    iniset "$conf" uwsgi http-auto-chunked true
+    iniset "$conf" uwsgi http-keepalive false
     # Increase socket timeout for slow chunked uploads
-    iniset "$file" uwsgi socket-timeout 30
+    iniset "$conf" uwsgi socket-timeout 30
 
     enable_apache_mod proxy
     enable_apache_mod proxy_http
@@ -376,12 +399,18 @@
 }
 
 function remove_uwsgi_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local name=""
+    # TODO(stephenfin): Remove this call when everyone is using module path
+    # configuration instead of file path configuration
     name=$(basename $wsgi)
 
-    rm -rf $file
+    if [[ "$wsgi" = /* ]]; then
+        deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead"
+    fi
+
+    rm -rf $conf
     disable_apache_site $name
 }
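With the optional fifth argument, callers can hand write_uwsgi_config a Python module path plus an explicit config name rather than a wsgi script path; both forms below are accepted, the second now emitting a deprecation warning (the values mirror the lib/nova hunk later in this change):

    # preferred: module:callable plus an explicit name
    write_uwsgi_config "$NOVA_UWSGI_CONF" "nova.wsgi.osapi_compute:application" "/compute" "" "nova-api"
    # deprecated: wsgi script path, name derived via basename
    write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_BIN_DIR/nova-api-wsgi" "/compute"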
 
diff --git a/lib/cinder b/lib/cinder
index f7824eb..ae898e9 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -275,7 +275,7 @@
     fi
 
     stop_process "c-api"
-    remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
+    remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi"
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
diff --git a/lib/glance b/lib/glance
index 796ebdb..2746871 100644
--- a/lib/glance
+++ b/lib/glance
@@ -75,6 +75,7 @@
 GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
 
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db}
 
 # Full Glance functionality requires running in standalone mode. If we are
 # not in uwsgi mode, then we are standalone, otherwise allow separate control.
@@ -167,6 +168,7 @@
         # Cleanup reserved stores directories
         sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
     fi
+    remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api"
 }
 
 # Set multiple cinder store related config options for each of the cinder store
@@ -329,6 +331,7 @@
     iniset $GLANCE_API_CONF database connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
     iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
     configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
@@ -392,6 +395,7 @@
     iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
     iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
     iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
     iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
@@ -429,6 +433,7 @@
         iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
         iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+        iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL
     fi
 
     if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
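GLANCE_CACHE_DRIVER now defaults to Glance's centralized_db cache driver; a local.conf sketch for anyone who wants to stay on the older sqlite driver instead:

    [[local|localrc]]
    GLANCE_CACHE_DRIVER=sqlite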
diff --git a/lib/horizon b/lib/horizon
index 611329d..7c0d443 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -109,6 +109,10 @@
         _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True"
     fi
 
+    if is_service_enabled c-bak; then
+        _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True"
+    fi
+
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
@@ -169,6 +173,10 @@
     # Apache installation, because we mark it NOPRIME
     install_apache_wsgi
 
+    # Install the memcache library so that horizon can use memcached as its
+    # cache backend
+    pip_install_gr pymemcache
+
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
 }
 
diff --git a/lib/host b/lib/host
new file mode 100644
index 0000000..a812c39
--- /dev/null
+++ b/lib/host
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Kernel Samepage Merging (KSM)
+# -----------------------------
+
+# Processes that mark their memory as mergeable can share identical memory
+# pages if KSM is enabled. This is particularly useful for nova + libvirt
+# backends but any other setup that marks its memory as mergeable can take
+# advantage. The drawback is higher CPU load; however, we tend to
+# be memory bound not cpu bound so enable KSM by default but allow people
+# to opt out if the CPU time is more important to them.
+ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
+ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED)
+function configure_ksm {
+    if [[ $ENABLE_KSMTUNED == "True" ]] ; then
+        install_package "ksmtuned"
+    fi
+    if [[ -f /sys/kernel/mm/ksm/run ]] ; then
+        echo $(bool_to_int $ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run
+    fi
+}
+
+# Compressed swap (ZSWAP)
+#------------------------
+
+# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html
+# Zswap is a lightweight compressed cache for swap pages.
+# It takes pages that are in the process of being swapped out and attempts
+# to compress them into a dynamically allocated RAM-based memory pool.
+# zswap basically trades CPU cycles for potentially reduced swap I/O.
+# This trade-off can also result in a significant performance improvement
+# if reads from the compressed cache are faster than reads from a swap device.
+
+ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP)
+# lz4 is very fast although it does not have the best compression
+# zstd has much better compression but more latency
+ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"}
+ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"}
+function configure_zswap {
+    if [[ $ENABLE_ZSWAP == "True" ]] ; then
+        # CentOS 9 Stream seems to only support enabling zswap, not runtime
+        # tuning, so don't try to choose better defaults on CentOS
+        if is_ubuntu; then
+            echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor
+            echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool
+        fi
+        echo 1 | sudo tee /sys/module/zswap/parameters/enabled
+        # print the current zswap kernel config
+        sudo grep -R . /sys/module/zswap/parameters || /bin/true
+    fi
+}
+
+ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING)
+function configure_sysctl_mem_parmaters {
+    if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then
+        # defer write when memory is available
+        sudo sysctl -w vm.dirty_ratio=60
+        sudo sysctl -w vm.dirty_background_ratio=10
+        sudo sysctl -w vm.vfs_cache_pressure=50
+        # Assume swap is compressed, so on new kernels give it equal
+        # priority to the (uncompressed) page cache. On kernels < 5.8
+        # the maximum is 100, not 200, so this will strongly prefer
+        # swapping.
+        sudo sysctl -w vm.swappiness=100
+        sudo grep -R . /proc/sys/vm/  || /bin/true
+    fi
+}
+
+function configure_host_mem {
+    configure_zswap
+    configure_ksm
+    configure_sysctl_mem_parmaters
+}
+
+ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING)
+function configure_sysctl_net_parmaters {
+    if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then
+        # detect dead TCP connections after 120 seconds
+        sudo sysctl -w net.ipv4.tcp_keepalive_time=60
+        sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10
+        sudo sysctl -w net.ipv4.tcp_keepalive_probes=6
+        # reduce network latency for new connections
+        sudo sysctl -w net.ipv4.tcp_fastopen=3
+        # print tcp options
+        sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true
+        # disable qos by default
+        sudo sysctl -w net.core.default_qdisc=pfifo_fast
+    fi
+}
+
+function configure_host_net {
+    configure_sysctl_net_parmaters
+}
+
+function tune_host {
+    configure_host_mem
+    configure_host_net
+}
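Everything in this file is gated on boolean settings: KSM and ksmtuned default to on, while zswap and the sysctl tuning default to off. A local.conf sketch that opts into the rest, assuming a kernel with zswap support built in (zstd here swaps out the default lz4 compressor):

    [[local|localrc]]
    ENABLE_ZSWAP=True
    ZSWAP_COMPRESSOR=zstd
    ZSWAP_ZPOOL=z3fold
    ENABLE_SYSCTL_MEM_TUNING=True
    ENABLE_SYSCTL_NET_TUNING=True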
diff --git a/lib/keystone b/lib/keystone
index 6cb4aac..7d6b05f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -150,7 +150,7 @@
         sudo rm -f $(apache_site_config_for keystone)
     else
         stop_process "keystone"
-        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
+        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public"
         sudo rm -f $(apache_site_config_for keystone-wsgi-public)
     fi
 }
diff --git a/lib/neutron b/lib/neutron
index 021ffeb..8b65980 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -92,8 +92,9 @@
 
 # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
 # and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+# of the new RBAC policies and scopes. Set it to False if you do not
+# want to run Neutron with new RBAC.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE)
 
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
@@ -157,6 +158,14 @@
     NEUTRON_ENDPOINT_SERVICE_NAME="networking"
 fi
 
+# Source install libraries
+ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git}
+ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic}
+ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main}
+SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git}
+SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy}
+SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main}
+
 # List of config file names in addition to the main plugin config file
 # To add additional plugin config files, use ``neutron_server_config_add``
 # utility function.  For example:
@@ -524,6 +533,17 @@
         setup_dev_lib "neutron-lib"
     fi
 
+    # Install SQLAlchemy and alembic from git when these are required
+    # see https://bugs.launchpad.net/neutron/+bug/2042941
+    if use_library_from_git "sqlalchemy"; then
+        git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH
+        setup_develop $SQLALCHEMY_DIR
+    fi
+    if use_library_from_git "alembic"; then
+        git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH
+        setup_develop $ALEMBIC_DIR
+    fi
+
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
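use_library_from_git keys off the standard LIBS_FROM_GIT setting, and the repos and branches default to the ALEMBIC_*/SQLALCHEMY_* variables added above, so pulling both libraries from source is a short local.conf sketch:

    [[local|localrc]]
    LIBS_FROM_GIT=sqlalchemy,alembic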
 
@@ -803,7 +823,7 @@
     if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
         stop_process neutron-api
         stop_process neutron-rpc-server
-        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api"
         sudo rm -f $(apache_site_config_for neutron-api)
     fi
 
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index c51b708..699bd54 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -91,9 +91,14 @@
 # http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
 OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
 
+# OVN metadata agent configuration
 OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
 OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
 
+# OVN agent configuration
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini
+OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-}
+
 # If True (default) the node will be considered a gateway node.
 ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW)
 OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
@@ -132,6 +137,7 @@
 
 NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix)
 NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent"
+NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent"
 
 STACK_GROUP="$( id --group --name "$STACK_USER" )"
 
@@ -487,6 +493,8 @@
 
         if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
+        elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
         else
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
         fi
@@ -508,6 +516,8 @@
     if is_service_enabled n-api-meta ; then
         if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
             iniset $NOVA_CONF neutron service_metadata_proxy True
+        elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
+            iniset $NOVA_CONF neutron service_metadata_proxy True
         fi
     fi
 }
@@ -539,29 +549,42 @@
     fi
 
     # Metadata
-    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
+    local sample_file=""
+    local config_file=""
+    if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then
+        sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample
+        config_file=$OVN_AGENT_CONF
+    elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
+        sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample
+        config_file=$OVN_META_CONF
+    fi
+    if [ -n "$config_file" ]; then
         sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
 
         mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
         (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
 
-        cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF
-        configure_root_helper_options $OVN_META_CONF
+        cp $sample_file $config_file
+        configure_root_helper_options $config_file
 
-        iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
-        iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
-        iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron
-        iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
-        iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
+        iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST
+        iniset $config_file DEFAULT metadata_workers $API_WORKERS
+        iniset $config_file DEFAULT state_path $DATA_DIR/neutron
+        iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
+        iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE
         if is_service_enabled tls-proxy; then
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key
         fi
+        if [[ $config_file == $OVN_AGENT_CONF ]]; then
+            iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS
+            iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE
+        fi
     fi
 }
 
@@ -684,6 +707,9 @@
     if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then
         _start_process "devstack@q-ovn-metadata-agent.service"
     fi
+    if is_service_enabled q-ovn-agent neutron-ovn-agent ; then
+        _start_process "devstack@q-ovn-agent.service"
+    fi
 }
 
 # start_ovn() - Start running processes, including screen
@@ -750,6 +776,12 @@
         setup_logging $OVN_META_CONF
     fi
 
+    if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+        run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF"
+        # Format logging
+        setup_logging $OVN_AGENT_CONF
+    fi
+
     _start_ovn_services
 }
 
@@ -774,6 +806,12 @@
         sudo pkill -9 -f "[h]aproxy" || :
         _stop_process "devstack@q-ovn-metadata-agent.service"
     fi
+    if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-agent.service"
+    fi
     if is_service_enabled ovn-controller-vtep ; then
         _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
     fi
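To exercise the unified OVN agent path instead of the dedicated metadata agent, something along these lines in local.conf should be enough, given the OVN_AGENT_EXTENSIONS handling above (a sketch, not verified here):

    [[local|localrc]]
    disable_service q-ovn-metadata-agent
    enable_service q-ovn-agent
    OVN_AGENT_EXTENSIONS=metadata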
diff --git a/lib/nova b/lib/nova
index da3118f..ee3f29e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -53,11 +53,19 @@
 NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
 NOVA_API_DB=${NOVA_API_DB:-nova_api}
-NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
-NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
+NOVA_UWSGI=nova.wsgi.osapi_compute:application
+NOVA_METADATA_UWSGI=nova.wsgi.metadata:application
 NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini
 NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini
 
+# Allow forcing the stable compute uuid to something specific. This would be
+# done by deployment tools that pre-allocate the UUIDs, but it is also handy
+# for developers that need to re-stack a compute-only deployment multiple
+# times. Since the DB is non-local and not erased on an unstack, making it
+# stay the same each time is what developers want. Set to a uuid here or
+# leave it blank for default allocate-on-start behavior.
+NOVA_CPU_UUID=""
+
 # The total number of cells we expect. Must be greater than one and doesn't
 # count cell0.
 NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1}
@@ -240,8 +248,8 @@
 
     stop_process "n-api"
     stop_process "n-api-meta"
-    remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
-    remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+    remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api"
+    remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata"
 
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
         clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
@@ -541,11 +549,11 @@
     iniset $NOVA_CONF upgrade_levels compute "auto"
 
     if is_service_enabled n-api; then
-        write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+        write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api"
     fi
 
     if is_service_enabled n-api-meta; then
-        write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+        write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata"
     fi
 
     if is_service_enabled ceilometer; then
@@ -1046,7 +1054,7 @@
 
     # Set rebuild timeout longer for BFV instances because we likely have
     # slower disk than expected. Default is 20s/GB
-    iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 60
+    iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180
 
     # Configure the OVSDB connection for os-vif
     if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
@@ -1058,6 +1066,10 @@
         iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True
     fi
 
+    if [[ "$NOVA_CPU_UUID" ]]; then
+        echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id
+    fi
+
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # ``sg`` is used in run_process to execute nova-compute as a member of the
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 87c3d3a..4b44c1f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -56,6 +56,10 @@
     # arm64-specific configuration
     if is_arch "aarch64"; then
         iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
+        # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is
+        #               set to `host-passthrough`, or `nova-compute` refuses to
+        #               start.
+        inidelete $NOVA_CONF libvirt cpu_model
     fi
 
     if isset ENABLE_FILE_INJECTION; then
diff --git a/lib/placement b/lib/placement
index c6bf99f..63fdfb6 100644
--- a/lib/placement
+++ b/lib/placement
@@ -68,7 +68,7 @@
 # runs that a clean run would need to clean up
 function cleanup_placement {
     sudo rm -f $(apache_site_config_for placement-api)
-    remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
+    remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api"
 }
 
 # _config_placement_apache_wsgi() - Set WSGI config files
diff --git a/lib/tempest b/lib/tempest
index 2f62f6e..6bd203e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -29,6 +29,7 @@
 # - ``DEFAULT_INSTANCE_USER``
 # - ``DEFAULT_INSTANCE_ALT_USER``
 # - ``CINDER_ENABLED_BACKENDS``
+# - ``CINDER_BACKUP_DRIVER``
 # - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
 #
 # ``stack.sh`` calls the entry points in this order:
@@ -571,6 +572,9 @@
         TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
     fi
     iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
+    if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then
+        iniset $TEMPEST_CONFIG volume backup_driver swift
+    fi
     local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
     local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
     if [ "$tempest_volume_min_microversion" == "None" ]; then
@@ -698,8 +702,6 @@
     # test can be run with scoped token.
     if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $TEMPEST_CONFIG enforce_scope keystone true
-        iniset $TEMPEST_CONFIG auth admin_system 'all'
-        iniset $TEMPEST_CONFIG auth admin_project_name ''
     fi
 
     if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml
deleted file mode 100644
index f815e14..0000000
--- a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - |
-    Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on
-    SQLAlchemy 2.x.
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
index f9bb0f7..51a11b6 100644
--- a/roles/capture-performance-data/tasks/main.yaml
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -3,7 +3,9 @@
     executable: /bin/bash
     cmd: |
       source {{ devstack_conf_dir }}/stackrc
-      python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+      source {{ devstack_conf_dir }}/inc/python
+      setup_devstack_virtualenv
+      $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \
         --db-user="$DATABASE_USER" \
         --db-pass="$DATABASE_PASSWORD" \
         --db-host="$DATABASE_HOST" \
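Run by hand, the role's new steps look roughly like this; /opt/stack/devstack stands in for {{ devstack_conf_dir }} and is only an assumption about the job layout:

    source /opt/stack/devstack/stackrc
    source /opt/stack/devstack/inc/python
    setup_devstack_virtualenv                 # so $PYTHON resolves inside the devstack venv
    $PYTHON /opt/stack/devstack/tools/get-stats.py \
        --db-user="$DATABASE_USER" \
        --db-pass="$DATABASE_PASSWORD" \
        --db-host="$DATABASE_HOST"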
diff --git a/stack.sh b/stack.sh
index 530fda4..0c36e10 100755
--- a/stack.sh
+++ b/stack.sh
@@ -421,8 +421,12 @@
     # 1. the hostname package is not installed by default
     # 2. Some necessary packages are in the openstack repo, for example liberasurecode-devel
     # 3. python3-pip can be uninstalled by `get_pip.py` automatically.
-    install_package hostname openstack-release-wallaby
+    # 4. Ensure wget is installed before it is used below
+    install_package hostname openstack-release-wallaby wget
     uninstall_package python3-pip
+
+    # Add the yum repository providing libvirt 7.X
+    sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo
 fi
 
 # Ensure python is installed
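A rough way to confirm the extra repo took effect on openEuler; the repo id depends on what the downloaded .repo file defines, so the greps are only indicative:

    sudo dnf repolist | grep -i libvirt      # the copr repo should show up
    dnf info libvirt | grep -iE '^version'   # expect a 7.x version from that repo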
@@ -607,6 +611,12 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
+# load host tuning functions and defaults
+source $TOP_DIR/lib/host
+# tune host memory early to ensure zswap/ksm are configured before doing
+# memory-intensive operations like cloning repos or unpacking packages.
+tune_host
+
 # Configure Projects
 # ==================
 
@@ -821,6 +831,7 @@
 
 if [[ "$GLOBAL_VENV" == "True" ]] ; then
     # TODO(frickler): find a better solution for this
+    sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin
     sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin
     sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin
     sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin
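A quick check that the new link resolves as intended (paths are the defaults used in this block):

    command -v cinder-manage                   # should print /usr/local/bin/cinder-manage
    readlink -f /usr/local/bin/cinder-manage   # -> /opt/stack/data/venv/bin/cinder-manage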
@@ -1011,6 +1022,9 @@
     setup_dev_lib "python-openstackclient"
 else
     pip_install_gr python-openstackclient
+    if is_service_enabled openstack-cli-server; then
+        install_openstack_cli_server
+    fi
 fi
 
 # Installs alias for osc so that we can collect timing for all
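The CLI server is opt-in, so nothing changes unless the service is enabled; an illustrative local.conf line matching the is_service_enabled check above:

    [[local|localrc]]
    enable_service openstack-cli-server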
@@ -1075,22 +1089,6 @@
 # Save configuration values
 save_stackenv $LINENO
 
-# Kernel Samepage Merging (KSM)
-# -----------------------------
-
-# Processes that mark their memory as mergeable can share identical memory
-# pages if KSM is enabled. This is particularly useful for nova + libvirt
-# backends but any other setup that marks its memory as mergeable can take
-# advantage. The drawback is there is higher cpu load; however, we tend to
-# be memory bound not cpu bound so enable KSM by default but allow people
-# to opt out if the CPU time is more important to them.
-
-if [[ $ENABLE_KSM == "True" ]] ; then
-    if [[ -f /sys/kernel/mm/ksm/run ]] ; then
-        sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run"
-    fi
-fi
-
 
 # Start Services
 # ==============
diff --git a/stackrc b/stackrc
index ff30d37..de81f01 100644
--- a/stackrc
+++ b/stackrc
@@ -121,15 +121,6 @@
     SYSTEMCTL="sudo systemctl"
 fi
 
-
-# Whether or not to enable Kernel Samepage Merging (KSM) if available.
-# This allows programs that mark their memory as mergeable to share
-# memory pages if they are identical. This is particularly useful with
-# libvirt backends. This reduces memory usage at the cost of CPU overhead
-# to scan memory. We default to enabling it because we tend to be more
-# memory constrained than CPU bound.
-ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
-
 # Passwords generated by interactive devstack runs
 if [[ -r $RC_DIR/.localrc.password ]]; then
     source $RC_DIR/.localrc.password
@@ -207,8 +198,9 @@
 USE_VENV=$(trueorfalse False USE_VENV)
 
 # Add packages that need to be installed into a venv but are not in any
-# requirmenets files here, in a comma-separated list
-ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""}
+# requirements files here, in a comma-separated list.
+# Currently this is only used when USE_VENV is True (individual project venvs).
+ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}
 
 # This can be used to turn database query logging on and off
 # (currently only implemented for MySQL backend)
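With the spelling fixed the variable is actually honored now; an illustrative setting for a USE_VENV run (the package names are examples only):

    USE_VENV=True
    ADDITIONAL_VENV_PACKAGES="pymysql,python-memcached"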
@@ -265,7 +257,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="2023.2"
+DEVSTACK_SERIES="2024.2"
 
 ##############
 #
@@ -588,28 +580,6 @@
 GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
 GITDIR["os-ken"]=$DEST/os-ken
 
-##################
-#
-#  TripleO / Heat Agent Components
-#
-##################
-
-# run-parts script required by os-refresh-config
-DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
-DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
-
-# os-apply-config configuration template tool
-OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
-OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-collect-config configuration agent
-OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
-OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-refresh-config configuration run-parts tool
-ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
-ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
 
 #################
 #
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 1cacd06..bc28515 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -73,8 +73,11 @@
 s = requests.Session()
 # sometimes gitea gives us a 500 error; retry sanely
 #  https://stackoverflow.com/a/35636367
+# We need to disable raise_on_status because if any repo ends up returning 500,
+# the propose-updates job which runs this script will fail.
 retries = Retry(total=3, backoff_factor=1,
-                status_forcelist=[ 500 ])
+                status_forcelist=[ 500 ],
+                raise_on_status=False)
 s.mount('https://', HTTPAdapter(max_retries=retries))
 
 found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
diff --git a/tools/worlddump.py b/tools/worlddump.py
index aadd33b..edbfa26 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -51,7 +51,7 @@
 
 
 def filename(dirname, name=""):
-    now = datetime.datetime.utcnow()
+    now = datetime.datetime.now(datetime.timezone.utc)
     fmt = "worlddump-%Y-%m-%d-%H%M%S"
     if name:
         fmt += "-" + name
diff --git a/tox.ini b/tox.ini
index ec764ab..26cd68c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@
 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
 # modified bashate tree
 deps =
-   {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
+   {env:BASHATE_INSTALL_PATH:bashate}
 allowlist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)    \
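With the pin dropped, whatever bashate pip resolves is used; running the check locally is unchanged, and the override described in the comment above still applies (the tox env name is assumed to be bashate):

    tox -e bashate
    # or test against a modified bashate tree, as the comment above suggests:
    BASHATE_INSTALL_PATH=/path/to/bashate tox -e bashate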
diff --git a/unstack.sh b/unstack.sh
index 33b069b..1b2d8dd 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -168,6 +168,10 @@
     cleanup_etcd3
 fi
 
+if is_service_enabled openstack-cli-server; then
+    stop_service devstack@openstack-cli-server
+fi
+
 stop_dstat
 
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
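The stop above maps to a plain systemd unit following devstack's devstack@<service> naming; an illustrative check that it is gone after unstack:

    systemctl status devstack@openstack-cli-server --no-pager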