Merge "Add support for ceph_iscsi cinder driver"
diff --git a/.zuul.yaml b/.zuul.yaml
index 94410b4..b65aeec 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -340,6 +340,7 @@
         '{{ stage_dir }}/listen53.txt': logs
         '{{ stage_dir }}/deprecations.log': logs
         '{{ stage_dir }}/audit.log': logs
+        /etc/ceph: logs
         /var/log/ceph: logs
         /var/log/openvswitch: logs
         /var/log/glusterfs: logs
@@ -584,18 +585,15 @@
     timeout: 9000
 
 - job:
-    name: devstack-platform-opensuse-15
+    name: devstack-async
     parent: tempest-full-py3
-    description: openSUSE 15.x platform test
-    nodeset: devstack-single-node-opensuse-15
+    description: Async mode enabled
     voting: false
-
-- job:
-    name: devstack-platform-bionic
-    parent: tempest-full-py3
-    description: Ubuntu Bionic platform test
-    nodeset: openstack-single-node-bionic
-    voting: false
+    vars:
+      devstack_localrc:
+        DEVSTACK_PARALLEL: True
+      zuul_copy_output:
+        /opt/stack/async: logs
 
 - job:
     name: devstack-platform-fedora-latest
@@ -685,10 +683,9 @@
       jobs:
         - devstack
         - devstack-ipv6
-        - devstack-platform-opensuse-15
         - devstack-platform-fedora-latest
         - devstack-platform-centos-8
-        - devstack-platform-bionic
+        - devstack-async
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
@@ -736,15 +733,6 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        # NOTE(gmann): Remove this job from devstack pipeline once it is
-        # migrated to zuulv3 native. This is legacy job and rely on
-        # devstack-gate + devstack setting so any change in devstack can
-        # break it.
-        - nova-live-migration:
-            voting: false
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
     gate:
       jobs:
         - devstack
diff --git a/HACKING.rst b/HACKING.rst
index f55aed8..6a91e0a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -74,8 +74,7 @@
 
 ``tools`` - Contains a collection of stand-alone scripts. While these
 may reference the top-level DevStack configuration, they can generally be
-run alone. There are also some sub-directories to support specific
-environments such as XenServer.
+run alone.
 
 
 Scripts
@@ -275,9 +274,6 @@
   even years from now -- why we were motivated to make a change at the
   time.
 
-* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people
-  that should be added to reviews of various sub-systems.
-
 
 Making Changes, Testing, and CI
 -------------------------------
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
deleted file mode 100644
index d4968a6..0000000
--- a/MAINTAINERS.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-MAINTAINERS
-===========
-
-
-Overview
---------
-
-The following is a list of people known to have interests in
-particular areas or sub-systems of devstack.
-
-It is a rather general guide intended to help seed the initial
-reviewers list of a change.  A +1 on a review from someone identified
-as being a maintainer of its affected area is a very positive flag to
-the core team for the veracity of the change.
-
-The ``devstack-core`` group can still be added to all reviews.
-
-
-Format
-~~~~~~
-
-The format of the file is the name of the maintainer and their
-gerrit-registered email.
-
-
-Maintainers
------------
-
-.. contents:: :local:
-
-
-Ceph
-~~~~
-
-* Sebastien Han <sebastien.han@enovance.com>
-
-Cinder
-~~~~~~
-
-Fedora/CentOS/RHEL
-~~~~~~~~~~~~~~~~~~
-
-* Ian Wienand <iwienand@redhat.com>
-
-Neutron
-~~~~~~~
-
-MidoNet
-~~~~~~~
-
-* Jaume Devesa <devvesa@gmail.com>
-* Ryu Ishimoto <ryu@midokura.com>
-* YAMAMOTO Takashi <yamamoto@midokura.com>
-
-OpenDaylight
-~~~~~~~~~~~~
-
-* Kyle Mestery <mestery@mestery.com>
-
-OpenFlow Agent (ofagent)
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
-* Fumihiko Kakuma <kakuma@valinux.co.jp>
-
-Swift
-~~~~~
-
-* Chmouel Boudjnah <chmouel@enovance.com>
-
-SUSE
-~~~~
-
-* Ralf Haferkamp <rhafer@suse.de>
-* Vincent Untz <vuntz@suse.com>
-
-Tempest
-~~~~~~~
-
-Xen
-~~~
-* Bob Ball <bob.ball@citrix.com>
-
-Zaqar (Marconi)
-~~~~~~~~~~~~~~~
-
-* Flavio Percoco <flaper87@gmail.com>
-* Malini Kamalambal <malini.kamalambal@rackspace.com>
-
-Oracle Linux
-~~~~~~~~~~~~
-* Wiekus Beukes <wiekus.beukes@oracle.com>
diff --git a/clean.sh b/clean.sh
index cb0a8b4..870dfd4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -113,7 +113,7 @@
 cleanup_database
 
 # Clean out data and status
-sudo rm -rf $DATA_DIR $DEST/status
+sudo rm -rf $DATA_DIR $DEST/status $DEST/async
 
 # Clean out the log file and log directories
 if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then
@@ -145,3 +145,5 @@
 
 rm -rf ~/.config/openstack
 
+# Clear any fstab entries made by DevStack
+sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab
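+# (these are the loopback mounts added by create_disk in functions, of the
+# form: <disk_image> <mount_dir> xfs loop,...,comment=devstack-<key> 0 0)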
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 22f5999..2d0c894 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -628,12 +628,6 @@
     INSTALL_TEMPEST=True
 
 
-Xenserver
-~~~~~~~~~
-
-If you would like to use Xenserver as the hypervisor, please refer to
-the instructions in ``./tools/xen/README.md``.
-
 Cinder
 ~~~~~~
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index a18a786..7d70d74 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -241,7 +241,7 @@
   on Ubuntu, Debian or Linux Mint.
 
 - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
-  on Red Hat, Fedora, CentOS or XenServer.
+  on Red Hat, Fedora, or CentOS.
 
 - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
   running on SUSE Linux or openSUSE.
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 4f83b36..7853520 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -196,31 +196,6 @@
 
 .. _`remote-pdb`: https://pypi.org/project/remote-pdb/
 
-Known Issues
-============
-
-Be careful about systemd python libraries. There are 3 of them on
-pypi, and they are all very different. They unfortunately all install
-into the ``systemd`` namespace, which can cause some issues.
-
-- ``systemd-python`` - this is the upstream maintained library, it has
-  a version number like systemd itself (currently ``234``). This is
-  the one you want.
-- ``systemd`` - a python 3 only library, not what you want.
-- ``python-systemd`` - another library you don't want. Installing it
-  on a system will break ansible's ability to run. The package has now
-  been renamed to ``cysystemd``, which avoids the namespace collision.
-
-
-If we were using user units, the ``[Service]`` - ``Group=`` parameter
-doesn't seem to work with user units, even though the documentation
-says that it should. This means that we will need to do an explicit
-``/usr/bin/sg``. This has the downside of making the SYSLOG_IDENTIFIER
-be ``sg``. We can explicitly set that with ``SyslogIdentifier=``, but
-it's really unfortunate that we're going to need this work
-around. This is currently not a problem because we're only using
-system units.
-
 Future Work
 ===========
 
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 15ecfe3..06c73ec 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -6,7 +6,7 @@
         source $TOP_DIR/lib/tempest
     elif [[ "$1" == "stack" && "$2" == "install" ]]; then
         echo_summary "Installing Tempest"
-        install_tempest
+        async_runfunc install_tempest
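+        # install_tempest now runs in the background; the async_wait in
+        # the test-config phase below collects it before configure_tempest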
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         # Tempest config must come after layer 2 services are running
         :
@@ -17,6 +17,7 @@
         # local.conf Tempest option overrides
         :
     elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
+        async_wait install_tempest
         echo_summary "Initializing Tempest"
         configure_tempest
         echo_summary "Installing Tempest Plugins"
diff --git a/files/debs/dstat b/files/debs/dstat
index 2b643b8..40d00f4 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1 +1,2 @@
-dstat
+dstat # dist:bionic
+pcp
diff --git a/files/debs/general b/files/debs/general
index 4bf1ff4..7e481b4 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -5,6 +5,7 @@
 curl
 default-jre-headless  # NOPRIME
 g++
+gawk
 gcc
 gettext  # used for compiling message catalogs
 git
@@ -28,6 +29,7 @@
 psmisc
 python3-dev
 python3-pip
+python3-systemd
 python3-venv
 tar
 tcpdump
diff --git a/files/debs/nova b/files/debs/nova
index a7aebbf..e194414 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -3,7 +3,6 @@
 dnsmasq-base
 dnsmasq-utils # for dhcp_release
 ebtables
-gawk
 genisoimage # required for config_drive
 iptables
 iputils-arping
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 0af2b5b..f636110 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -3,6 +3,7 @@
 bc
 ca-certificates-mozilla
 curl
+gawk
 gcc
 gcc-c++
 git-core
@@ -20,10 +21,10 @@
 pcre-devel # python-pcre
 postgresql-devel  # psycopg2
 psmisc
+python3-systemd
 python-cmd2 # dist:opensuse-12.3
 python-devel  # pyOpenSSL
 python-xml
-systemd-devel # for systemd-python
 tar
 tcpdump
 unzip
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 9923760..1cc2f62 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -4,7 +4,6 @@
 dnsmasq
 dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
 ebtables
-gawk
 iptables
 iputils
 kpartx
diff --git a/files/rpms/general b/files/rpms/general
index c42ce52..33da0a5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,6 +1,7 @@
 bc
 curl
 dbus
+gawk
 gcc
 gcc-c++
 gettext  # used for compiling message catalogs
@@ -25,8 +26,8 @@
 psmisc
 python3-devel
 python3-pip
+python3-systemd
 redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # for systemd-python
 tar
 tcpdump
 unzip
diff --git a/files/rpms/nova b/files/rpms/nova
index 2218330..8ea8ccc 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,7 +3,6 @@
 dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
 ebtables
-gawk
 genisoimage # required for config_drive
 iptables
 iputils
diff --git a/functions b/functions
index cc1ca6c..ccca5cd 100644
--- a/functions
+++ b/functions
@@ -21,6 +21,7 @@
 source ${FUNC_DIR}/inc/meta-config
 source ${FUNC_DIR}/inc/python
 source ${FUNC_DIR}/inc/rootwrap
+source ${FUNC_DIR}/inc/async
 
 # Save trace setting
 _XTRACE_FUNCTIONS=$(set +o | grep xtrace)
@@ -279,31 +280,6 @@
         return
     fi
 
-    # XenServer-vhd-ovf-format images are provided as .vhd.tgz
-    # and should not be decompressed prior to loading
-    if [[ "$image_url" =~ '.vhd.tgz' ]]; then
-        image_name="${image_fname%.vhd.tgz}"
-        local force_vm_mode=""
-        if [[ "$image_name" =~ 'cirros' ]]; then
-            # Cirros VHD image currently only boots in PV mode.
-            # Nova defaults to PV for all VHD images, but
-            # the glance setting is needed for booting
-            # directly from volume.
-            force_vm_mode="vm_mode=xen"
-        fi
-        _upload_image "$image_name" ovf vhd "$image" $force_vm_mode
-        return
-    fi
-
-    # .xen-raw.tgz suggests a Xen capable raw image inside a tgz.
-    # and should not be decompressed prior to loading.
-    # Setting metadata, so PV mode is used.
-    if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
-        image_name="${image_fname%.xen-raw.tgz}"
-        _upload_image "$image_name" tgz raw "$image" vm_mode=xen
-        return
-    fi
-
     if [[ "$image_url" =~ '.hds' ]]; then
         image_name="${image_fname%.hds}"
         vm_mode=${image_name##*-}
@@ -751,23 +727,22 @@
 fi
 
 
-# create_disk - Create backing disk
+# create_disk - Create, configure, and mount a backing disk
 function create_disk {
     local node_number
     local disk_image=${1}
     local storage_data_dir=${2}
     local loopback_disk_size=${3}
+    local key
 
-    # Create a loopback disk and format it to XFS.
-    if [[ -e ${disk_image} ]]; then
-        if egrep -q ${storage_data_dir} /proc/mounts; then
-            sudo umount ${storage_data_dir}
-            sudo rm -f ${disk_image}
-        fi
-    fi
+    # Derive a unique, slash-free key for the fstab comment from the
+    # image path
+    key=$(echo $disk_image | sed 's#/#_#g')
+    key="devstack-$key"
 
-    sudo mkdir -p ${storage_data_dir}/drives/images
+    destroy_disk $disk_image $storage_data_dir
 
+    # Create an empty file of the correct size (and ensure the
+    # directory structure up to that path exists)
+    sudo mkdir -p $(dirname ${disk_image})
     sudo truncate -s ${loopback_disk_size} ${disk_image}
 
     # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
@@ -777,11 +752,31 @@
     # Swift and Ceph.
     sudo mkfs.xfs -f -i size=1024 ${disk_image}
 
-    # Mount the disk with mount options to make it as efficient as possible
-    if ! egrep -q ${storage_data_dir} /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8  \
-            ${disk_image} ${storage_data_dir}
+    # Install a new loopback fstab entry for this disk image, and mount it
+    echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab
+    sudo mkdir -p $storage_data_dir
+    sudo mount -v $storage_data_dir
+}
+
+# Unmount, de-configure, and destroy a backing disk
+function destroy_disk {
+    local disk_image=$1
+    local storage_data_dir=$2
+    local key
+
+    # Must match the key format used in create_disk
+    key=$(echo $disk_image | sed 's#/#_#g')
+    key="devstack-$key"
+
+    # Unmount the target, if mounted
+    if egrep -q $storage_data_dir /proc/mounts; then
+        sudo umount $storage_data_dir
     fi
+
+    # Clear any fstab rules
+    sudo sed -i "/.*comment=$key.*/ d" /etc/fstab
+
+    # Delete the file
+    sudo rm -f $disk_image
 }
 
 
diff --git a/functions-common b/functions-common
index 547f6df..340da75 100644
--- a/functions-common
+++ b/functions-common
@@ -397,8 +397,6 @@
         # Drop the . release as we assume it's compatible
         # XXX re-evaluate when we get RHEL10
         DISTRO="rhel${os_RELEASE::1}"
-    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
-        DISTRO="xs${os_RELEASE%.*}"
     else
         # We can't make a good choice here.  Setting a sensible DISTRO
         # is part of the problem, but not the major issue -- we really
@@ -452,8 +450,8 @@
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
         [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
         [ "$os_VENDOR" = "RedHatEnterprise" ] || \
-        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \
-        [ "$os_VENDOR" = "Virtuozzo" ]
+        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
+        [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
 }
 
 
@@ -1609,10 +1607,6 @@
 }
 
 
-function tail_log {
-    deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens"
-}
-
 # Plugin Functions
 # =================
 
diff --git a/inc/async b/inc/async
new file mode 100644
index 0000000..56338f5
--- /dev/null
+++ b/inc/async
@@ -0,0 +1,256 @@
+#!/bin/bash
+#
+# Symbolic asynchronous tasks for devstack
+#
+# Usage:
+#
+#  async_runfunc my_shell_func foo bar baz
+#
+#  ... do other stuff ...
+#
+#  async_wait my_shell_func
+#
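+# A job may also be given an explicit name with async_run, and several
+# futures can be collected in a single call (names here are hypothetical):
+#
+#  async_run prep-db my_shell_func foo
+#  async_run prep-mq my_other_func bar
+#  async_wait prep-db prep-mq
+#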
+
+DEVSTACK_PARALLEL=$(trueorfalse False DEVSTACK_PARALLEL)
+_ASYNC_BG_TIME=0
+
+# Keep track of how much total time was spent in background tasks
+# Takes a job runtime in ms.
+function _async_incr_bg_time {
+    local elapsed_ms="$1"
+    _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms))
+}
+
+# Get the PID of a named future to wait on
+function async_pidof {
+    local name="$1"
+    local inifile="${DEST}/async/${name}.ini"
+
+    if [ -f "$inifile" ]; then
+        iniget $inifile job pid
+    else
+        echo 'UNKNOWN'
+        return 1
+    fi
+}
+
+# Log a message about a job. If the message contains "%command" then the
+# full command line of the job will be substituted in the output
+function async_log {
+    local name="$1"
+    shift
+    local message="$*"
+    local inifile=${DEST}/async/${name}.ini
+    local pid
+    local command
+
+    pid=$(iniget $inifile job pid)
+    command=$(iniget $inifile job command | tr '#' '-')
+    message=$(echo "$message" | sed "s#%command#$command#g")
+
+    echo "[$BASHPID Async ${name}:${pid}]: $message"
+}
+
+# Inner function that actually runs the requested task. We wrap it like this
+# just so we can emit a finish message as soon as the work is done, which
+# makes it easier to locate the failure point in the log.
+function async_inner {
+    local name="$1"
+    local rc
+    local fifo="${DEST}/async/${name}.fifo"
+    shift
+    set -o xtrace
+    if $* >${DEST}/async/${name}.log 2>&1; then
+        rc=0
+        set +o xtrace
+        async_log "$name" "finished successfully"
+    else
+        rc=$?
+        set +o xtrace
+        async_log "$name" "FAILED with rc $rc"
+    fi
+    iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N")
+    # Block on the fifo until we are signaled to exit by the main process
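+    # (async_wait writes WAKEUP into this fifo just before it calls 'wait',
+    # so the child stays alive until the main process is ready to collect
+    # its status and log output)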
+    cat $fifo
+    return $rc
+}
+
+# Run something async. Takes a symbolic name and the command to run.
+# Ideally this would be rarely used and async_runfunc() would be used
+# everywhere for readability.
+#
+# This spawns the work in a background worker and records a "future" to be
+# collected by a later call to async_wait()
+function async_run {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local name="$1"
+    shift
+    local inifile=${DEST}/async/${name}.ini
+    local fifo=${DEST}/async/${name}.fifo
+
+    touch $inifile
+    iniset $inifile job command "$*"
+    iniset $inifile job start_time $(date +%s%3N)
+
+    if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then
+        mkfifo $fifo
+        async_inner $name $* &
+        iniset $inifile job pid $!
+        async_log "$name" "running: %command"
+        $xtrace
+    else
+        iniset $inifile job pid "self"
+        async_log "$name" "Running synchronously: %command"
+        $xtrace
+        $*
+        return $?
+    fi
+}
+
+# Shortcut for running a shell function async. Uses the function name as the
+# async name.
+function async_runfunc {
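+    # Note: $* still includes $1, so the function name serves both as the
+    # job name and as the first word of the command that async_run executes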
+    async_run $1 $*
+}
+
+# Dump some information to help debug a failed wait
+function async_wait_dump {
+    local failpid=$1
+
+    echo "=== Wait failure dump from $BASHPID ==="
+    echo "Processes:"
+    ps -f
+    echo "Waiting jobs:"
+    for name in $(ls ${DEST}/async/*.ini); do
+        echo "Job $name :"
+        cat "$name"
+    done
+    echo "Failed PID status:"
+    sudo cat /proc/$failpid/status
+    sudo cat /proc/$failpid/cmdline
+    echo "=== End wait failure dump ==="
+}
+
+# Wait for an async future to complete. May return immediately if the job
+# is already complete, or if the future has already been waited on (avoid
+# this); otherwise it blocks until the future completes.
+function async_wait {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local pid rc running inifile runtime fifo
+    rc=0
+    for name in $*; do
+        running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l)
+        inifile="${DEST}/async/${name}.ini"
+        fifo="${DEST}/async/${name}.fifo"
+
+        if pid=$(async_pidof "$name"); then
+            async_log "$name" "Waiting for completion of %command" \
+                      "running on PID $pid ($running other jobs running)"
+            time_start async_wait
+            if [[ "$pid" != "self" ]]; then
+                # Signal the child to go ahead and exit since we are about to
+                # wait for it to collect its status.
+                async_log "$name" "Signaling child to exit"
+                echo WAKEUP > $fifo
+                async_log "$name" "Signaled"
+                # Do not actually call wait if we ran synchronously
+                if wait $pid; then
+                    rc=0
+                else
+                    rc=$?
+                fi
+                cat ${DEST}/async/${name}.log
+                rm -f $fifo
+            fi
+            time_stop async_wait
+            local start_time
+            local end_time
+            start_time=$(iniget $inifile job start_time)
+            end_time=$(iniget $inifile job end_time)
+            _async_incr_bg_time $(($end_time - $start_time))
+            runtime=$((($end_time - $start_time) / 1000))
+            async_log "$name" "finished %command with result" \
+                      "$rc in $runtime seconds"
+            rm -f $inifile
+            if [ $rc -ne 0 ]; then
+                async_wait_dump $pid
+                echo Stopping async wait due to error: $*
+                break
+            fi
+        else
+            # This could probably be removed - it is really just here
+            # to help notice if you wait for something by the wrong
+            # name, but it also shows up for things we didn't start
+            # because they were not enabled.
+            echo Not waiting for async task $name that we never started or \
+                 have already waited for
+        fi
+    done
+
+    $xtrace
+    return $rc
+}
+
+# Check for uncollected futures and wait on them
+function async_cleanup {
+    local name
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    for inifile in $(find ${DEST}/async -name '*.ini'); do
+        name=$(basename $inifile .ini)
+        echo "WARNING: uncollected async future $name"
+        async_wait $name || true
+    done
+}
+
+# Make sure our async dir is created and clean
+function async_init {
+    local async_dir=${DEST}/async
+
+    # Clean any residue if present from previous runs
+    rm -Rf $async_dir
+
+    # Make sure we have a state directory
+    mkdir -p $async_dir
+}
+
+function async_print_timing {
+    local bg_time_minus_wait
+    local elapsed_time
+    local serial_time
+    local speedup
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    # The logic here is: All the background task time would be
+    # serialized if we did not do them in the background. So we can
+    # add that to the elapsed time for the whole run. However, time we
+    # spend waiting for async things to finish adds to the elapsed
+    # time, but is time where we're not doing anything useful. Thus,
+    # we subtract that from the would-be-serialized time.
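+    #
+    # For example (hypothetical numbers): 300s of background work, of
+    # which 60s was spent blocked in async_wait, on a 600s run yields
+    # serial_time = 600 + (300 - 60) = 840s, i.e. a 1.4x speedup.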
+
+    bg_time_minus_wait=$((\
+            ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000))
+    elapsed_time=$(($(date "+%s") - $_TIME_BEGIN))
+    serial_time=$(($elapsed_time + $bg_time_minus_wait))
+
+    echo
+    echo "================="
+    echo " Async summary"
+    echo "================="
+    echo " Time spent in the background minus waits: $bg_time_minus_wait sec"
+    echo " Elapsed time: $elapsed_time sec"
+    echo " Time if we did everything serially: $serial_time sec"
+    echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}")
+}
diff --git a/lib/apache b/lib/apache
index 870a65a..04259ba 100644
--- a/lib/apache
+++ b/lib/apache
@@ -93,9 +93,6 @@
 
     if is_ubuntu; then
         local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
-        if [[ "$DISTRO" == 'bionic' ]]; then
-            pkg_list="${pkg_list} uwsgi-plugin-python"
-        fi
         install_package ${pkg_list}
     elif is_fedora; then
         # Note httpd comes with mod_proxy_uwsgi and it is loaded by
diff --git a/lib/cinder b/lib/cinder
index b892b91..f6fd095 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -31,6 +31,7 @@
 CINDER_DRIVER=${CINDER_DRIVER:-default}
 CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins
 CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends
+CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups
 
 # grab plugin config if specified via cinder_driver
 if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
@@ -87,17 +88,26 @@
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
-# Centos7 and OpenSUSE switched to using LIO and that's all that's supported,
-# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI
+# Default to lioadm
+CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+
+# EL and SUSE should only use lioadm
 if is_fedora || is_suse; then
-    CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
     if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
         die "lioadm is the only valid Cinder target_helper config on this platform"
     fi
-else
-    CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
 fi
 
+# For backward compatibility:
+# before CINDER_BACKUP_DRIVER was introduced, the ceph backup driver was
+# configured along with the ceph backend driver.
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then
+    CINDER_BACKUP_DRIVER=ceph
+fi
+
+# Supported backup drivers are in lib/cinder_backups
+CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift}
+
 # Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi
 # reference should be cleaned up to more accurately refer to uwsgi.
 CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True}
@@ -113,6 +123,15 @@
     done
 fi
 
+# Source the backup driver
+if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+    if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then
+        source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER
+    else
+        die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported"
+    fi
+fi
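+
+# Each lib/cinder_backups/<driver> file is expected to define
+# configure_cinder_backup_<driver> and may optionally define
+# init_cinder_backup_<driver> and cleanup_cinder_backup_<driver>;
+# these hooks are invoked from the corresponding cinder phases below.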
+
 # Environment variables to configure the image-volume cache
 CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
 
@@ -189,6 +208,12 @@
         done
     fi
 
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            cleanup_cinder_backup_$CINDER_BACKUP_DRIVER
+        fi
+    fi
+
     stop_process "c-api"
     remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
 }
@@ -236,6 +261,11 @@
     iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
     iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
 
+    # Avoid RPC timeouts in slow CI and test environments by doubling the
+    # default response timeout set by RPC clients. See bug #1873234 for more
+    # details and example failures.
+    iniset $CINDER_CONF DEFAULT rpc_response_timeout 120
+
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
         local default_name=""
@@ -250,9 +280,6 @@
                 default_name=$be_name
             fi
             enabled_backends+=$be_name,
-
-            iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR
-
         done
         iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
         if [[ -n "$default_name" ]]; then
@@ -261,13 +288,12 @@
         configure_cinder_image_volume_cache
     fi
 
-    if is_service_enabled c-bak; then
-        # NOTE(mriedem): The default backup driver uses swift and if we're
-        # on a subnode we might not know if swift is enabled, but chances are
-        # good that it is on the controller so configure the backup service
-        # to use it. If we want to configure the backup service to use
-        # a non-swift driver, we'll likely need environment variables.
-        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            configure_cinder_backup_$CINDER_BACKUP_DRIVER
+        else
+            die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER"
+        fi
     fi
 
     if is_service_enabled ceilometer; then
@@ -344,12 +370,6 @@
                 "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
 
-            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
-            get_or_create_endpoint \
-                "volumev2" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s"
-
             get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
             get_or_create_endpoint \
                 "volumev3" \
@@ -361,12 +381,6 @@
                 "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
 
-            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
-            get_or_create_endpoint \
-                "volumev2" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s"
-
             get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
             get_or_create_endpoint \
                 "volumev3" \
@@ -405,6 +419,12 @@
         done
     fi
 
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            init_cinder_backup_$CINDER_BACKUP_DRIVER
+        fi
+    fi
+
     mkdir -p $CINDER_STATE_PATH/volumes
 }
 
@@ -534,6 +554,14 @@
                 OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name}
             fi
         done
+
+        # Increase quota for the service project if glance is using cinder,
+        # since it's likely to occasionally go above the default 10 in parallel
+        # test execution.
+        if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+            openstack --os-region-name="$REGION_NAME" \
+                      quota set --volumes 50 "$SERVICE_PROJECT_NAME"
+        fi
     fi
 }
 
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 33c9706..0b46573 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -6,12 +6,6 @@
 # Enable with:
 #
 #   CINDER_ENABLED_BACKENDS+=,ceph:ceph
-#
-# Optional parameters:
-#   CINDER_BAK_CEPH_POOL=<pool-name>
-#   CINDER_BAK_CEPH_USER=<user>
-#   CINDER_BAK_CEPH_POOL_PG=<pg-num>
-#   CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
 
 # Dependencies:
 #
@@ -29,11 +23,6 @@
 # Defaults
 # --------
 
-CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
-CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
-CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
-CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
-
 
 # Entry Points
 # ------------
@@ -52,27 +41,6 @@
     iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
     iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
     iniset $CINDER_CONF DEFAULT glance_api_version 2
-
-    if is_service_enabled c-bak; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            # Configure Cinder backup service options, ceph pool, ceph user and ceph key
-            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
-            if [[ $CEPH_REPLICAS -ne 1 ]]; then
-                sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
-            fi
-        fi
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-
-        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
-        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
-        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
-        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
-        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
-        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
-        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
-    fi
 }
 
 # Restore xtrace
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 497081c..e03ef14 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -52,7 +52,7 @@
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
     iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
-
+    iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
 }
 
 # init_cinder_backend_lvm - Initialize volume group
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
new file mode 100644
index 0000000..e4003c0
--- /dev/null
+++ b/lib/cinder_backups/ceph
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# lib/cinder_backups/ceph
+# Configure the ceph backup driver
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=ceph
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_CEPH=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+function configure_cinder_backup_ceph {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+    if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+    sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+    iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+    iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+    iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+    iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+}
+
+# init_cinder_backup_ceph: nothing to do
+# cleanup_cinder_backup_ceph: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift
new file mode 100644
index 0000000..6fb2486
--- /dev/null
+++ b/lib/cinder_backups/s3_swift
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# lib/cinder_backups/s3_swift
+# Configure the s3 backup driver with swift s3api
+#
+# TODO: create lib/cinder_backups/s3 for external s3-compatible storage
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=s3_swift
+#   enable_service s3api s-proxy s-object s-container s-account
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_cinder_backup_s3_swift {
+    # This configuration requires swift and s3api. If we're
+    # on a subnode we might not know if they are enabled
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT"
+}
+
+function init_cinder_backup_s3_swift {
+    openstack ec2 credential create
+    iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)"
+    iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)"
+    if is_service_enabled tls-proxy; then
+        iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE"
+    fi
+}
+
+# cleanup_cinder_backup_s3_swift: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_S3_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
new file mode 100644
index 0000000..d7c977e
--- /dev/null
+++ b/lib/cinder_backups/swift
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# lib/cinder_backups/swift
+# Configure the swift backup driver
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=swift
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+
+function configure_cinder_backup_swift {
+    # NOTE(mriedem): The default backup driver uses swift and if we're
+    # on a subnode we might not know if swift is enabled, but chances are
+    # good that it is on the controller so configure the backup service
+    # to use it.
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+}
+
+# init_cinder_backup_swift: nothing to do
+# cleanup_cinder_backup_swift: nothing to do
+
+
+# Restore xtrace
+$_XTRACE_CINDER_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS
deleted file mode 100644
index 92135e7..0000000
--- a/lib/cinder_plugins/XenAPINFS
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/XenAPINFS
-# Configure the XenAPINFS driver
-
-# Enable with:
-#
-#   CINDER_DRIVER=XenAPINFS
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
-    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
-    iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
-    iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_XENAPINFS
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/glance b/lib/glance
index c2a8b74..e789aff 100644
--- a/lib/glance
+++ b/lib/glance
@@ -130,8 +130,9 @@
 # cleanup_glance() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_glance {
-    # delete image files (glance)
-    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR
+    # delete image files (glance) and all of the glance-remote temporary
+    # storage
+    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote"
 
     # Cleanup multiple stores directories
     if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
@@ -279,10 +280,6 @@
     configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
     iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2
     iniset_rpc_backend glance $GLANCE_API_CONF
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
-        iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
-    fi
     if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then
         iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop"
     fi
@@ -365,6 +362,11 @@
 
     if [[ "$GLANCE_STANDALONE" == False ]]; then
         write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image"
+        # Grab our uwsgi listen address and use that to fill out our
+        # worker_self_reference_url config
+        iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \
+               $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \
+                    $GLANCE_UWSGI_CONF)
     else
         write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image"
         iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
@@ -460,6 +462,67 @@
     setup_develop $GLANCE_DIR
 }
 
+# glance_remote_conf() - Return the path to an alternate config file for
+#                        the remote glance clone
+function glance_remote_conf {
+    echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1")
+}
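+
+# e.g., assuming GLANCE_CONF_DIR=/etc/glance:
+#   glance_remote_conf /etc/glance/glance-api.conf
+#     -> /etc/glance-remote/glance-api.conf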
+
+# start_glance_remote_clone() - Clone the regular glance api worker
+function start_glance_remote_clone {
+    local glance_remote_conf_dir glance_remote_port remote_data
+    local glance_remote_uwsgi
+
+    glance_remote_conf_dir="$(glance_remote_conf "")"
+    glance_remote_port=$(get_random_port)
+    glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)"
+
+    # Clone the existing ready-to-go glance-api setup
+    sudo rm -Rf "$glance_remote_conf_dir"
+    sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir"
+    sudo chown $STACK_USER -R "$glance_remote_conf_dir"
+
+    # Point this worker at different data dirs
+    remote_data="${DATA_DIR}/glance-remote"
+    mkdir -p $remote_data/os_glance_tasks_store \
+          "${remote_data}/os_glance_staging_store"
+    iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \
+           filesystem_store_datadir "${remote_data}/os_glance_staging_store"
+    iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \
+           filesystem_store_datadir "${remote_data}/os_glance_tasks_store"
+
+    # Change our uwsgi to our new port
+    sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \
+        "$glance_remote_uwsgi"
+
+    # Update the self-reference url with our new port
+    iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \
+           worker_self_reference_url \
+           $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+                    "$glance_remote_uwsgi")
+
+    # We need to create the systemd service for the clone, but then
+    # change it to include an Environment line to point the WSGI app
+    # at the alternate config directory.
+    write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \
+                               --procname-prefix \
+                               glance-api-remote \
+                               --ini $glance_remote_uwsgi" \
+                               "" "$STACK_USER"
+    iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
+           "Service" "Environment" \
+           "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir"
+
+    # Reload and restart with the new config
+    $SYSTEMCTL daemon-reload
+    $SYSTEMCTL restart devstack@g-api-r
+
+    get_or_create_service glance_remote image_remote "Alternate glance"
+    get_or_create_endpoint image_remote $REGION_NAME \
+                $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+                    $glance_remote_uwsgi)
+}
+
 # start_glance() - Start running processes
 function start_glance {
     local service_protocol=$GLANCE_SERVICE_PROTOCOL
@@ -475,6 +538,11 @@
         run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
     fi
 
+    if is_service_enabled g-api-r; then
+        echo "Starting the g-api-r clone service..."
+        start_glance_remote_clone
+    fi
+
     echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
     if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then
         die $LINENO "g-api did not start"
@@ -484,6 +552,7 @@
 # stop_glance() - Stop running processes
 function stop_glance {
     stop_process g-api
+    stop_process g-api-r
 }
 
 # Restore xtrace
diff --git a/lib/keystone b/lib/keystone
index d4c7b06..66e867c 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -318,25 +318,25 @@
     local admin_role="admin"
     local member_role="member"
 
-    get_or_add_user_domain_role $admin_role $admin_user default
+    async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
 
     # Create service project/role
     get_or_create_domain "$SERVICE_DOMAIN_NAME"
-    get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
+    async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
 
     # Service role, so service users do not have to be admins
-    get_or_create_role service
+    async_run ks-service get_or_create_role service
 
     # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
     # The admin role in swift allows a user to act as an admin for their project,
     # but ResellerAdmin is needed for a user to act as any project. The name of this
     # role is also configurable in swift-proxy.conf
-    get_or_create_role ResellerAdmin
+    async_run ks-reseller get_or_create_role ResellerAdmin
 
     # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
     local another_role="anotherrole"
-    get_or_create_role $another_role
+    async_run ks-anotherrole get_or_create_role $another_role
 
     # invisible project - admin can't see this one
     local invis_project
@@ -349,10 +349,12 @@
     demo_user=$(get_or_create_user "demo" \
         "$ADMIN_PASSWORD" "default" "demo@example.com")
 
-    get_or_add_user_project_role $member_role $demo_user $demo_project
-    get_or_add_user_project_role $admin_role $admin_user $demo_project
-    get_or_add_user_project_role $another_role $demo_user $demo_project
-    get_or_add_user_project_role $member_role $demo_user $invis_project
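+    # Brace expansion hands async_wait each of the future names started
+    # above; the project and roles must exist before the role assignments
+    # below can run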
+    async_wait ks-{domain-role,project,service,reseller,anotherrole}
+
+    async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+    async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
+    async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
+    async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
 
     # alt_demo
     local alt_demo_project
@@ -361,9 +363,9 @@
     alt_demo_user=$(get_or_create_user "alt_demo" \
         "$ADMIN_PASSWORD" "default" "alt_demo@example.com")
 
-    get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
-    get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
-    get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+    async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
+    async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+    async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
 
     # groups
     local admin_group
@@ -373,11 +375,15 @@
     non_admin_group=$(get_or_create_group "nonadmins" \
         "default" "non-admin group")
 
-    get_or_add_group_project_role $member_role $non_admin_group $demo_project
-    get_or_add_group_project_role $another_role $non_admin_group $demo_project
-    get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
-    get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
-    get_or_add_group_project_role $admin_role $admin_group $admin_project
+    async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project
+    async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project
+    async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
+    async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
+    async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
+
+    async_wait ks-demo-{member,admin,another,invis}
+    async_wait ks-alt-{member,admin,another}
+    async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
 
     if is_service_enabled ldap; then
         create_ldap_domain
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 2906f15..791ff18 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -226,15 +226,17 @@
 # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
 OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
 
-default_route_dev=$(ip route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
 # With the linuxbridge agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
 # the name of the network interface to use for the physical
 # network.
 #
 # Example: ``LB_PHYSICAL_INTERFACE=eth1``
-LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev}
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+    LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
 
 # When Neutron tunnels are enabled it is needed to specify the
 # IP address of the end point in the local server. This IP is set
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 1009611..7fed8bf 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -15,6 +15,10 @@
 
 function neutron_plugin_install_agent_packages {
     _neutron_ovs_base_install_agent_packages
+    if use_library_from_git "os-ken"; then
+        git_clone_by_name "os-ken"
+        setup_dev_lib "os-ken"
+    fi
 }
 
 function neutron_plugin_configure_dhcp_agent {
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 84df918..e4d0d75 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -66,12 +66,15 @@
 
 # A UUID to uniquely identify this system.  If one is not specified, a random
 # one will be generated.  A randomly generated UUID will be saved in a file
-# 'ovn-uuid' so that the same one will be re-used if you re-run DevStack.
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart
+# Open vSwitch service.
 OVN_UUID=${OVN_UUID:-}
 
 # Whether or not to build the openvswitch kernel module from ovs.  This is required
 # unless the distro kernel includes ovs+conntrack support.
 OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
 
 # Whether or not to install the ovs python module from ovs source.  This can be
 # used to test and validate new ovs python features.  This should only be used
@@ -91,17 +94,24 @@
 OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
 OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
 
-OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+    OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+fi
 
 OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
 
-OVS_PREFIX=/usr/local
+OVS_PREFIX=
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    OVS_PREFIX=/usr/local
+fi
 OVS_SBINDIR=$OVS_PREFIX/sbin
 OVS_BINDIR=$OVS_PREFIX/bin
 OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch
 OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch
 OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts
 OVS_DATADIR=$DATA_DIR/ovs
+OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
 
 OVN_DATADIR=$DATA_DIR/ovn
 OVN_SHAREDIR=$OVS_PREFIX/share/ovn
@@ -113,6 +123,24 @@
 
 STACK_GROUP="$( id --group --name "$STACK_USER" )"
 
+OVN_NORTHD_SERVICE=ovn-northd.service
+if is_ubuntu; then
+    # The ovn-central.service file on Ubuntu is responsible for starting
+    # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service)
+    OVN_NORTHD_SERVICE=ovn-central.service
+fi
+OVSDB_SERVER_SERVICE=ovsdb-server.service
+OVS_VSWITCHD_SERVICE=ovs-vswitchd.service
+OVN_CONTROLLER_SERVICE=ovn-controller.service
+OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service
+    OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service
+    OVN_NORTHD_SERVICE=devstack@ovn-northd.service
+    OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service
+    OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service
+fi
+
 # Defaults Overwrite
 # ------------------
 
@@ -124,14 +152,33 @@
 # this one allows empty:
 ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"}
 
+Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100}
+Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25}
+Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter}
 
 # Utility Functions
 # -----------------
 
+function wait_for_sock_file {
+    local count=0
+    while [ ! -S $1 ]; do
+        sleep 1
+        count=$((count+1))
+        if [ "$count" -gt 5 ]; then
+            die $LINENO "Socket $1 not found"
+        fi
+    done
+}
+
 function use_new_ovn_repository {
     if [ -z "$is_new_ovn" ]; then
         local ovs_repo_dir=$DEST/$OVS_REPO_NAME
         if [ ! -d $ovs_repo_dir ]; then
+            git_timed clone $OVS_REPO $ovs_repo_dir
+            pushd $ovs_repo_dir
+            git checkout $OVS_BRANCH
+            popd
+        else
             clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH
         fi
         # Check the split commit exists in the current branch
@@ -150,14 +197,14 @@
 # neutron-ovs-cleanup uses the OVSDB native interface.
 function ovn_base_setup_bridge {
     local bridge=$1
-    local addbr_cmd="ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15"
+    local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15"
 
     if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
         addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
     fi
 
     $addbr_cmd
-    ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
+    sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
 
 function _start_process {
@@ -226,12 +273,11 @@
     local ext_gw_ifc
     ext_gw_ifc=$(get_ext_gw_interface)
 
-    ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15
-    ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc
+    sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15
+    sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc
     if [ -n "$FLOATING_RANGE" ]; then
         local cidr_len=${FLOATING_RANGE#*/}
-        sudo ip addr flush dev $ext_gw_ifc
-        sudo ip addr add $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc
+        sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc
     fi
 
     # Ensure IPv6 RAs are accepted on the interface with the default route.
@@ -245,8 +291,7 @@
     sudo sysctl -w net.ipv6.conf.all.forwarding=1
     if [ -n "$IPV6_PUBLIC_RANGE" ]; then
         local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
-        sudo ip -6 addr flush dev $ext_gw_ifc
-        sudo ip -6 addr add $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc
+        sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc
     fi
 
     sudo ip link set $ext_gw_ifc up
@@ -334,34 +379,45 @@
     # Check the OVN configuration
     ovn_sanity_check
 
-    # If OVS is already installed, remove it, because we're about to re-install
-    # it from source.
-    for package in openvswitch openvswitch-switch openvswitch-common; do
-        if is_package_installed $package ; then
-            uninstall_package $package
-        fi
-    done
-
     # Install tox, used to generate the config (see devstack/override-defaults)
     pip_install tox
-    remove_ovs_packages
-    sudo rm -f $OVS_RUNDIR/*
-
-    compile_ovs $OVN_BUILD_MODULES
-    if use_new_ovn_repository; then
-        compile_ovn $OVN_BUILD_MODULES
-    fi
-
-    # Ensure that the OVS commands are accessible in the PATH
-    OVS_BINDIR=${OVS_BINDIR:-/usr/local/bin}
-    export PATH=$OVS_BINDIR:$PATH
 
     sudo mkdir -p $OVS_RUNDIR
     sudo chown $(whoami) $OVS_RUNDIR
-    sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
-    sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch
-    sudo mkdir -p $OVS_PREFIX/var/log/ovn
-    sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
+    # NOTE(lucasagomes): To keep things simpler, let's reuse the same
+    # RUNDIR for both OVS and OVN. This way we avoid having to specify the
+    # --db option in the ovn-{n,s}bctl commands while playing with DevStack
+    sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
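+    # With the symlink in place, e.g. "ovn-nbctl show" finds
+    # $OVN_RUNDIR/ovnnb_db.sock without an explicit --db argument
+    # (illustrative).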
+
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+        # If OVS is already installed, remove it, because we're about to
+        # re-install it from source.
+        for package in openvswitch openvswitch-switch openvswitch-common; do
+            if is_package_installed $package ; then
+                uninstall_package $package
+            fi
+        done
+
+        remove_ovs_packages
+        sudo rm -f $OVS_RUNDIR/*
+
+        compile_ovs $OVN_BUILD_MODULES
+        if use_new_ovn_repository; then
+            compile_ovn $OVN_BUILD_MODULES
+        fi
+
+        sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
+        sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch
+        sudo mkdir -p $OVS_PREFIX/var/log/ovn
+        sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
+    else
+        fixup_ovn_centos
+        install_package $(get_packages openvswitch)
+        install_package $(get_packages ovn)
+    fi
+
+    # Ensure that the OVS commands are accessible in the PATH
+    export PATH=$OVS_BINDIR:$PATH
 
     # Archive log files and create new
     local log_archive_dir=$LOGDIR/archive
@@ -438,6 +494,12 @@
         populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP"
         inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
 
+        if is_service_enabled q-log neutron-log; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT"
+            inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
+        fi
+
         if is_service_enabled q-ovn-metadata-agent; then
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
         else
@@ -469,12 +531,26 @@
     echo "Configuring OVN"
 
     if [ -z "$OVN_UUID" ] ; then
-        if [ -f ./ovn-uuid ] ; then
-            OVN_UUID=$(cat ovn-uuid)
+        if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then
+            OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf)
         else
             OVN_UUID=$(uuidgen)
-            echo $OVN_UUID > ovn-uuid
+            echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
         fi
+    else
+        local ovs_uuid
+        ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf)
+        if [ "$ovs_uuid" != $OVN_UUID ]; then
+            echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
+        fi
+    fi
+
+    # Erase the pre-set configurations from the packages. DevStack will
+    # configure OVS and OVN for its own use.
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then
+        sudo truncate -s 0 /etc/openvswitch/default.conf
+        sudo truncate -s 0 /etc/sysconfig/openvswitch
+        sudo truncate -s 0 /etc/sysconfig/ovn
     fi
 
     # Metadata
@@ -491,7 +567,7 @@
         iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
         iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
         iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
-        iniset $OVN_META_CONF ovs ovsdb_connection unix:$OVS_RUNDIR/db.sock
+        iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
         iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
         if is_service_enabled tls-proxy; then
             iniset $OVN_META_CONF ovn \
@@ -530,51 +606,58 @@
         enable_service ovsdb-server
         enable_service ovs-vswitchd
 
-        if [ ! -f $OVS_DATADIR/conf.db ]; then
-            ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema
-        fi
-
-        if is_service_enabled ovn-controller-vtep; then
-            if [ ! -f $OVS_DATADIR/vtep.db ]; then
-                ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            if [ ! -f $OVS_DATADIR/conf.db ]; then
+                ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema
             fi
-        fi
 
-        local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file"
-        dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options"
-        if is_service_enabled ovn-controller-vtep; then
-            dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
+            if is_service_enabled ovn-controller-vtep; then
+                if [ ! -f $OVS_DATADIR/vtep.db ]; then
+                    ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema
+                fi
+            fi
+
+            local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file"
+            dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options"
+            if is_service_enabled ovn-controller-vtep; then
+                dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
+            fi
+            dbcmd+=" $OVS_DATADIR/conf.db"
+            _run_process ovsdb-server "$dbcmd"
+
+            # Note: ovn-controller will create and configure br-int once it is started.
+            # So, no need to create it now because nothing depends on that bridge here.
+            local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
+            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVSDB_SERVER_SERVICE"
+            _start_process "$OVS_VSWITCHD_SERVICE"
         fi
-        dbcmd+=" $OVS_DATADIR/conf.db"
-        _run_process ovsdb-server "$dbcmd"
 
         echo "Configuring OVSDB"
         if is_service_enabled tls-proxy; then
-            ovs-vsctl --no-wait set-ssl \
+            sudo ovs-vsctl --no-wait set-ssl \
                 $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
                 $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \
                 $INT_CA_DIR/ca-chain.pem
         fi
-        ovs-vsctl --no-wait set open_vswitch . system-type="devstack"
-        ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID"
-        ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
-        ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
-        ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
-        ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
+
+        sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST
+        sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
         # Select this chassis to host gateway routers
         if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
-            ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
+            sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
         fi
 
-        # Note: ovn-controller will create and configure br-int once it is started.
-        # So, no need to create it now because nothing depends on that bridge here.
-
-        local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
-        _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
-
         if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then
             ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE
-            ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE}
+            sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE}
         fi
 
         if is_service_enabled ovn-controller-vtep ; then
@@ -592,20 +675,20 @@
 }
 
 function _start_ovn_services {
-    _start_process "devstack@ovsdb-server.service"
-    _start_process "devstack@ovs-vswitchd.service"
+    _start_process "$OVSDB_SERVER_SERVICE"
+    _start_process "$OVS_VSWITCHD_SERVICE"
 
-    if is_service_enabled ovs-vtep ; then
-        _start_process "devstack@ovs-vtep.service"
-    fi
     if is_service_enabled ovn-northd ; then
-        _start_process "devstack@ovn-northd.service"
+        _start_process "$OVN_NORTHD_SERVICE"
     fi
     if is_service_enabled ovn-controller ; then
-        _start_process "devstack@ovn-controller.service"
+        _start_process "$OVN_CONTROLLER_SERVICE"
     fi
     if is_service_enabled ovn-controller-vtep ; then
-        _start_process "devstack@ovn-controller-vtep.service"
+        _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+    fi
+    if is_service_enabled ovs-vtep ; then
+        _start_process "devstack@ovs-vtep.service"
     fi
     if is_service_enabled q-ovn-metadata-agent; then
         _start_process "devstack@q-ovn-metadata-agent.service"
@@ -624,39 +707,47 @@
     fi
 
     if is_service_enabled ovn-northd ; then
-        if is_service_enabled tls-proxy; then
-            local tls_args="\
-                --ovn-nb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \
-                --ovn-nb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \
-                --ovn-nb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
-                --ovn-sb-db-ssl-ca-cert=$INT_CA_DIR/ca-chain.pem \
-                --ovn-sb-db-ssl-cert=$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \
-                --ovn-sb-db-ssl-key=$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
-                "
-        else
-            local tls_args=""
-        fi
-        local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor $tls_args start_northd"
-        local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
 
-        _run_process ovn-northd "$cmd" "$stop_cmd"
-        ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
-        ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+            _run_process ovn-northd "$cmd" "$stop_cmd"
+        else
+            _start_process "$OVN_NORTHD_SERVICE"
+        fi
+
+        # Wait for the service to be ready
+        wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
+        wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+
+        if is_service_enabled tls-proxy; then
+            sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+        fi
+        sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
         sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
         sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
     fi
 
     if is_service_enabled ovn-controller ; then
-        local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
-        local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
 
-        _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVN_CONTROLLER_SERVICE"
+        fi
     fi
 
     if is_service_enabled ovn-controller-vtep ; then
-        local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
-
-        _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+        fi
     fi
 
     if is_service_enabled q-ovn-metadata-agent; then
@@ -665,13 +756,6 @@
         setup_logging $OVN_META_CONF
     fi
 
-    # NOTE(lucasagomes): To keep things simpler, let's reuse the same
-    # RUNDIR for both OVS and OVN. This way we avoid having to specify the
-    # --db option in the ovn-{n,s}bctl commands while playing with DevStack
-    if use_new_ovn_repository; then
-        sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
-    fi
-
     _start_ovn_services
 }
 
@@ -680,26 +764,35 @@
     modprobe -q -r vport_geneve vport_vxlan openvswitch || true
 }
 
+function _stop_process {
+    local service=$1
+    echo "Stopping process $service"
+    if $SYSTEMCTL is-enabled $service; then
+        $SYSTEMCTL stop $service
+        $SYSTEMCTL disable $service
+    fi
+}
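+# Example (illustrative): stop whichever unit name was resolved above, e.g.
+#   _stop_process "$OVS_VSWITCHD_SERVICE"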
+
 function stop_ovn {
     if is_service_enabled q-ovn-metadata-agent; then
         sudo pkill -9 -f haproxy || :
-        stop_process neutron-ovn-metadata-agent
+        _stop_process "devstack@q-ovn-metadata-agent.service"
     fi
     if is_service_enabled ovn-controller-vtep ; then
-        stop_process ovn-controller-vtep
+        _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
     fi
     if is_service_enabled ovn-controller ; then
-        stop_process ovn-controller
+        _stop_process "$OVN_CONTROLLER_SERVICE"
     fi
     if is_service_enabled ovn-northd ; then
-        stop_process ovn-northd
+        _stop_process "$OVN_NORTHD_SERVICE"
     fi
     if is_service_enabled ovs-vtep ; then
-        stop_process ovs-vtep
+        _stop_process "devstack@ovs-vtep.service"
     fi
 
-    stop_process ovs-vswitchd
-    stop_process ovsdb-server
+    _stop_process "$OVS_VSWITCHD_SERVICE"
+    _stop_process "$OVSDB_SERVER_SERVICE"
 
     _stop_ovs_dp
 }
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 69536bb..75a3567 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -101,7 +101,6 @@
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
 default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
 
 default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
 
diff --git a/lib/nova b/lib/nova
index 9d7bbd8..930529a 100644
--- a/lib/nova
+++ b/lib/nova
@@ -83,6 +83,11 @@
 # services and the compute node
 NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False}
 
+# Validate configuration
+if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then
+    die $LINENO "enabling TLS for the console proxy requires the tls-proxy service"
+fi
+
 # Public facing bits
 NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
@@ -135,7 +140,7 @@
 # ``NOVA_USE_SERVICE_TOKEN`` is a mode where the service token is passed
 # along with the user token while communicating to external REST APIs like
 # Neutron, Cinder and Glance.
-NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
+NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN)
 
 # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
 # where there are at least two nova-computes.
@@ -607,10 +612,10 @@
     # can use the NOVA_CPU_CELL variable to know which cell we are for
     # calculating the offset.
     # Stagger the offset based on the total number of possible console proxies
-    # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+    # (novnc, spice, serial) so that their ports will not collide if
     # all are enabled.
     local offset
-    offset=$(((NOVA_CPU_CELL - 1) * 4))
+    offset=$(((NOVA_CPU_CELL - 1) * 3))
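+    # e.g. cell1 gets offset 0 (ports 6080-6082) and cell2 gets offset 3
+    # (ports 6083-6085), keeping the three proxies apart (illustrative).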
 
     # Use the host IP instead of the service host because for multi-node, the
     # service host will be the controller only.
@@ -618,7 +623,7 @@
     default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip)
 
     # All nova-compute workers need to know the vnc configuration options
-    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+    # These settings don't hurt anything if n-novnc is disabled
     if is_service_enabled n-cpu; then
         if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then
             # Use the old URL when installing novnc packages.
@@ -631,13 +636,11 @@
             NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"}
         fi
         iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"}
-        iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"}
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"}
         iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
     fi
 
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+    if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         # Address on which instance vncservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
@@ -660,7 +663,7 @@
 
     if is_service_enabled n-sproxy; then
         iniset $NOVA_CPU_CONF serial_console enabled True
-        iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/"
+        iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/"
     fi
 }
 
@@ -669,15 +672,13 @@
     local conf=${1:-$NOVA_CONF}
     local offset=${2:-0}
     # Stagger the offset based on the total number of possible console proxies
-    # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+    # (novnc, spice, serial) so that their ports will not collide if
     # all are enabled.
-    offset=$((offset * 4))
+    offset=$((offset * 3))
 
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+    if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $conf vnc novncproxy_port $((6080 + offset))
-        iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-        iniset $conf vnc xvpvncproxy_port $((6081 + offset))
 
         if is_nova_console_proxy_compute_tls_enabled ; then
             iniset $conf vnc auth_schemes "vencrypt"
@@ -709,12 +710,12 @@
 
     if is_service_enabled n-spice; then
         iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-        iniset $conf spice html5proxy_port $((6082 + offset))
+        iniset $conf spice html5proxy_port $((6081 + offset))
     fi
 
     if is_service_enabled n-sproxy; then
         iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-        iniset $conf serial_console serialproxy_port $((6083 + offset))
+        iniset $conf serial_console serialproxy_port $((6082 + offset))
     fi
 }
 
@@ -741,30 +742,50 @@
     sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys
 }
 
+function init_nova_db {
+    local dbname="$1"
+    local conffile="$2"
+    recreate_database $dbname
+    $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell
+}
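+# Example (illustrative): sync the cell1 database in the background and
+# collect the result later:
+#   async_run nova-cell-1 init_nova_db nova_cell1 $(conductor_conf 1)
+#   async_wait nova-cell-1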
+
 # init_nova() - Initialize databases, etc.
 function init_nova {
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
+        # (Re)create nova databases
+        if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+            # If we are doing singleconductor mode, we have some strange
+            # interdependencies. in that the main config refers to cell1
+            # instead of cell0. In that case, just make sure the cell0 database
+            # is created before we need it below, but don't db_sync it until
+            # after the cellN databases are there.
+            recreate_database nova_cell0
+        else
+            async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF
+        fi
+
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i)
+        done
+
         recreate_database $NOVA_API_DB
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
 
-        recreate_database nova_cell0
-
         # map_cell0 will create the cell mapping record in the nova_api DB so
-        # this needs to come after the api_db sync happens. We also want to run
-        # this before the db sync below since that will migrate both the nova
-        # and nova_cell0 databases.
+        # this needs to come after the api_db sync happens.
         $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
 
-        # (Re)create nova databases
-        for i in $(seq 1 $NOVA_NUM_CELLS); do
-            recreate_database nova_cell${i}
-            $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell
+        # Wait for DBs to finish from above
+        for i in $(seq 0 $NOVA_NUM_CELLS); do
+            async_wait nova-cell-$i
         done
 
-        # Migrate nova and nova_cell0 databases.
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
+        if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+            # We didn't db sync cell0 above, so run it now
+            $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
+        fi
 
         # Run online migrations on the new databases
         # Needed for flavor conversion
@@ -906,6 +927,11 @@
     # by the compute process.
     configure_console_compute
 
+    # Configure the OVSDB connection for os-vif
+    if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
+        iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
+    fi
+
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # ``sg`` is used in run_process to execute nova-compute as a member of the
@@ -956,7 +982,7 @@
 
 function enable_nova_console_proxies {
     for i in $(seq 1 $NOVA_NUM_CELLS); do
-        for srv in n-novnc n-xvnc n-spice n-sproxy; do
+        for srv in n-novnc n-spice n-sproxy; do
             if is_service_enabled $srv; then
                 enable_service ${srv}-cell${i}
             fi
@@ -974,7 +1000,6 @@
     # console proxies run globally for singleconductor, else they run per cell
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
-        run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
         run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
         run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
     else
@@ -983,7 +1008,6 @@
             local conf
             conf=$(conductor_conf $i)
             run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR"
-            run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf"
             run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR"
             run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf"
         done
@@ -1028,14 +1052,6 @@
     # happen between here and the script ending. However, in multinode
     # tests this can very often not be the case. So ensure that the
     # compute is up before we move on.
-
-    # TODO(sdague): honestly, this probably should be a plug point for
-    # an external system.
-    if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
-        # xenserver encodes information in the hostname of the compute
-        # because of the dom0/domU split. Just ignore for now.
-        return
-    fi
     wait_for_compute $NOVA_READY_TIMEOUT
 }
 
@@ -1074,13 +1090,13 @@
 
 function stop_nova_console_proxies {
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
-        for srv in n-novnc n-xvnc n-spice n-sproxy; do
+        for srv in n-novnc n-spice n-sproxy; do
             stop_process $srv
         done
     else
         enable_nova_console_proxies
         for i in $(seq 1 $NOVA_NUM_CELLS); do
-            for srv in n-novnc n-xvnc n-spice n-sproxy; do
+            for srv in n-novnc n-spice n-sproxy; do
                 stop_process ${srv}-cell${i}
             done
         done
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index b25bc0c..321775d 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -100,7 +100,7 @@
 
     if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
         if is_ubuntu; then
-            install_package python-guestfs
+            install_package python3-guestfs
             # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs:
             # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725)
             INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)"
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
deleted file mode 100644
index 511ec1b..0000000
--- a/lib/nova_plugins/hypervisor-xenserver
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/bash
-#
-# lib/nova_plugins/hypervisor-xenserver
-# Configure the XenServer hypervisor
-
-# Enable with:
-# VIRT_DRIVER=xenserver
-
-# Dependencies:
-# ``functions`` file
-# ``nova`` configuration
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-_XTRACE_XENSERVER=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
-    if [ -z "$XENAPI_CONNECTION_URL" ]; then
-        die $LINENO "XENAPI_CONNECTION_URL is not specified"
-    fi
-
-    # Check os-xenapi plugin is enabled
-    local plugins="${DEVSTACK_PLUGINS}"
-    local plugin
-    local found=0
-    for plugin in ${plugins//,/ }; do
-        if [[ "$plugin" = "os-xenapi" ]]; then
-            found=1
-            break
-        fi
-    done
-    if [[ $found -ne 1 ]]; then
-        die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf"
-    fi
-
-    iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
-    iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL"
-    iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER"
-    iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD"
-    iniset $NOVA_CONF DEFAULT flat_injected "False"
-
-    local dom0_ip
-    dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
-
-    local ssh_dom0
-    ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
-
-    # install console logrotate script
-    tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
-        $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest'
-
-    # Create a cron job that will rotate guest logs
-    $ssh_dom0 crontab - << CRONTAB
-* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1
-CRONTAB
-
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
-    # xenapi functionality is now included in os-xenapi library which houses the plugin
-    # so this function intentionally left blank
-    :
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-
-# Restore xtrace
-$_XTRACE_XENSERVER
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/placement b/lib/placement
index 2a449bf..b779866 100644
--- a/lib/placement
+++ b/lib/placement
@@ -148,7 +148,6 @@
     else
         enable_apache_site placement-api
         restart_apache_server
-        tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
     fi
 
     echo "Waiting for placement-api to start..."
diff --git a/lib/swift b/lib/swift
index a981dfc..790fb99 100644
--- a/lib/swift
+++ b/lib/swift
@@ -741,7 +741,9 @@
 
 function install_swift {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
-    setup_develop $SWIFT_DIR
+    # keystonemiddleware needs to be installed via keystone extras as defined
+    # in setup.cfg, see bug #1909018 for more details.
+    setup_develop $SWIFT_DIR keystone
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         install_apache_wsgi
     fi
diff --git a/lib/tempest b/lib/tempest
index 9f2ec30..d835c68 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -27,6 +27,7 @@
 # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
 # - ``DEFAULT_INSTANCE_TYPE``
 # - ``DEFAULT_INSTANCE_USER``
+# - ``DEFAULT_INSTANCE_ALT_USER``
 # - ``CINDER_ENABLED_BACKENDS``
 # - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
 #
@@ -110,6 +111,21 @@
     echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
 }
 
+function set_tempest_venv_constraints {
+    local tmp_c
+    tmp_c=$1
+    if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
+        (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+    else
+        echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
+        cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
+        # NOTE: we set both tox env vars; once Tempest starts using the new
+        # TOX_CONSTRAINTS_FILE var we can remove the old one.
+        export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+        export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+    fi
+}
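+# Example (illustrative): write the selected constraints to a temp file and
+# install Tempest's requirements against it, mirroring the callers below:
+#   tmp_c=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+#   set_tempest_venv_constraints $tmp_c
+#   tox -evenv-tempest -- pip install -c $tmp_c -r requirements.txt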
+
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest {
     if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -346,10 +362,12 @@
     if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
         iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
     fi
-    if [ "$VIRT_DRIVER" = "xenserver" ]; then
-        iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
-    fi
     iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW
+    iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True
+    if is_service_enabled g-api-r; then
+        iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote
+    fi
+
     # Compute
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
@@ -423,15 +441,8 @@
     iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
 
     # Scenario
-    if [ "$VIRT_DRIVER" = "xenserver" ]; then
-        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
-        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
-        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
-        iniset $TEMPEST_CONFIG scenario img_container_format ovf
-    else
-        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
-        SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
-    fi
+    SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+    SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
     iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE
 
     # If using provider networking, use the physical network for validation rather than private
@@ -443,17 +454,11 @@
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True}
     iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
     iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros}
+    iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER}
     iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
 
     # Volume
-    # Set the service catalog entry for Tempest to run on. Typically
-    # used to try different Volume API version targets. The tempest
-    # default it to 'volumev3'(v3 APIs endpoint) , so only set this
-    # if you want to change it.
-    if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then
-        iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE
-    fi
     # Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends
     if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then
         TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True}
@@ -477,12 +482,6 @@
     iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
     local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
     local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
-    # Reset microversions to None where v2 is running which does not support microversion.
-    # Both "None" means no microversion testing.
-    if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then
-        tempest_volume_min_microversion=None
-        tempest_volume_max_microversion=None
-    fi
     if [ "$tempest_volume_min_microversion" == "None" ]; then
         inicomment $TEMPEST_CONFIG volume min_microversion
     else
@@ -573,6 +572,7 @@
             iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
             iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
         else
+            iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True
             iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True
             iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
         fi
@@ -614,15 +614,13 @@
         tox -revenv-tempest --notest
     fi
 
-    # The requirements might be on a different branch, while tempest needs master requirements.
     local tmp_u_c_m
     tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
-    (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m
+    set_tempest_venv_constraints $tmp_u_c_m
     tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
     rm -f $tmp_u_c_m
 
     # Auth:
-    iniset $TEMPEST_CONFIG auth tempest_roles "member"
     if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
         if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
             tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
@@ -699,12 +697,20 @@
     # TEMPEST_DIR already exist until RECLONE is true.
     git checkout $TEMPEST_BRANCH
 
+    local tmp_u_c_m
+    tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+    set_tempest_venv_constraints $tmp_u_c_m
+
     tox -r --notest -efull
+    # TODO: remove the trailing pip constraint when a proper fix
+    # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
+    $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt
     # NOTE(mtreinish) Respect constraints in the tempest full venv, things that
     # are using a tox job other than full will not be respecting constraints but
     # running pip install -U on tempest requirements
-    $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
+    $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt
     PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest
+    rm -f $tmp_u_c_m
     popd
 }
 
@@ -712,10 +718,9 @@
 function install_tempest_plugins {
     pushd $TEMPEST_DIR
     if [[ $TEMPEST_PLUGINS != 0 ]] ; then
-        # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements.
         local tmp_u_c_m
         tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
-        (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m
+        set_tempest_venv_constraints $tmp_u_c_m
         tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS
         rm -f $tmp_u_c_m
         echo "Checking installed Tempest plugins:"
diff --git a/lib/tls b/lib/tls
index 861496d..b3cc0b4 100644
--- a/lib/tls
+++ b/lib/tls
@@ -570,14 +570,6 @@
     restart_apache_server
 }
 
-# Follow TLS proxy
-function follow_tls_proxy {
-    sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
-    tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
-    sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
-    tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
-}
-
 # Cleanup Functions
 # =================
 
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index ff97a1f..68cb1d8 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -19,12 +19,13 @@
           {% endfor -%}
           {{- mtus|min -}}
     - name: Calculate external_bridge_mtu
-      # 50 bytes is overhead for vxlan (which is greater than GRE
+      # 30 bytes is overhead for vxlan (which is greater than GRE)
       # allowing us to use either overlay option with this MTU.
+      # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay.
      # TODO(andreaf) This should work, but it may have to be reconciled with
       # the MTU setting used by the multinode setup roles in multinode pre.yaml
       set_fact:
-        external_bridge_mtu: "{{ local_mtu | int - 50 }}"
+        external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}"
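+        # Worked example (illustrative): a 1500 byte local MTU yields
+        # 1500 - 30 - 40 = 1430 for the external bridge.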
   roles:
     - configure-swap
     - setup-stack-user
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index ef839ed..db38b10 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -45,7 +45,7 @@
     cmd: |
       journalctl -o export \
           --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
-        | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
+        | gzip > {{ stage_dir }}/logs/devstack.journal.gz
 
 - name: Save journal README
   become: true
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
index fe36653..30519f6 100644
--- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -7,7 +7,7 @@
 To use it, you will need to convert it so journalctl can read it
 locally.  After downloading the file:
 
- $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
+ $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal
 
 Note this binary is not in the regular path.  On Debian/Ubuntu
 platforms, you will need to have the "systemd-journal-remote" package
diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml
index f747943..2b8ae01 100644
--- a/roles/orchestrate-devstack/tasks/main.yaml
+++ b/roles/orchestrate-devstack/tasks/main.yaml
@@ -18,6 +18,11 @@
       name: sync-devstack-data
     when: devstack_services['tls-proxy']|default(false)
 
+  - name: Sync controller ceph.conf and key rings to subnode
+    include_role:
+      name: sync-controller-ceph-conf-and-keys
+    when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins
+
   - name: Run devstack on the sub-nodes
     include_role:
       name: run-devstack
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
index c51c66c..3ba3d9c 100644
--- a/roles/process-stackviz/tasks/main.yaml
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -1,70 +1,73 @@
-- name: Devstack checks if stackviz archive exists
-  stat:
-    path: "/opt/cache/files/stackviz-latest.tar.gz"
-  register: stackviz_archive
-
-- debug:
-    msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
-  when: not stackviz_archive.stat.exists
-
-- name: Check if subunit data exists
-  stat:
-    path: "{{ zuul_work_dir }}/testrepository.subunit"
-  register: subunit_input
-
-- debug:
-    msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
-  when: not subunit_input.stat.exists
-
-- name: Install stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
+- name: Process Stackviz
   block:
-    - include_role:
-        name: ensure-pip
 
-    - pip:
-        name: "file://{{ stackviz_archive.stat.path }}"
-        virtualenv: /tmp/stackviz
-        virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
-        extra_args: -U
+  - name: Devstack checks if stackviz archive exists
+    stat:
+      path: "/opt/cache/files/stackviz-latest.tar.gz"
+    register: stackviz_archive
 
-- name: Deploy stackviz static html+js
-  command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
+  - debug:
+      msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
+    when: not stackviz_archive.stat.exists
 
-- name: Check if dstat data exists
-  stat:
-    path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
-  register: dstat_input
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
+  - name: Check if subunit data exists
+    stat:
+      path: "{{ zuul_work_dir }}/testrepository.subunit"
+    register: subunit_input
 
-- name: Run stackviz with dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - dstat_input.stat.exists
-  failed_when: False
+  - debug:
+      msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
+    when: not subunit_input.stat.exists
 
-- name: Run stackviz without dstat
-  shell: |
-    cat {{ subunit_input.stat.path }} | \
-      /tmp/stackviz/bin/stackviz-export \
-        --env --stdin \
-        {{ stage_dir }}/stackviz/data
-  when:
-    - stackviz_archive.stat.exists
-    - subunit_input.stat.exists
-    - not dstat_input.stat.exists
-  failed_when: False
+  - name: Install stackviz
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+    block:
+      - include_role:
+          name: ensure-pip
+
+      - pip:
+          name: "file://{{ stackviz_archive.stat.path }}"
+          virtualenv: /tmp/stackviz
+          virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
+          extra_args: -U
+
+  - name: Deploy stackviz static html+js
+    command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+
+  - name: Check if dstat data exists
+    stat:
+      path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
+    register: dstat_input
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+
+  - name: Run stackviz with dstat
+    shell: |
+      cat {{ subunit_input.stat.path }} | \
+        /tmp/stackviz/bin/stackviz-export \
+          --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
+          --env --stdin \
+          {{ stage_dir }}/stackviz/data
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+      - dstat_input.stat.exists
+
+  - name: Run stackviz without dstat
+    shell: |
+      cat {{ subunit_input.stat.path }} | \
+        /tmp/stackviz/bin/stackviz-export \
+          --env --stdin \
+          {{ stage_dir }}/stackviz/data
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+      - not dstat_input.stat.exists
+
+  ignore_errors: yes
diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst
new file mode 100644
index 0000000..e3d2bb4
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/README.rst
@@ -0,0 +1,3 @@
+Sync ceph config and keys between controller and subnodes
+
+Simply copy the contents of /etc/ceph on the controller to subnodes.
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
new file mode 100644
index 0000000..71ece57
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
@@ -0,0 +1,15 @@
+- name: Ensure /etc/ceph exists on subnode
+  become: true
+  file:
+    path: /etc/ceph
+    state: directory
+
+- name: Copy /etc/ceph from controller to subnode
+  become: true
+  synchronize:
+    owner: yes
+    group: yes
+    perms: yes
+    src: /etc/ceph/
+    dest: /etc/ceph/
+  delegate_to: controller
diff --git a/stack.sh b/stack.sh
index bb4dfa2..6858ab8 100755
--- a/stack.sh
+++ b/stack.sh
@@ -96,19 +96,25 @@
 # templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    die $LINENO "missing devstack/files"
+    set +o xtrace
+    echo "missing devstack/files"
+    exit 1
 fi
 
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/inc`` directory is present
 if [ ! -d $TOP_DIR/inc ]; then
-    die $LINENO "missing devstack/inc"
+    set +o xtrace
+    echo "missing devstack/inc"
+    exit 1
 fi
 
 # ``stack.sh`` keeps project libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    die $LINENO "missing devstack/lib"
+    set +o xtrace
+    echo "missing devstack/lib"
+    exit 1
 fi
 
 # Check if run in POSIX shell
@@ -221,7 +227,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8"
+SUPPORTED_DISTROS="focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8"
 
 if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
@@ -330,6 +336,9 @@
     safe_chmod 0755 $DATA_DIR
 fi
 
+# Create and/or clean the async state directory
+async_init
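+# (The async_* helpers - async_runfunc/async_run paired with async_wait -
+# run setup steps in background processes and join them later; they are
+# used for the account creation and service init steps below.)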
+
 # Configure proper hostname
 # Certain services such as rabbitmq require that the local hostname resolves
 # correctly.  Make sure it exists in /etc/hosts so that is always true.
@@ -356,6 +365,9 @@
     # EPEL packages assume that the PowerTools repository is enabled.
     sudo dnf config-manager --set-enabled PowerTools
 
+    # CentOS 8.3 changed the repository name to lower case.
+    sudo dnf config-manager --set-enabled powertools
+
     if [[ ${SKIP_EPEL_INSTALL} != True ]]; then
         _install_epel
     fi
@@ -706,16 +718,6 @@
 fi
 
 
-# Nova
-# -----
-
-if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
-    # Look for the backend password here because read_password
-    # is not a library function.
-    read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
-fi
-
-
 # Swift
 # -----
 
@@ -761,7 +763,6 @@
 # Install subunit for the subunit output stream
 pip_install -U os-testr
 
-pip_install_gr systemd-python
 # the default rate limit of 1000 messages / 30 seconds is not
 # sufficient given how verbose our logging is.
 iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
@@ -1083,19 +1084,19 @@
 
     create_keystone_accounts
     if is_service_enabled nova; then
-        create_nova_accounts
+        async_runfunc create_nova_accounts
     fi
     if is_service_enabled glance; then
-        create_glance_accounts
+        async_runfunc create_glance_accounts
     fi
     if is_service_enabled cinder; then
-        create_cinder_accounts
+        async_runfunc create_cinder_accounts
     fi
     if is_service_enabled neutron; then
-        create_neutron_accounts
+        async_runfunc create_neutron_accounts
     fi
     if is_service_enabled swift; then
-        create_swift_accounts
+        async_runfunc create_swift_accounts
     fi
 
 fi
@@ -1108,9 +1109,11 @@
 
 if is_service_enabled horizon; then
     echo_summary "Configuring Horizon"
-    configure_horizon
+    async_runfunc configure_horizon
 fi
 
+async_wait create_nova_accounts create_glance_accounts create_cinder_accounts
+async_wait create_neutron_accounts create_swift_accounts configure_horizon
 
 # Glance
 # ------
@@ -1118,7 +1121,7 @@
 # NOTE(yoctozepto): limited to node hosting the database which is the controller
 if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
     echo_summary "Configuring Glance"
-    init_glance
+    async_runfunc init_glance
 fi
 
 
@@ -1132,7 +1135,7 @@
 
     # Run init_neutron only on the node hosting the Neutron API server
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
-        init_neutron
+        async_runfunc init_neutron
     fi
 fi
 
@@ -1162,7 +1165,7 @@
 
 if is_service_enabled swift; then
     echo_summary "Configuring Swift"
-    init_swift
+    async_runfunc init_swift
 fi
 
 
@@ -1171,7 +1174,7 @@
 
 if is_service_enabled cinder; then
     echo_summary "Configuring Cinder"
-    init_cinder
+    async_runfunc init_cinder
 fi
 
 # Placement Service
@@ -1179,9 +1182,16 @@
 
 if is_service_enabled placement; then
     echo_summary "Configuring placement"
-    init_placement
+    async_runfunc init_placement
 fi
 
+# Wait for neutron, placement and the other async init tasks before starting nova
+async_wait init_neutron
+async_wait init_placement
+async_wait init_glance
+async_wait init_swift
+async_wait init_cinder
+
 # Compute Service
 # ---------------
 
@@ -1193,7 +1203,7 @@
     # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
     # not, remove the if here
     if is_service_enabled neutron; then
-        configure_neutron_nova
+        async_runfunc configure_neutron_nova
     fi
 fi
 
@@ -1228,15 +1238,21 @@
 # deployments.  This ensures the keys match across nova and cinder across all
 # hosts.
 FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec}
+if is_service_enabled cinder; then
+    iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
+fi
+
+async_wait configure_neutron_nova
+
+# NOTE(clarkb): This must come after async_wait configure_neutron_nova because
+# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If
+# we don't wait then these two ini updates race each other and can result
+# in unexpected configs.
 if is_service_enabled nova; then
     iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
     iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY"
 fi
 
-if is_service_enabled cinder; then
-    iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
-fi
-
 # Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
     echo_summary "Starting Nova API"
@@ -1283,7 +1299,7 @@
 if is_service_enabled nova; then
     echo_summary "Starting Nova"
     start_nova
-    create_flavors
+    async_runfunc create_flavors
 fi
 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"
@@ -1332,6 +1348,8 @@
     start_horizon
 fi
 
+async_wait create_flavors
+
 
 # Create account rc files
 # =======================
@@ -1468,8 +1486,12 @@
     exec 1>&3
 fi
 
+# Make sure we didn't leak any background tasks
+async_cleanup
+
 # Dump out the time totals
 time_totals
+async_print_timing
 
 # Using the cloud
 # ===============
diff --git a/stackrc b/stackrc
index a36f897..196f61f 100644
--- a/stackrc
+++ b/stackrc
@@ -245,7 +245,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="wallaby"
+DEVSTACK_SERIES="xena"
 
 ##############
 #
@@ -298,6 +298,7 @@
 # Tempest test suite
 TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
 TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master}
 
 
 ##############
@@ -554,6 +555,11 @@
 GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH}
 GITDIR["ovsdbapp"]=$DEST/ovsdbapp
 
+# os-ken used by neutron
+GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git}
+GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
+GITDIR["os-ken"]=$DEST/os-ken
+
 ##################
 #
 #  TripleO / Heat Agent Components
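With os-ken wired into GITREPO/GITBRANCH/GITDIR, it behaves like the other registered libraries: it is installed from PyPI by default, and operators can opt into the git checkout via LIBS_FROM_GIT. A minimal local.conf sketch:

    [[local|localrc]]
    # Install os-ken from source instead of PyPI
    LIBS_FROM_GIT=os-ken
    OS_KEN_BRANCH=master
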
@@ -605,10 +611,8 @@
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
-# also install an **LXC**, **OpenVZ** or **XenAPI** based system.  If xenserver-core
-# is installed, the default will be XenAPI
+# also install an **LXC** or **OpenVZ** based system.
 DEFAULT_VIRT_DRIVER=libvirt
-is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
 VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
 case "$VIRT_DRIVER" in
     ironic|libvirt)
@@ -633,14 +637,6 @@
     fake)
         NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
         ;;
-    xenserver)
-        # Xen config common to nova and neutron
-        XENAPI_USER=${XENAPI_USER:-"root"}
-        # This user will be used for dom0 - domU communication
-        #   should be able to log in to dom0 without a password
-        #   will be used to install the plugins
-        DOMZERO_USER=${DOMZERO_USER:-"domzero"}
-        ;;
     *)
         ;;
 esac
@@ -667,7 +663,7 @@
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
-CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
 CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -695,11 +691,6 @@
             DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
             DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME}
             IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";;
-        xenserver)
-            DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk}
-            DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
-            IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
-            IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
         fake)
             # Use the same as the default for libvirt
             DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
@@ -767,8 +758,8 @@
     fi
 done
 
-# 24Gb default volume backing file size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G}
+# 30GB default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G}
 
 # Prefixes for volume and instance names
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index ab7583d..5b53389 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -44,7 +44,7 @@
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"
 ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
 ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
-ALL_LIBS+=" castellan python-barbicanclient ovsdbapp"
+ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh
index b2bc0a2..71d8d51 100755
--- a/tests/test_write_devstack_local_conf_role.sh
+++ b/tests/test_write_devstack_local_conf_role.sh
@@ -6,4 +6,4 @@
 source $TOP/functions
 source $TOP/tests/unittest.sh
 
-python ./roles/write-devstack-local-conf/library/test.py
+${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 3703ece..fced2ab 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -17,6 +17,8 @@
 PASS=0
 FAILED_FUNCS=""
 
+export PYTHON=$(which python3 2>/dev/null)
+
 # pass a test, printing out MSG
 #  usage: passed message
 function passed {
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
new file mode 100644
index 0000000..8ee551b
--- /dev/null
+++ b/tools/cap-pip.txt
@@ -0,0 +1 @@
+pip<20.3
diff --git a/tools/debug_function.sh b/tools/debug_function.sh
new file mode 100755
index 0000000..68bd85d
--- /dev/null
+++ b/tools/debug_function.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# This is a small helper to speed up development and debugging with devstack.
+# It is intended to help you run a single function in a project module
+# without having to re-stack.
+#
+# For example, to run just the start_glance function, do this:
+#
+#   ./tools/debug_function.sh glance start_glance
+
+if [ ! -f "lib/$1" ]; then
+    echo "Usage: $0 [project] [function] [function...]"
+    exit 1
+fi
+
+source stackrc
+source lib/$1
+shift
+set -x
+while [ "$1" ]; do
+    echo ==== Running $1 ====
+    $1
+    echo ==== Done with $1 ====
+    shift
+done
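As a usage note, the helper accepts multiple function names and runs them in order, so a stop/start cycle is a one-liner; the functions named below are assumed to exist in lib/glance:

    # Re-run glance's stop/start functions without re-stacking
    ./tools/debug_function.sh glance stop_glance start_glance
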
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index c0e07dd..1921943 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -59,43 +59,6 @@
     fi
 }
 
-# Ubuntu Repositories
-#--------------------
-# Enable universe for bionic since it is missing when installing from ISO.
-function fixup_ubuntu {
-    if [[ "$DISTRO" != "bionic" ]]; then
-        return
-    fi
-
-    # This pulls in apt-add-repository
-    install_package "software-properties-common"
-
-    # Enable universe
-    sudo add-apt-repository -y universe
-
-    if [[ -f /etc/ci/mirror_info.sh ]] ; then
-        # If we are on a nodepool provided host and it has told us about
-        # where we can find local mirrors then use that mirror.
-        source /etc/ci/mirror_info.sh
-        sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/train main"
-    else
-        # Enable UCA:train for updated versions of QEMU and libvirt
-        sudo add-apt-repository -y cloud-archive:train
-    fi
-    REPOS_UPDATED=False
-    apt_get_update
-
-    # Since pip10, pip will refuse to uninstall files from packages
-    # that were created with distutils (rather than more modern
-    # setuptools).  This is because it technically doesn't have a
-    # manifest of what to remove.  However, in most cases, simply
-    # overwriting works.  So this hacks around those packages that
-    # have been dragged in by some other system dependency
-    sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info
-    sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
-    sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
-}
-
 # Python Packages
 # ---------------
 
@@ -184,9 +147,16 @@
     sudo zypper up -y p11-kit ca-certificates-mozilla
 }
 
+function fixup_ovn_centos {
+    if [[ $os_VENDOR != "CentOS" ]]; then
+        return
+    fi
+    # On CentOS, the OVN packages are provided by this release repo
+    yum_install centos-release-openstack-victoria
+}
+
 function fixup_all {
     fixup_keystone
-    fixup_ubuntu
     fixup_fedora
     fixup_suse
 }
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 3a27c4a..81231be 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -22,7 +22,7 @@
 
 # Possible virt drivers, if we have more, add them here. Always keep
 # dummy in the end position to trigger the fall through case.
-DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+DRIVERS="openvz ironic libvirt vsphere dummy"
 
 # Extra variables to trigger getting additional images.
 export ENABLED_SERVICES="h-api,tr-api"
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index f3fd1e2..9afd2e5 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -91,7 +91,9 @@
             die $LINENO "Download of get-pip.py failed"
         touch $LOCAL_PIP.downloaded
     fi
-    sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
+    # TODO: remove the trailing pip constraint when a proper fix
+    # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
+    sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
 }
 
 
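The new -c argument is pip's standard constraints flag, which get-pip.py passes through to the pip install it performs, so it caps the version of pip itself during bootstrap. Conceptually this is similar to installing with an inline upper bound (sketch only, not what the script executes):

    # Equivalent effect to the cap-pip.txt constraint: upgrade pip but
    # stay below 20.3
    python3 -m pip install --upgrade 'pip<20.3'
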
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
index b15a0bf..1b081bb 100644
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -24,17 +24,19 @@
         # iterate over the /proc/%pid/status files manually
         try:
             s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
-        except EnvironmentError:
+            with s:
+                for line in s:
+                    result = LCK_SUMMARY_REGEX.search(line)
+                    if result:
+                        locked = int(result.group('locked'))
+                        if locked:
+                            mlock_users.append({'name': proc.name(),
+                                                'pid': proc.pid,
+                                                'locked': locked})
+        except OSError:
+            # pids can disappear, we're ok with that
             continue
-        with s:
-            for line in s:
-                result = LCK_SUMMARY_REGEX.search(line)
-                if result:
-                    locked = int(result.group('locked'))
-                    if locked:
-                        mlock_users.append({'name': proc.name(),
-                                            'pid': proc.pid,
-                                            'locked': locked})
+
 
     # produce a single line log message with per process mlock stats
     if mlock_users:
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
deleted file mode 100644
index 1d994a6..0000000
--- a/tools/uec/meta.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import BaseHTTPServer
-import SimpleHTTPServer
-import sys
-
-
-def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
-         ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
-    """simple http server that listens on a give address:port."""
-
-    server_address = (host, port)
-
-    HandlerClass.protocol_version = protocol
-    httpd = ServerClass(server_address, HandlerClass)
-
-    sa = httpd.socket.getsockname()
-    print("Serving HTTP on", sa[0], "port", sa[1], "...")
-    httpd.serve_forever()
-
-if __name__ == '__main__':
-    if sys.argv[1:]:
-        address = sys.argv[1]
-    else:
-        address = '0.0.0.0'
-    if ':' in address:
-        host, port = address.split(':')
-    else:
-        host = address
-        port = 8080
-
-    main(host, int(port))
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 6a618f5..22770f1 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -165,7 +165,7 @@
 
     _dump_cmd("bridge link")
     _dump_cmd("ip link show type bridge")
-    ip_cmds = ["neigh", "addr", "link", "route"]
+    ip_cmds = ["neigh", "addr", "route", "-6 route"]
     for cmd in ip_cmds + ['netns']:
         _dump_cmd("ip %s" % cmd)
     for netns_ in _netns_list():
diff --git a/tools/xen/README.md b/tools/xen/README.md
deleted file mode 100644
index 2873011..0000000
--- a/tools/xen/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there.
-
-.. _os-xenapi: https://opendev.org/x/os-xenapi/
diff --git a/tox.ini b/tox.ini
index ed28636..ec764ab 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-minversion = 1.6
+minversion = 3.18.0
 skipsdist = True
 envlist = bashate
 
@@ -13,7 +13,7 @@
 # modified bashate tree
 deps =
    {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
-whitelist_externals = bash
+allowlist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)    \
          -not \( -type d -name doc -prune \)     \
@@ -34,8 +34,10 @@
          -print0 | xargs -0 bashate -v -iE006 -eE005,E042"
 
 [testenv:docs]
-deps = -r{toxinidir}/doc/requirements.txt
-whitelist_externals = bash
+deps =
+  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+  -r{toxinidir}/doc/requirements.txt
+allowlist_externals = bash
 setenv =
   TOP_DIR={toxinidir}
 commands =
@@ -43,7 +45,7 @@
 
 [testenv:pdf-docs]
 deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
    make
 commands =
    sphinx-build -W -b latex doc/source doc/build/pdf
diff --git a/unstack.sh b/unstack.sh
index 3197cf1..d9dca7c 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -184,3 +184,4 @@
 fi
 
 clean_pyc_files
+rm -Rf $DEST/async