Merge "Global option for enforcing scope (ENFORCE_SCOPE)"
diff --git a/.zuul.yaml b/.zuul.yaml
index 0f04716..ca3e692 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -57,16 +57,6 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-centos-8-stream
-    nodes:
-      - name: controller
-        label: centos-8-stream
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
     name: devstack-single-node-centos-9-stream
     nodes:
       - name: controller
@@ -147,12 +137,12 @@
           - compute1
 
 - nodeset:
-    name: openstack-two-node-centos-8-stream
+    name: openstack-two-node-centos-9-stream
     nodes:
       - name: controller
-        label: centos-8-stream
+        label: centos-9-stream
       - name: compute1
-        label: centos-8-stream
+        label: centos-9-stream
     groups:
       # Node where tests are executed and test results collected
       - name: tempest
@@ -389,6 +379,7 @@
         '{{ devstack_log_dir }}/worlddump-latest.txt': logs
         '{{ devstack_full_log}}': logs
         '{{ stage_dir }}/verify_tempest_conf.log': logs
+        '{{ stage_dir }}/performance.json': logs
         '{{ stage_dir }}/apache': logs
         '{{ stage_dir }}/apache_config': logs
         '{{ stage_dir }}/etc': logs
@@ -661,21 +652,10 @@
 # and these platforms don't have the round-the-clock support to avoid
 # becoming blockers in that situation.
 - job:
-    name: devstack-platform-centos-8-stream
-    parent: tempest-full-py3
-    description: CentOS 8 Stream platform test
-    nodeset: devstack-single-node-centos-8-stream
-    voting: false
-    timeout: 9000
-    vars:
-      configure_swap_size: 4096
-
-- job:
     name: devstack-platform-centos-9-stream
     parent: tempest-full-py3
     description: CentOS 9 Stream platform test
     nodeset: devstack-single-node-centos-9-stream
-    voting: false
     timeout: 9000
     vars:
       configure_swap_size: 4096
@@ -844,10 +824,8 @@
         - devstack-ipv6
         - devstack-enforce-scope
         - devstack-platform-fedora-latest
-        - devstack-platform-centos-8-stream
         - devstack-platform-centos-9-stream
         - devstack-platform-debian-bullseye
-        - devstack-platform-openEuler-20.03-SP2
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
@@ -891,6 +869,7 @@
       jobs:
         - devstack
         - devstack-ipv6
+        - devstack-platform-centos-9-stream
         - devstack-enforce-scope
         - devstack-multinode
         - devstack-unit-tests
@@ -945,6 +924,7 @@
 
     experimental:
       jobs:
+        - devstack-platform-openEuler-20.03-SP2
         - nova-multi-cell
         - nova-next
         - neutron-fullstack-with-uwsgi
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index dd8f21f..40a8725 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -279,7 +279,7 @@
 
 ::
 
-    LOGDAYS=1
+    LOGDAYS=2
 
 Some coloring is used during the DevStack runs to make it easier to
 see what is going on. This can be disabled with::
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 6850553..2e8e8f5 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -75,6 +75,7 @@
 openstack/networking-sfc                 `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
 openstack/neutron                        `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
 openstack/neutron-dynamic-routing        `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
+openstack/neutron-fwaas                  `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
 openstack/neutron-fwaas-dashboard        `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
 openstack/neutron-tempest-plugin         `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
 openstack/neutron-vpnaas                 `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template
new file mode 100644
index 0000000..dc519d7
--- /dev/null
+++ b/files/lvm-backing-file.template
@@ -0,0 +1,16 @@
+[Unit]
+Description=Activate LVM backing file %BACKING_FILE%
+DefaultDependencies=no
+After=systemd-udev-settle.service
+Before=lvm2-activation-early.service
+Wants=systemd-udev-settle.service
+
+[Service]
+ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE%
+ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)'
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=local-fs.target
+Also=systemd-udev-settle.service
diff --git a/files/rpms/general b/files/rpms/general
index 163a7c8..668705b 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -17,6 +17,7 @@
 libxslt-devel # lxml
 libyaml-devel
 make # dist:openEuler-20.03
+mod_ssl # required for tls-proxy on centos 9 stream computes
 net-tools
 openssh-server
 openssl
diff --git a/functions-common b/functions-common
index 603e7d8..b660245 100644
--- a/functions-common
+++ b/functions-common
@@ -673,6 +673,18 @@
         fi
     fi
 
+    # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions
+    # about how we clone and work with repos.  Mark them safe globally
+    # as a work-around.
+    #
+    # NOTE(danms): On bionic (and likely others) git-config may write
+    # ~stackuser/.gitconfig if not run with sudo -H. Using --system
+    # writes these changes to /etc/gitconfig which is more
+    # discoverable anyway.
+    #
+    # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9
+    sudo git config --system --add safe.directory ${git_dest}
+
     # print out the results so we know what change was used in the logs
     cd $git_dest
     git show --oneline | head -1
diff --git a/lib/apache b/lib/apache
index f29c7ea..02827d1 100644
--- a/lib/apache
+++ b/lib/apache
@@ -27,6 +27,11 @@
 APACHE_USER=${APACHE_USER:-$STACK_USER}
 APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
 
+APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+    APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST]
+fi
+
 
 # Set up apache name and configuration directory
 # Note that APACHE_CONF_DIR is really more accurately apache's vhost
@@ -323,7 +328,7 @@
     rm -rf $file
     iniset "$file" uwsgi wsgi-file "$wsgi"
     port=$(get_random_port)
-    iniset "$file" uwsgi http-socket "127.0.0.1:$port"
+    iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
     iniset "$file" uwsgi processes $API_WORKERS
     # This is running standalone
     iniset "$file" uwsgi master true
@@ -359,7 +364,7 @@
     apache_conf=$(apache_site_config_for $name)
     echo "KeepAlive Off" | sudo tee $apache_conf
     echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
-    echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf
+    echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf
     enable_apache_site $name
     restart_apache_server
 }
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 30e4b7c..6b3ea02 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -86,10 +86,16 @@
         exit_distro_not_supported "mysql configuration"
     fi
 
-    # Start mysql-server
+    # Change bind-address from localhost (127.0.0.1) to any (::)
+    iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
+
+    # (Re)Start mysql-server
     if is_fedora || is_suse; then
         # service is not started by default
         start_service $MYSQL_SERVICE_NAME
+    elif is_ubuntu; then
+        # required since bind-address could have changed above
+        restart_service $MYSQL_SERVICE_NAME
     fi
 
     # Set the root password - only works the first time. For Ubuntu, we already
@@ -102,7 +108,7 @@
     if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
         local cmd_args="-uroot -p$DATABASE_PASSWORD "
     else
-        local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 "
+        local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST "
     fi
 
     # In mariadb e.g. on Ubuntu socket plugin is used for authentication
@@ -119,9 +125,7 @@
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
-    # Change bind-address from localhost (127.0.0.1) to any (::) and
-    # set default db type to InnoDB
-    iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
+    # Set default db type to InnoDB
     iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
     iniset -sudo $my_conf mysqld default-storage-engine InnoDB
     iniset -sudo $my_conf mysqld max_connections 1024
@@ -146,6 +150,15 @@
         iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
     fi
 
+    if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+        echo "enabling MySQL performance_schema items"
+        # Enable long query history
+        iniset -sudo $my_conf mysqld \
+               performance-schema-consumer-events-statements-history-long TRUE
+        iniset -sudo $my_conf mysqld \
+               performance_schema_events_stages_history_long_size 1000000
+    fi
+
     restart_service $MYSQL_SERVICE_NAME
 }
 
diff --git a/lib/glance b/lib/glance
index 04b9011..ba98f41 100644
--- a/lib/glance
+++ b/lib/glance
@@ -309,13 +309,13 @@
     iniset $GLANCE_API_CONF oslo_limit username glance
     iniset $GLANCE_API_CONF oslo_limit auth_type password
     iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
-    iniset $GLANCE_API_CONF oslo_limit system_scope "'all'"
+    iniset $GLANCE_API_CONF oslo_limit system_scope all
     iniset $GLANCE_API_CONF oslo_limit endpoint_id \
            $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID)
 
     # Allow the glance service user to read quotas
-    openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \
-        --system all reader
+    openstack --os-cloud devstack-system-admin role add --user glance \
+        --user-domain $SERVICE_DOMAIN_NAME --system all reader
 }
 
 # configure_glance() - Set config files, create data dirs, etc
diff --git a/lib/lvm b/lib/lvm
index b826c1b..d3f6bf1 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -53,28 +53,10 @@
     sudo vgremove -f $vg
 }
 
-# _clean_lvm_backing_file() removes the backing file of the
-# volume group
-#
-# Usage: _clean_lvm_backing_file() $backing_file
-function _clean_lvm_backing_file {
-    local backing_file=$1
-
-    # If the backing physical device is a loop device, it was probably setup by DevStack
-    if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
-        local vg_dev
-        vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
-        if [[ -n "$vg_dev" ]]; then
-            sudo losetup -d $vg_dev
-        fi
-        rm -f $backing_file
-    fi
-}
-
 # clean_lvm_volume_group() cleans up the volume group and removes the
 # backing file
 #
-# Usage: clean_lvm_volume_group $vg
+# Usage: clean_lvm_volume_group() $vg
 function clean_lvm_volume_group {
     local vg=$1
 
@@ -83,11 +65,22 @@
     # if there is no logical volume left, it's safe to attempt a cleanup
     # of the backing file
     if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
-        _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX
+        local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX
+
+        if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \
+           [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+            sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service
+            sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+            sudo systemctl daemon-reload
+        fi
+
+        # If the backing physical device is a loop device, it was probably set up by DevStack
+        if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
+            rm -f $backing_file
+        fi
     fi
 }
 
-
 # _create_lvm_volume_group creates default volume group
 #
 # Usage: _create_lvm_volume_group() $vg $size
@@ -106,8 +99,20 @@
             directio="--direct-io=on"
         fi
 
+        # Only create the systemd service if it doesn't already exist
+        if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+            sed -e "
+                s|%DIRECTIO%|${directio}|g;
+                s|%BACKING_FILE%|${backing_file}|g;
+            " $FILES/lvm-backing-file.template | sudo tee \
+                /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+
+            sudo systemctl daemon-reload
+            sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service
+        fi
+
         local vg_dev
-        vg_dev=$(sudo losetup -f --show $directio $backing_file)
+        vg_dev=$(sudo losetup --associated $backing_file -O NAME -n)
 
         # Only create volume group if it doesn't already exist
         if ! sudo vgs $vg; then
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 09b28b6..927896b 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -253,7 +253,12 @@
 
     local testcmd="test -e $OVS_RUNDIR/$service.pid"
     test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
-    sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info
+    local service_ctl_file
+    service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+    if [ -z "$service_ctl_file" ]; then
+        die $LINENO "ctl file for service $service is not present."
+    fi
+    sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
 }
 
 function clone_repository {
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 8acf586..cc41a8c 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -68,7 +68,7 @@
 function _neutron_ovs_base_install_agent_packages {
     if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then
         remove_ovs_packages
-        compile_ovs False /usr /var
+        compile_ovs False /usr/local /var
         load_conntrack_gre_module
         start_new_ovs
     else
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 9c87dce..9ae5555 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -188,12 +188,12 @@
 # start_new_ovs() - removes old ovs database, creates a new one and starts ovs
 function start_new_ovs {
     sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
-    sudo /usr/share/openvswitch/scripts/ovs-ctl start
+    sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
 }
 
 # stop_new_ovs() - stops ovs
 function stop_new_ovs {
-    local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl'
+    local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl'
 
     if [ -x $ovs_ctl ] ; then
         sudo $ovs_ctl stop
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index cd98115..fbd4692 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -147,10 +147,6 @@
 }
 
 function create_neutron_initial_network {
-    local project_id
-    project_id=$(openstack project list | grep " demo " | get_field 1)
-    die_if_not_set $LINENO project_id "Failure retrieving project_id for demo"
-
     # Allow drivers that need to create an initial network to do so here
     if type -p neutron_plugin_create_initial_network_profile > /dev/null; then
         neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
@@ -171,14 +167,14 @@
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
         NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
-        die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
+        die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
             if [ -z $SUBNETPOOL_V4_ID ]; then
                 fixed_range_v4=$FIXED_RANGE
             fi
             SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
-            die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
+            die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME"
         fi
 
         if [[ "$IP_VERSION" =~ .*6 ]]; then
@@ -188,7 +184,7 @@
                 fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
             fi
             IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
-            die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
+            die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME"
         fi
 
         if [[ $Q_AGENT == "openvswitch" ]]; then
@@ -198,16 +194,16 @@
         fi
     else
         NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
-        die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
+        die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
             # Create IPv4 private subnet
-            SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id)
+            SUBNET_ID=$(_neutron_create_private_subnet_v4)
         fi
 
         if [[ "$IP_VERSION" =~ .*6 ]]; then
             # Create IPv6 private subnet
-            IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id)
+            IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6)
         fi
     fi
 
@@ -216,11 +212,11 @@
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
             ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
-            die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
+            die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
         else
             # Plugin only supports creating a single router, which should be admin owned.
             ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
-            die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
+            die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
         fi
 
         EXTERNAL_NETWORK_FLAGS="--external"
@@ -249,7 +245,6 @@
 
 # Create private IPv4 subnet
 function _neutron_create_private_subnet_v4 {
-    local project_id=$1
     if [ -z $SUBNETPOOL_V4_ID ]; then
         fixed_range_v4=$FIXED_RANGE
     fi
@@ -263,13 +258,12 @@
     subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
     local subnet_id
     subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
+    die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet"
     echo $subnet_id
 }
 
 # Create private IPv6 subnet
 function _neutron_create_private_subnet_v6 {
-    local project_id=$1
     die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
     die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
     local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
@@ -285,7 +279,7 @@
     subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
     local ipv6_subnet_id
     ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
+    die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet"
     echo $ipv6_subnet_id
 }
 
@@ -409,7 +403,10 @@
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
             local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
 
-            # Configure interface for public bridge
+            # Configure interface for public bridge by setting the interface
+            # to "up" in case the job is running entirely private network based
+            # testing.
+            sudo ip link set $ext_gw_interface up
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
             # Any IPv6 private subnet that uses the default IPV6 subnet pool
             # and that is plugged into the default router (Q_ROUTER_NAME) will
diff --git a/lib/nova b/lib/nova
index 4f98d4d..4c14374 100644
--- a/lib/nova
+++ b/lib/nova
@@ -159,6 +159,9 @@
 # image in devstack is CirrOS.
 NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0}
 
+# Whether to use Keystone unified limits instead of legacy quota limits.
+NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS)
+
 # Functions
 # ---------
 
@@ -394,6 +397,13 @@
             "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
             "http://$SERVICE_HOST:$S3_SERVICE_PORT"
     fi
+
+    # Unified limits
+    if is_service_enabled n-api; then
+        if [[ "$NOVA_USE_UNIFIED_LIMITS" = True ]]; then
+            configure_nova_unified_limits
+        fi
+    fi
 }
 
 # create_nova_conf() - Create a new nova.conf file
@@ -735,6 +745,53 @@
     fi
 }
 
+function configure_nova_unified_limits {
+    # Registered limit resources in keystone are system-specific resources.
+    # Make sure we use a system-scoped token to interact with this API.
+
+    # Default limits here mirror the legacy config-based default values.
+    # Note: disk quota is new in nova as of unified limits.
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 10 --region $REGION_NAME servers
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 20 --region $REGION_NAME class:VCPU
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 128 --region $REGION_NAME server_metadata_items
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 5 --region $REGION_NAME server_injected_files
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 100 --region $REGION_NAME server_key_pairs
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 10 --region $REGION_NAME server_groups
+    openstack --os-cloud devstack-system-admin registered limit create \
+        --service nova --default-limit 10 --region $REGION_NAME server_group_members
+
+    # Tell nova to use these limits
+    iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver"
+
+    # Configure oslo_limit so it can talk to keystone
+    iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME
+    iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD
+    iniset $NOVA_CONF oslo_limit username nova
+    iniset $NOVA_CONF oslo_limit auth_type password
+    iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
+    iniset $NOVA_CONF oslo_limit system_scope all
+    iniset $NOVA_CONF oslo_limit endpoint_id \
+           $(openstack endpoint list --service nova -f value -c ID)
+
+    # Allow the nova service user to read quotas
+    openstack --os-cloud devstack-system-admin role add --user nova \
+        --user-domain $SERVICE_DOMAIN_NAME --system all reader
+}
+
 function init_nova_service_user_conf {
     iniset $NOVA_CONF service_user send_service_user_token True
     iniset $NOVA_CONF service_user auth_type password
@@ -988,7 +1045,6 @@
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
-    local api_cell_conf=$NOVA_CONF
     local compute_cell_conf=$NOVA_CONF
 
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
diff --git a/lib/tls b/lib/tls
index b3cc0b4..5a7f5ae 100644
--- a/lib/tls
+++ b/lib/tls
@@ -169,7 +169,7 @@
 
 [ req ]
 default_bits            = 1024
-default_md              = sha1
+default_md              = sha256
 
 prompt                  = no
 distinguished_name      = req_distinguished_name
@@ -261,7 +261,7 @@
     if [ ! -r "$ca_dir/$cert_name.crt" ]; then
         # Generate a signing request
         $OPENSSL req \
-            -sha1 \
+            -sha256 \
             -newkey rsa \
             -nodes \
             -keyout $ca_dir/private/$cert_name.key \
@@ -301,7 +301,7 @@
     if [ ! -r "$ca_dir/cacert.pem" ]; then
         # Create a signing certificate request
         $OPENSSL req -config $ca_dir/ca.conf \
-            -sha1 \
+            -sha256 \
             -newkey rsa \
             -nodes \
             -keyout $ca_dir/private/cacert.key \
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 9e66f20..d8d5f68 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -20,6 +20,9 @@
   roles:
     - export-devstack-journal
     - apache-logs-conf
+    # This should run as early as possible to make sure we don't skew
+    # the post-tempest results with other activities.
+    - capture-performance-data
     - devstack-project-conf
     # capture-system-logs should be the last role before stage-output
     - capture-system-logs
diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst
new file mode 100644
index 0000000..b7a37c2
--- /dev/null
+++ b/roles/capture-performance-data/README.rst
@@ -0,0 +1,25 @@
+Generate performance logs for staging
+
+Captures usage information from mysql, systemd, apache logs, and other
+parts of the system and generates a performance.json file in the
+staging directory.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory
+
+.. zuul:rolevar:: devstack_conf_dir
+   :default: /opt/stack
+
+   The base devstack destination directory
+
+.. zuul:rolevar:: debian_suse_apache_deref_logs
+
+   The apache logs found in the debian/suse locations
+
+.. zuul:rolevar:: redhat_apache_deref_logs
+
+   The apache logs found in the redhat locations
diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/roles/capture-performance-data/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_conf_dir: "{{ devstack_base_dir }}"
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
new file mode 100644
index 0000000..f9bb0f7
--- /dev/null
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -0,0 +1,16 @@
+- name: Generate statistics
+  shell:
+    executable: /bin/bash
+    cmd: |
+      source {{ devstack_conf_dir }}/stackrc
+      python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+        --db-user="$DATABASE_USER" \
+        --db-pass="$DATABASE_PASSWORD" \
+        --db-host="$DATABASE_HOST" \
+        {{ apache_logs }} > {{ stage_dir }}/performance.json
+  vars:
+    apache_logs: >-
+      {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %}
+      --apache-log="{{ i.stat.path }}"
+      {% endfor %}
+  ignore_errors: yes
diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml
index 84f33f0..3adff17 100644
--- a/roles/setup-devstack-cache/tasks/main.yaml
+++ b/roles/setup-devstack-cache/tasks/main.yaml
@@ -2,6 +2,7 @@
   # This uses hard links to avoid using extra space.
   command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;"
   become: true
+  ignore_errors: yes
 
 - name: Set ownership of cached files
   file:
diff --git a/samples/local.conf b/samples/local.conf
index 8b76137..55b7298 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -49,7 +49,7 @@
 # path of the destination log file.  A timestamp will be appended to the given name.
 LOGFILE=$DEST/logs/stack.sh.log
 
-# Old log files are automatically removed after 7 days to keep things neat.  Change
+# Old log files are automatically removed after 2 days to keep things neat.  Change
 # the number of days by setting ``LOGDAYS``.
 LOGDAYS=2
 
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index a4e621f..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-[metadata]
-name = DevStack
-summary = OpenStack DevStack
-description_file =
-    README.rst
-author = OpenStack
-author_email = openstack-discuss@lists.openstack.org
-home_page = https://docs.openstack.org/devstack/latest
-classifier =
-    Intended Audience :: Developers
-    License :: OSI Approved :: Apache Software License
-    Operating System :: POSIX :: Linux
diff --git a/setup.py b/setup.py
deleted file mode 100755
index 70c2b3f..0000000
--- a/setup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-setuptools.setup(
-    setup_requires=['pbr'],
-    pbr=True)
diff --git a/stack.sh b/stack.sh
index 0082b99..6e9ced9 100755
--- a/stack.sh
+++ b/stack.sh
@@ -67,7 +67,9 @@
 umask 022
 
 # Not all distros have sbin in PATH for regular users.
-PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin
+# The OpenStack CLI (osc) is normally installed at /usr/local/bin/openstack,
+# so ensure /usr/local/bin is also in the PATH
+PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin
 
 # Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
diff --git a/stackrc b/stackrc
index 72180d0..0c76de0 100644
--- a/stackrc
+++ b/stackrc
@@ -197,6 +197,10 @@
 # (currently only implemented for MySQL backend)
 DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
 
+# Enable collection of extra, non-default MySQL performance_schema
+# instrumentation that is useful for DevStack performance analysis
+MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
+
 # Set a timeout for git operations.  If git is still running when the
 # timeout expires, the command will be retried up to 3 times.  This is
 # in the format for timeout(1);
@@ -239,7 +243,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="yoga"
+DEVSTACK_SERIES="zed"
 
 ##############
 #
@@ -667,7 +671,7 @@
 #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
 CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
-CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
+CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
 # which may be set in ``local.conf``.  Also allow ``DEFAULT_IMAGE_NAME`` and
diff --git a/tools/get-stats.py b/tools/get-stats.py
new file mode 100755
index 0000000..670e723
--- /dev/null
+++ b/tools/get-stats.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python3
+
+import argparse
+import datetime
+import glob
+import itertools
+import json
+import os
+import re
+import socket
+import subprocess
+import sys
+
+try:
+    import psutil
+except ImportError:
+    psutil = None
+    print('No psutil, process information will not be included',
+          file=sys.stderr)
+
+try:
+    import pymysql
+except ImportError:
+    pymysql = None
+    print('No pymysql, database information will not be included',
+          file=sys.stderr)
+
+# Keep emitted JSON flat to avoid ES mapping explosion; see https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
+
+
+def tryint(value):
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        return value
+
+
+def get_service_stats(service):
+    stats = {'MemoryCurrent': 0}
+    output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] +
+                                     ['-p%s' % stat for stat in stats])
+    for line in output.decode().split('\n'):
+        if not line:
+            continue
+        stat, val = line.split('=')
+        stats[stat] = tryint(val)
+
+    return stats
+
+
+def get_services_stats():
+    services = [os.path.basename(s) for s in
+                glob.glob('/etc/systemd/system/devstack@*.service')]
+    return [dict(service=service, **get_service_stats(service))
+            for service in services]
+
+
+def get_process_stats(proc):
+    cmdline = proc.cmdline()
+    if 'python' in cmdline[0]:
+        cmdline = cmdline[1:]
+    return {'cmd': cmdline[0],
+            'pid': proc.pid,
+            'args': ' '.join(cmdline[1:]),
+            'rss': proc.memory_info().rss}
+
+
+def get_processes_stats(matches):
+    me = os.getpid()
+    procs = psutil.process_iter()
+
+    def proc_matches(proc):
+        return me != proc.pid and any(
+            re.search(match, ' '.join(proc.cmdline()))
+            for match in matches)
+
+    return [
+        get_process_stats(proc)
+        for proc in procs
+        if proc_matches(proc)]
+
+
+def get_db_stats(host, user, passwd):
+    dbs = []
+    db = pymysql.connect(host=host, user=user, password=passwd,
+                         database='performance_schema',
+                         cursorclass=pymysql.cursors.DictCursor)
+    with db:
+        with db.cursor() as cur:
+            cur.execute(
+                'SELECT COUNT(*) AS queries,current_schema AS db FROM '
+                'events_statements_history_long GROUP BY current_schema')
+            for row in cur:
+                dbs.append({k: tryint(v) for k, v in row.items()})
+    return dbs
+
+
+def get_http_stats_for_log(logfile):
+    stats = {}
+    for line in open(logfile).readlines():
+        m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)',
+                      line)
+        if m:
+            method = m.group(1)
+            path = m.group(2)
+            status = m.group(4)
+            size = int(m.group(5))
+
+            try:
+                service, rest = path.split('/', 1)
+            except ValueError:
+                # Root calls like "GET /identity"
+                service = path
+                rest = ''
+
+            stats.setdefault(service, {'largest': 0})
+            stats[service].setdefault(method, 0)
+            stats[service][method] += 1
+            stats[service]['largest'] = max(stats[service]['largest'], size)
+
+    # Flatten this for ES
+    return [{'service': service, 'log': os.path.basename(logfile),
+             **vals}
+            for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+    return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+                                              for log in logfiles))
+
+
+def get_report_info():
+    return {
+        'timestamp': datetime.datetime.now().isoformat(),
+        'hostname': socket.gethostname(),
+    }
+
+
+if __name__ == '__main__':
+    process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--db-user', default='root',
+                        help=('MySQL user for collecting stats '
+                              '(default: "root")'))
+    parser.add_argument('--db-pass', default=None,
+                        help='MySQL password for db-user')
+    parser.add_argument('--db-host', default='localhost',
+                        help='MySQL hostname')
+    parser.add_argument('--apache-log', action='append', default=[],
+                        help='Collect API call stats from this apache log')
+    parser.add_argument('--process', action='append',
+                        default=process_defaults,
+                        help=('Include process stats for this cmdline regex '
+                              '(default is %s)' % ','.join(process_defaults)))
+    args = parser.parse_args()
+
+    data = {
+        'services': get_services_stats(),
+        'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+                                                        args.db_user,
+                                                        args.db_pass) or [],
+        'processes': psutil and get_processes_stats(args.process) or [],
+        'api': get_http_stats(args.apache_log),
+        'report': get_report_info(),
+    }
+
+    print(json.dumps(data, indent=2))
diff --git a/unstack.sh b/unstack.sh
index 4b57b6e..813f9a8 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -181,3 +181,8 @@
 
 clean_pyc_files
 rm -Rf $DEST/async
+
+# Clean any safe.directory items we wrote into the global
+# gitconfig. We can identify the relevant ones by checking that they
+# point to somewhere in our $DEST directory.
+sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig