Merge "Neutron: Set experimental option to use linuxbridge agent"
diff --git a/.zuul.yaml b/.zuul.yaml
index 5b93a77..c29cb31 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -661,6 +661,9 @@
     description: CentOS 9 Stream platform test
     nodeset: devstack-single-node-centos-9-stream
     timeout: 9000
+    # TODO(kopecmartin) n-v until the following is resolved:
+    # https://bugs.launchpad.net/neutron/+bug/1979047
+    voting: false
     vars:
       configure_swap_size: 4096
 
@@ -669,7 +672,6 @@
     parent: tempest-full-py3
     description: Debian Bullseye platform test
     nodeset: devstack-single-node-debian-bullseye
-    voting: false
     timeout: 9000
     vars:
       configure_swap_size: 4096
@@ -896,7 +898,10 @@
       jobs:
         - devstack
         - devstack-ipv6
-        - devstack-platform-centos-9-stream
+        # TODO(kopecmartin) n-v until the following is resolved:
+        # https://bugs.launchpad.net/neutron/+bug/1979047
+        # - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bullseye
         - devstack-platform-ubuntu-jammy
         - devstack-enforce-scope
         - devstack-multinode
diff --git a/functions-common b/functions-common
index f299ef1..e16bb27 100644
--- a/functions-common
+++ b/functions-common
@@ -1564,6 +1564,7 @@
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
     local extra=""
     if [[ -n "$group" ]]; then
         extra="Group=$group"
@@ -1577,6 +1578,9 @@
     iniset -sudo $unitfile "Service" "KillMode" "process"
     iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
     iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
+    if [[ -n "$env_vars" ]] ; then
+        iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+    fi
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
     fi
@@ -1591,6 +1595,7 @@
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
     local unitfile="$SYSTEMD_DIR/$service"
     mkdir -p $SYSTEMD_DIR
 
@@ -1605,6 +1610,9 @@
     iniset -sudo $unitfile "Service" "NotifyAccess" "all"
     iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
 
+    if [[ -n "$env_vars" ]] ; then
+        iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+    fi
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
     fi
@@ -1652,10 +1660,14 @@
     local systemd_service="devstack@$service.service"
     local group=$3
     local user=${4:-$STACK_USER}
+    if [[ -z "$user" ]]; then
+        user=$STACK_USER
+    fi
+    local env_vars="$5"
     if [[ "$command" =~ "uwsgi" ]] ; then
-        write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user"
+        write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
     else
-        write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+        write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
     fi
 
     $SYSTEMCTL enable $systemd_service
@@ -1676,18 +1688,20 @@
 # If the command includes shell metacharacters (;<>*) it must be run using a shell
 # If an optional group is provided sg will be used to run the
 # command as that group.
-# run_process service "command-line" [group] [user]
+# run_process service "command-line" [group] [user] [env_vars]
+# env_vars must be a space-separated list of variable assignments, e.g. "A=1 B=2"
 function run_process {
     local service=$1
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
 
     local name=$service
 
     time_start "run_process"
     if is_service_enabled $service; then
-        _run_under_systemd "$name" "$command" "$group" "$user"
+        _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars"
     fi
     time_stop "run_process"
 }
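
A minimal usage sketch of the new env_vars argument (the service name, binary, and variables below are hypothetical, not part of this change): group and user are passed as empty placeholders so env_vars lands in position 5, and write_user_unit_file renders it as an Environment= entry in the generated unit.

    # Hypothetical caller: starts "my-svc" with two extra environment variables.
    # The generated unit file will contain: Environment=FOO=1 BAR=2
    run_process my-svc "$MY_SVC_BIN_DIR/my-svc --config-file $MY_SVC_CONF" "" "" "FOO=1 BAR=2"
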
diff --git a/lib/cinder b/lib/cinder
index 52818a8..ca2c084 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -552,8 +552,13 @@
     fi
 
     run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
-    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
-    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+    # Tune glibc for Python services: use a single malloc arena for all threads
+    # and disable dynamic thresholds, reducing memory usage when native threads
+    # are used directly or via eventlet.tpool.
+    # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html
+    malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144"
+    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning"
+    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning"
 
     # NOTE(jdg): For cinder, startup order matters.  To ensure that report_capabilities is received
     # by the scheduler, start the cinder-volume service last (or restart it) after the scheduler
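
To confirm the tuning actually reached the services, the Environment= entries picked up by systemd can be inspected; a sketch assuming the standard devstack@<service> unit naming used by run_process:

    # Read-only check: prints the Environment= settings of the cinder-volume unit
    systemctl show devstack@c-vol.service --property=Environment
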
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
index e4003c0..4b18049 100644
--- a/lib/cinder_backups/ceph
+++ b/lib/cinder_backups/ceph
@@ -26,12 +26,15 @@
 
 
 function configure_cinder_backup_ceph {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-    if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+    # Execute this part only when cephadm is not used
+    if [[ "$CEPHADM_DEPLOY" = "False" ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+        if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        fi
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
     fi
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-    sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
     iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
     iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
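
When CEPHADM_DEPLOY is False, the pool and keyring created above can be verified with read-only checks; a sketch using the same variables as the function:

    # Both commands only read state; nothing is modified
    sudo ceph -c ${CEPH_CONF_FILE} osd pool ls | grep ${CINDER_BAK_CEPH_POOL}
    ls -l ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
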
diff --git a/lib/neutron b/lib/neutron
index 6e787f2..370c994 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -637,7 +637,7 @@
 # configure_rbac_policies() - Configure Neutron to enforce new RBAC
 # policies and scopes if NEUTRON_ENFORCE_SCOPE == True
 function configure_rbac_policies {
-    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then
+    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
         iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
         iniset $NEUTRON_CONF oslo_policy enforce_scope True
     else
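
With NEUTRON_ENFORCE_SCOPE (or ENFORCE_SCOPE) set to True, the values written by the iniset calls above can be read back with devstack's iniget helper; a sketch:

    # Both should print "True" when scope enforcement is enabled
    iniget $NEUTRON_CONF oslo_policy enforce_new_defaults
    iniget $NEUTRON_CONF oslo_policy enforce_scope
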
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 24bdf92..341b84d 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -176,7 +176,7 @@
     while [ ! -f $1 ]; do
         sleep 1
         count=$((count+1))
-        if [ "$count" -gt 5 ]; then
+        if [ "$count" -gt 40 ]; then
             die $LINENO "DB File $1 not found"
         fi
     done
@@ -187,7 +187,7 @@
     while [ ! -S $1 ]; do
         sleep 1
         count=$((count+1))
-        if [ "$count" -gt 5 ]; then
+        if [ "$count" -gt 40 ]; then
             die $LINENO "Socket $1 not found"
         fi
     done
@@ -642,7 +642,7 @@
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP"
-        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname)
         # Select this chassis to host gateway routers
         if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
             sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
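
The hostname recorded above (now taken from hostname(1) rather than $LOCAL_HOSTNAME) can be double-checked against the Open_vSwitch table; a read-only sketch:

    # Should print the value returned by $(hostname)
    sudo ovs-vsctl get open_vswitch . external_ids:hostname
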
diff --git a/tools/get-stats.py b/tools/get-stats.py
index a3ed7f2..b958af6 100755
--- a/tools/get-stats.py
+++ b/tools/get-stats.py
@@ -111,6 +111,7 @@
     apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
                      'length', 'c', 'agent')
     ignore_agents = ('curl', 'uwsgi', 'nova-status')
+    ignored_services = set()
     for line in csv.reader(open(logfile), delimiter=' '):
         fields = dict(zip(apache_fields, line))
         if len(fields) != len(apache_fields):
@@ -146,6 +147,10 @@
             service = url.strip('/')
             rest = ''
 
+        if not service.isalpha():
+            ignored_services.add(service)
+            continue
+
         method_key = '%s-%s' % (agent, method)
         try:
             length = int(fields['length'])
@@ -159,6 +164,10 @@
         stats[service]['largest'] = max(stats[service]['largest'],
                                         length)
 
+    if ignored_services:
+        LOG.warning('Ignored services: %s',
+                    ','.join(sorted(ignored_services)))
+
     # Flatten this for ES
     return [{'service': service, 'log': os.path.basename(logfile),
              **vals}