Merge "Configure placement section in neutron conf"
diff --git a/.zuul.yaml b/.zuul.yaml
index ca3e692..0e114af 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -17,6 +17,16 @@
- controller
- nodeset:
+ name: openstack-single-node-jammy
+ nodes:
+ - name: controller
+ label: ubuntu-jammy
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-focal
nodes:
- name: controller
@@ -398,6 +408,7 @@
'{{ stage_dir }}/rpm-qa.txt': logs
'{{ stage_dir }}/core': logs
'{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/services.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
/etc/ceph: logs
@@ -716,6 +727,72 @@
OVN_BUILD_FROM_SOURCE: True
- job:
+ name: devstack-platform-ubuntu-jammy
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test
+ nodeset: openstack-single-node-jammy
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 4096
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovn-source
+ parent: devstack-platform-ubuntu-jammy
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
+ voting: false
+ vars:
+ devstack_localrc:
+ OVN_BUILD_FROM_SOURCE: True
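+        # Presumably these pin OVN to a tagged release and OVS to a
+        # matching fixed commit so the from-source build is reproducible.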
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovs
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+ nodeset: openstack-single-node-jammy
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 8192
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+
+- job:
name: devstack-no-tls-proxy
parent: tempest-full-py3
description: |
@@ -826,6 +903,9 @@
- devstack-platform-fedora-latest
- devstack-platform-centos-9-stream
- devstack-platform-debian-bullseye
+ - devstack-platform-ubuntu-jammy
+ - devstack-platform-ubuntu-jammy-ovn-source
+ - devstack-platform-ubuntu-jammy-ovs
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 6b3ea02..b292da2 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -151,12 +151,16 @@
fi
if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
- echo "enabling MySQL performance_schema items"
- # Enable long query history
- iniset -sudo $my_conf mysqld \
- performance-schema-consumer-events-statements-history-long TRUE
- iniset -sudo $my_conf mysqld \
- performance_schema_events_stages_history_long_size 1000000
+ echo "enabling MySQL performance counting"
+
+ # Install our sqlalchemy plugin
+ pip_install ${TOP_DIR}/tools/dbcounter
+
+ # Create our stats database for accounting
+ recreate_database stats
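+        # The MEMORY engine keeps the stats table entirely in RAM, so
+        # counter updates are cheap and the data is simply dropped when
+        # the MySQL server restarts.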
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+ "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+ count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
fi
restart_service $MYSQL_SERVICE_NAME
@@ -218,7 +222,17 @@
function database_connection_url_mysql {
local db=$1
- echo "$BASE_SQL_CONN/$db?charset=utf8"
+ local plugin
+
+ # NOTE(danms): We don't enable perf on subnodes yet because the
+ # plugin is not installed there
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ if is_service_enabled mysql; then
+ plugin="&plugin=dbcounter"
+ fi
+ fi
+
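+    # Example result (illustrative values): with the plugin enabled,
+    #   mysql+pymysql://root:secret@127.0.0.1/nova?charset=utf8&plugin=dbcounter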
+ echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
}
diff --git a/lib/tempest b/lib/tempest
index 1fd4184..206b37b 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -71,6 +71,17 @@
TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them. In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
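+# For example, a local.conf importing two images might set:
+#   TEMPEST_GLANCE_IMAGE_COUNT=2
+#   TEMPEST_GLANCE_IMPORT_POLL_LIMIT=24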
+
# Neutron/Network variables
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -127,6 +138,48 @@
fi
}
+# Makes a call to glance to get the list of active images, ignoring
+# ramdisk and kernel images. Takes the names of 3 variables: an array
+# and two scalars. The array is filled with the active image UUIDs;
+# if an image named ``DEFAULT_IMAGE_NAME`` is found, its UUID is also
+# assigned to *both* scalar parameters.
+function get_active_images {
+ declare -n img_array=$1
+ declare -n img_id=$2
+ declare -n img_id_alt=$3
+
+ # start with a fresh array in case we are called multiple times
+ img_array=()
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ img_id="$IMAGE_UUID"
+ img_id_alt="$IMAGE_UUID"
+ fi
+ img_array+=($IMAGE_UUID)
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
+
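+# Example call (illustrative): fills "images" and sets both UUID
+# variables if DEFAULT_IMAGE_NAME is among the active images:
+#   declare -a images
+#   get_active_images images image_uuid image_uuid_alt
+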
+function poll_glance_images {
+ declare -n image_array=$1
+ declare -n image_id=$2
+ declare -n image_id_alt=$3
+ local -i poll_count
+
+ poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+ while (( poll_count-- > 0 )) ; do
+ sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+ get_active_images image_array image_id image_id_alt
+ if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+ return
+ fi
+ done
+ local msg
+ msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+ msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+ warn $LINENO "$msg"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -168,13 +221,21 @@
declare -a images
if is_service_enabled glance; then
- while read -r IMAGE_NAME IMAGE_UUID; do
- if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
- image_uuid="$IMAGE_UUID"
- image_uuid_alt="$IMAGE_UUID"
+ get_active_images images image_uuid image_uuid_alt
+
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ # Glance image import is asynchronous and may be configured
+ # to do image conversion. If image import is being used,
+ # it's possible that this code is being executed before the
+ # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ poll_glance_images images image_uuid image_uuid_alt
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+ exit 1
+ fi
fi
- images+=($IMAGE_UUID)
- done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ fi
case "${#images[*]}" in
0)
diff --git a/lib/tls b/lib/tls
index 5a7f5ae..b8758cd 100644
--- a/lib/tls
+++ b/lib/tls
@@ -557,7 +557,7 @@
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
</VirtualHost>
EOF
if is_suse ; then
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
- coredumps
- dns resolver
- listen53
+- services
- unbound.log
- deprecation messages
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # Services status
+      sudo systemctl status --all > {{ stage_dir }}/services.txt 2>/dev/null
+
# NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
# failed to start due to denials from SELinux — useful for CentOS
# and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
diff --git a/stack.sh b/stack.sh
index 6e9ced9..e53280e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -229,7 +229,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
+SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -1512,6 +1512,19 @@
time_totals
async_print_timing
+if is_service_enabled mysql; then
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+ echo ""
+ echo ""
+ echo "Post-stack database query stats:"
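+        # One row per (db, op) pair, e.g. "| nova | SELECT | 1234 |"
+        # (illustrative output); the table is then cleared for the next run.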
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'SELECT * FROM queries' -t 2>/dev/null
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'DELETE FROM queries' 2>/dev/null
+ fi
+fi
+
+
# Using the cloud
# ===============
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..5057f0f
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,120 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
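+#
+# The plugin is registered under the name "dbcounter" via the
+# [sqlalchemy.plugins] entry point declared in setup.cfg:
+#
+#   [sqlalchemy.plugins]
+#   dbcounter = dbcounter:LogCursorEventsPlugin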
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.db_name = url.database
+ LOG.info('Registered counter for database %s' % self.db_name)
+ new_url = sqlalchemy.engine.URL.create(url.drivername,
+ url.username,
+ url.password,
+ url.host,
+ url.port,
+ 'stats')
+
+ self.engine = sqlalchemy.create_engine(new_url)
+ self.queue = queue.Queue()
+ self.thread = None
+
+ def engine_created(self, engine):
+ """Hook the engine creation process.
+
+ This is the plug point for the sqlalchemy plugin. Using
+ plugin=$this in the URL causes this method to be called when
+ the engine is created, giving us a chance to hook it below.
+ """
+ event.listen(engine, "before_cursor_execute", self._log_event)
+
+ def ensure_writer_thread(self):
+ self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+ self.thread.start()
+
+ def _log_event(self, conn, cursor, statement, parameters, context,
+ executemany):
+ """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation from the first word of the
+        statement, falling back to 'OTHER' if it cannot be determined.
+ """
+
+ # Start our thread if not running. If we were forked after the
+ # engine was created and this plugin was associated, our
+ # writer thread is gone, so respawn.
+ if not self.thread or not self.thread.is_alive():
+ self.ensure_writer_thread()
+
+ try:
+ op = statement.strip().split(' ', 1)[0] or 'OTHER'
+ except Exception:
+ op = 'OTHER'
+
+ self.queue.put((self.db_name, op))
+
+ def do_incr(self, db, op, count):
+ """Increment the counter for (db,op) by count."""
+
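+        # Upsert: (db, op) is the primary key, so ON DUPLICATE KEY
+        # UPDATE atomically adds this batch's count to an existing row.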
+ query = ('INSERT INTO queries (db, op, count) '
+ ' VALUES (%s, %s, %s) '
+ ' ON DUPLICATE KEY UPDATE count=count+%s')
+ try:
+ with self.engine.begin() as conn:
+                conn.execute(query, (db, op, count, count))
+ except Exception as e:
+ LOG.error('Failed to account for access to database %r: %s',
+ db, e)
+
+ def stat_writer(self):
+ """Consume messages from the queue and write them in batches.
+
+        This reads "hits" from a queue fed by _log_event() and writes
+        (db,op)+=count stats to the database after ten seconds of no
+        activity, to avoid triggering a write for every SELECT call.
+        It writes at least every thirty seconds, or every 100 pending
+        hits, so that constant activity cannot starve it.
+ """
+ LOG.debug('[%i] Writer thread running' % os.getpid())
+ while True:
+ to_write = {}
+ total = 0
+ last = time.time()
+ while time.time() - last < 30 and total < 100:
+ try:
+ item = self.queue.get(timeout=10)
+ to_write.setdefault(item, 0)
+ to_write[item] += 1
+ total += 1
+ except queue.Empty:
+ break
+
+ if to_write:
+ LOG.debug('[%i] Writing DB stats %s' % (
+ os.getpid(),
+ ','.join(['%s:%s=%i' % (db, op, count)
+ for (db, op), count in to_write.items()])))
+
+ for (db, op), count in to_write.items():
+ self.do_incr(db, op, count)
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..f9f26f2
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+ [sqlalchemy.plugins]
+ dbcounter = dbcounter:LogCursorEventsPlugin
diff --git a/tools/get-stats.py b/tools/get-stats.py
index 670e723..ffe4676 100755
--- a/tools/get-stats.py
+++ b/tools/get-stats.py
@@ -1,10 +1,12 @@
#!/usr/bin/python3
import argparse
+import csv
import datetime
import glob
import itertools
import json
+import logging
import os
import re
import socket
@@ -25,6 +27,8 @@
print('No pymysql, database information will not be included',
file=sys.stderr)
+LOG = logging.getLogger('perf')
+
# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
@@ -83,13 +87,11 @@
def get_db_stats(host, user, passwd):
dbs = []
db = pymysql.connect(host=host, user=user, password=passwd,
- database='performance_schema',
+ database='stats',
cursorclass=pymysql.cursors.DictCursor)
with db:
with db.cursor() as cur:
- cur.execute(
- 'SELECT COUNT(*) AS queries,current_schema AS db FROM '
- 'events_statements_history_long GROUP BY current_schema')
+ cur.execute('SELECT db,op,count FROM queries')
for row in cur:
dbs.append({k: tryint(v) for k, v in row.items()})
return dbs
@@ -97,26 +99,56 @@
def get_http_stats_for_log(logfile):
stats = {}
- for line in open(logfile).readlines():
- m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)',
- line)
- if m:
- method = m.group(1)
- path = m.group(2)
- status = m.group(4)
- size = int(m.group(5))
+ apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+ 'length', 'c', 'agent')
+ ignore_agents = ('curl', 'uwsgi', 'nova-status')
+ for line in csv.reader(open(logfile), delimiter=' '):
+ fields = dict(zip(apache_fields, line))
+ if len(fields) != len(apache_fields):
+ # Not a combined access log, so we can bail completely
+ return []
+ try:
+ method, url, http = fields['request'].split(' ')
+ except ValueError:
+ method = url = http = ''
+ if 'HTTP' not in http:
+ # Not a combined access log, so we can bail completely
+ return []
- try:
- service, rest = path.split('/', 1)
- except ValueError:
- # Root calls like "GET /identity"
- service = path
- rest = ''
+        # Tempest leaves the default urllib User-Agent unchanged, but
+        # client libraries and inter-service API calls set proper
+        # strings, so assume 'python-urllib' is tempest to tell it apart.
+ if 'python-urllib' in fields['agent'].lower():
+ agent = 'tempest'
+ else:
+ agent = fields['agent'].split(' ')[0]
+ if agent.startswith('python-'):
+ agent = agent.replace('python-', '')
+ if '/' in agent:
+ agent = agent.split('/')[0]
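+        # e.g. a User-Agent of "python-novaclient/17.0.0" (illustrative)
+        # reduces to "novaclient"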
- stats.setdefault(service, {'largest': 0})
- stats[service].setdefault(method, 0)
- stats[service][method] += 1
- stats[service]['largest'] = max(stats[service]['largest'], size)
+ if agent in ignore_agents:
+ continue
+
+ try:
+ service, rest = url.strip('/').split('/', 1)
+ except ValueError:
+ # Root calls like "GET /identity"
+ service = url.strip('/')
+ rest = ''
+
+ method_key = '%s-%s' % (agent, method)
+ try:
+ length = int(fields['length'])
+ except ValueError:
+ LOG.warning('[%s] Failed to parse length %r from line %r' % (
+ logfile, fields['length'], line))
+ length = 0
+ stats.setdefault(service, {'largest': 0})
+ stats[service].setdefault(method_key, 0)
+ stats[service][method_key] += 1
+ stats[service]['largest'] = max(stats[service]['largest'],
+ length)
# Flatten this for ES
return [{'service': service, 'log': os.path.basename(logfile),
@@ -133,6 +165,7 @@
return {
'timestamp': datetime.datetime.now().isoformat(),
'hostname': socket.gethostname(),
+ 'version': 2,
}
@@ -154,6 +187,8 @@
'(default is %s)' % ','.join(process_defaults)))
args = parser.parse_args()
+ logging.basicConfig(level=logging.WARNING)
+
data = {
'services': get_services_stats(),
'db': pymysql and args.db_pass and get_db_stats(args.db_host,
diff --git a/unstack.sh b/unstack.sh
index 813f9a8..a36af3f 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -185,4 +185,4 @@
# Clean any safe.directory items we wrote into the global
# gitconfig. We can identify the relevant ones by checking that they
# point to somewhere in our $DEST directory.
-sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig
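+# Use '+' as the sed address delimiter since ${DEST} contains slashes,
+# and match "directory = " with spaces, which is how git writes the key.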
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig