Merge "Add Ubuntu 22.04 LTS (jammy) platform job"
diff --git a/.zuul.yaml b/.zuul.yaml
index e87f410..b449ea6 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -408,6 +408,7 @@
'{{ stage_dir }}/rpm-qa.txt': logs
'{{ stage_dir }}/core': logs
'{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/services.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
/etc/ceph: logs
@@ -647,10 +648,7 @@
This job runs the devstack with scope checks enabled.
vars:
devstack_localrc:
- # Keep enabeling the services here to run with system scope
- CINDER_ENFORCE_SCOPE: true
- GLANCE_ENFORCE_SCOPE: true
- NEUTRON_ENFORCE_SCOPE: true
+ ENFORCE_SCOPE: true
- job:
name: devstack-multinode
diff --git a/functions-common b/functions-common
index 8651604..b660245 100644
--- a/functions-common
+++ b/functions-common
@@ -1166,7 +1166,7 @@
}
function is_ironic_enforce_scope {
- is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+ is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0
return 1
}
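
The same "|| ENFORCE_SCOPE" disjunction is repeated for cinder, glance, keystone, neutron and tempest below. A minimal local.conf sketch to exercise either path (placeholder values, not part of this change):

    [[local|localrc]]
    # one global switch for every service:
    ENFORCE_SCOPE=True
    # or the pre-existing per-service knobs, which keep working:
    #IRONIC_ENFORCE_SCOPE=True
    #CINDER_ENFORCE_SCOPE=True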
diff --git a/lib/cinder b/lib/cinder
index b029fa0..52818a8 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -380,7 +380,7 @@
iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
- if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $CINDER_CONF oslo_policy enforce_scope true
iniset $CINDER_CONF oslo_policy enforce_new_defaults true
fi
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 6b3ea02..b292da2 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -151,12 +151,16 @@
fi
if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
- echo "enabling MySQL performance_schema items"
- # Enable long query history
- iniset -sudo $my_conf mysqld \
- performance-schema-consumer-events-statements-history-long TRUE
- iniset -sudo $my_conf mysqld \
- performance_schema_events_stages_history_long_size 1000000
+ echo "enabling MySQL performance counting"
+
+ # Install our sqlalchemy plugin
+ pip_install ${TOP_DIR}/tools/dbcounter
+
+ # Create our stats database for accounting
+ recreate_database stats
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+ "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+ count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
fi
restart_service $MYSQL_SERVICE_NAME
@@ -218,7 +222,17 @@
function database_connection_url_mysql {
local db=$1
- echo "$BASE_SQL_CONN/$db?charset=utf8"
+ local plugin
+
+ # NOTE(danms): We don't enable perf on subnodes yet because the
+ # plugin is not installed there
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ if is_service_enabled mysql; then
+ plugin="&plugin=dbcounter"
+ fi
+ fi
+
+ echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
}
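
For concreteness: with MYSQL_GATHER_PERFORMANCE=True on the controller, the helper above now emits a URL of roughly this shape (dialect, credentials and host are illustrative devstack defaults, not taken from this patch). The plugin= query argument is the hook: sqlalchemy strips it at create_engine() time and loads the named plugin from its sqlalchemy.plugins entry-point group.

    $ database_connection_url_mysql nova
    mysql+pymysql://root:secret@127.0.0.1/nova?charset=utf8&plugin=dbcounter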
diff --git a/lib/glance b/lib/glance
index b94c06d..ba98f41 100644
--- a/lib/glance
+++ b/lib/glance
@@ -432,7 +432,7 @@
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
- if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $GLANCE_API_CONF oslo_policy enforce_scope true
iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
diff --git a/lib/keystone b/lib/keystone
index a4c8a52..80a136f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -265,7 +265,7 @@
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
fi
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $KEYSTONE_CONF oslo_policy enforce_scope true
iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
diff --git a/lib/neutron b/lib/neutron
index e7719d4..f24ccfb 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -632,7 +632,7 @@
# configure_rbac_policies() - Configure Neutron to enforce new RBAC
# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "ENFORCE_SCOPE" == "True" ]]; then
iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
iniset $NEUTRON_CONF oslo_policy enforce_scope True
else
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index b906a1b..253b457 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -500,7 +500,7 @@
# configure_rbac_policies() - Configure Neutron to enforce new RBAC
# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
iniset $NEUTRON_CONF oslo_policy enforce_scope True
else
diff --git a/lib/tempest b/lib/tempest
index 4504663..206b37b 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -71,6 +71,17 @@
TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them. In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
+
# Neutron/Network variables
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -127,6 +138,48 @@
fi
}
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images. Takes 3 arguments, an array and two
+# variables. The array will contain the list of active image UUIDs;
+# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of *both* other parameters.
+function get_active_images {
+ declare -n img_array=$1
+ declare -n img_id=$2
+ declare -n img_id_alt=$3
+
+ # start with a fresh array in case we are called multiple times
+ img_array=()
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ img_id="$IMAGE_UUID"
+ img_id_alt="$IMAGE_UUID"
+ fi
+ img_array+=($IMAGE_UUID)
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
+
+function poll_glance_images {
+ declare -n image_array=$1
+ declare -n image_id=$2
+ declare -n image_id_alt=$3
+ local -i poll_count
+
+ poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+ while (( poll_count-- > 0 )) ; do
+ sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+ get_active_images image_array image_id image_id_alt
+ if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+ return
+ fi
+ done
+ local msg
+ msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+ msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+ warn $LINENO "$msg"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -168,13 +221,21 @@
declare -a images
if is_service_enabled glance; then
- while read -r IMAGE_NAME IMAGE_UUID; do
- if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
- image_uuid="$IMAGE_UUID"
- image_uuid_alt="$IMAGE_UUID"
+ get_active_images images image_uuid image_uuid_alt
+
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ # Glance image import is asynchronous and may be configured
+ # to do image conversion. If image import is being used,
+ # it's possible that this code is being executed before the
+ # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ poll_glance_images images image_uuid image_uuid_alt
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+ exit 1
+ fi
fi
- images+=($IMAGE_UUID)
- done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ fi
case "${#images[*]}" in
0)
@@ -607,14 +668,19 @@
# If services enable the enforce_scope for their policy
# we need to enable the same on Tempest side so that
# test can be run with scoped token.
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $TEMPEST_CONFIG enforce_scope keystone true
iniset $TEMPEST_CONFIG auth admin_system 'all'
iniset $TEMPEST_CONFIG auth admin_project_name ''
fi
- iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
- iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope glance true
+ fi
+
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope cinder true
+ fi
if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
# libvirt-lxc does not support boot from volume or attaching volumes
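
get_active_images and poll_glance_images above take variable *names* and bind them with bash namerefs, so they fill the caller's array and scalars in place. A standalone sketch of the mechanism (names are illustrative; declare -n needs bash 4.3+):

    function fill_array {
        declare -n out=$1    # 'out' now aliases the caller's variable
        out=(one two three)
    }
    declare -a images
    fill_array images
    echo ${#images[*]}       # prints 3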
diff --git a/lib/tls b/lib/tls
index 5a7f5ae..b8758cd 100644
--- a/lib/tls
+++ b/lib/tls
@@ -557,7 +557,7 @@
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
</VirtualHost>
EOF
if is_suse ; then
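
One consequence of switching to the stock combined format: tools/get-stats.py (below) can parse the access log field by field. For reference, Apache's predefined combined format is:

    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined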
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
- coredumps
- dns resolver
- listen53
+- services
- unbound.log
- deprecation messages
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # Services status
+ sudo systemctl status --all > {{ stage_dir }}/services.txt 2>/dev/null
+
# NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
# failed to start due to denials from SELinux — useful for CentOS
# and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
diff --git a/stack.sh b/stack.sh
index 1970105..e53280e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1512,6 +1512,19 @@
time_totals
async_print_timing
+if is_service_enabled mysql; then
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+ echo ""
+ echo ""
+ echo "Post-stack database query stats:"
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'SELECT * FROM queries' -t 2>/dev/null
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'DELETE FROM queries' 2>/dev/null
+ fi
+fi
+
+
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
index c3254dc..0c76de0 100644
--- a/stackrc
+++ b/stackrc
@@ -179,6 +179,10 @@
# TODO(frickler): Drop this when plugins no longer need it
IDENTITY_API_VERSION=3
+# Global option for enforcing scope. When set to True, ENFORCE_SCOPE
+# enables scope enforcement for every service, regardless of the
+# per-service ${SERVICE}_ENFORCE_SCOPE variables.
+ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
+
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
# an entry in the array will be installed into the named venv.
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..5057f0f
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,120 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.db_name = url.database
+ LOG.info('Registered counter for database %s' % self.db_name)
+ new_url = sqlalchemy.engine.URL.create(url.drivername,
+ url.username,
+ url.password,
+ url.host,
+ url.port,
+ 'stats')
+
+ self.engine = sqlalchemy.create_engine(new_url)
+ self.queue = queue.Queue()
+ self.thread = None
+
+ def engine_created(self, engine):
+ """Hook the engine creation process.
+
+ This is the plug point for the sqlalchemy plugin. Using
+ plugin=$this in the URL causes this method to be called when
+ the engine is created, giving us a chance to hook it below.
+ """
+ event.listen(engine, "before_cursor_execute", self._log_event)
+
+ def ensure_writer_thread(self):
+ self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+ self.thread.start()
+
+ def _log_event(self, conn, cursor, statement, parameters, context,
+ executemany):
+ """Queue a "hit" for this operation to be recorded.
+
+ Attempts to determine the operation from the first word of the
+ statement, falling back to 'OTHER' if it cannot be determined.
+ """
+
+ # Start our thread if not running. If we were forked after the
+ # engine was created and this plugin was associated, our
+ # writer thread is gone, so respawn.
+ if not self.thread or not self.thread.is_alive():
+ self.ensure_writer_thread()
+
+ try:
+ op = statement.strip().split(' ', 1)[0] or 'OTHER'
+ except Exception:
+ op = 'OTHER'
+
+ self.queue.put((self.db_name, op))
+
+ def do_incr(self, db, op, count):
+ """Increment the counter for (db,op) by count."""
+
+ query = ('INSERT INTO queries (db, op, count) '
+ ' VALUES (%s, %s, %s) '
+ ' ON DUPLICATE KEY UPDATE count=count+%s')
+ try:
+ with self.engine.begin() as conn:
+ r = conn.execute(query, (db, op, count, count))
+ except Exception as e:
+ LOG.error('Failed to account for access to database %r: %s',
+ db, e)
+
+ def stat_writer(self):
+ """Consume messages from the queue and write them in batches.
+
+ This reads "hists" from from a queue fed by _log_event() and
+ writes (db,op)+=count stats to the database after ten seconds
+ of no activity to avoid triggering a write for every SELECT
+ call. Write no less often than every thirty seconds and/or 100
+ pending hits to avoid being starved by constant activity.
+ """
+ LOG.debug('[%i] Writer thread running' % os.getpid())
+ while True:
+ to_write = {}
+ total = 0
+ last = time.time()
+ while time.time() - last < 30 and total < 100:
+ try:
+ item = self.queue.get(timeout=10)
+ to_write.setdefault(item, 0)
+ to_write[item] += 1
+ total += 1
+ except queue.Empty:
+ break
+
+ if to_write:
+ LOG.debug('[%i] Writing DB stats %s' % (
+ os.getpid(),
+ ','.join(['%s:%s=%i' % (db, op, count)
+ for (db, op), count in to_write.items()])))
+
+ for (db, op), count in to_write.items():
+ self.do_incr(db, op, count)
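
Once services have run for a while, the counters can be inspected directly. A sketch using the mysql client, mirroring what stack.sh prints below (credentials and host are placeholders):

    mysql -u root -psecret -h 127.0.0.1 stats -t -e \
        'SELECT db, op, count FROM queries ORDER BY count DESC LIMIT 10'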
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..f9f26f2
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+ [sqlalchemy.plugins]
+ dbcounter = dbcounter:LogCursorEventsPlugin
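
The [sqlalchemy.plugins] entry point is what lets plugin=dbcounter in a database URL resolve to LogCursorEventsPlugin. A quick way to confirm the registration took (an assumed verification step, not part of the patch):

    pip install ./tools/dbcounter
    python3 -c 'import pkg_resources; print([e.name for e in pkg_resources.iter_entry_points("sqlalchemy.plugins")])'
    # expected to include: dbcounter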
diff --git a/tools/get-stats.py b/tools/get-stats.py
index 670e723..ffe4676 100755
--- a/tools/get-stats.py
+++ b/tools/get-stats.py
@@ -1,10 +1,12 @@
#!/usr/bin/python3
import argparse
+import csv
import datetime
import glob
import itertools
import json
+import logging
import os
import re
import socket
@@ -25,6 +27,8 @@
print('No pymysql, database information will not be included',
file=sys.stderr)
+LOG = logging.getLogger('perf')
+
# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
@@ -83,13 +87,11 @@
def get_db_stats(host, user, passwd):
dbs = []
db = pymysql.connect(host=host, user=user, password=passwd,
- database='performance_schema',
+ database='stats',
cursorclass=pymysql.cursors.DictCursor)
with db:
with db.cursor() as cur:
- cur.execute(
- 'SELECT COUNT(*) AS queries,current_schema AS db FROM '
- 'events_statements_history_long GROUP BY current_schema')
+ cur.execute('SELECT db,op,count FROM queries')
for row in cur:
dbs.append({k: tryint(v) for k, v in row.items()})
return dbs
@@ -97,26 +99,56 @@
def get_http_stats_for_log(logfile):
stats = {}
- for line in open(logfile).readlines():
- m = re.search('"([A-Z]+) /([^" ]+)( HTTP/1.1)?" ([0-9]{3}) ([0-9]+)',
- line)
- if m:
- method = m.group(1)
- path = m.group(2)
- status = m.group(4)
- size = int(m.group(5))
+ apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+ 'length', 'c', 'agent')
+ ignore_agents = ('curl', 'uwsgi', 'nova-status')
+ for line in csv.reader(open(logfile), delimiter=' '):
+ fields = dict(zip(apache_fields, line))
+ if len(fields) != len(apache_fields):
+ # Not a combined access log, so we can bail completely
+ return []
+ try:
+ method, url, http = fields['request'].split(' ')
+ except ValueError:
+ method = url = http = ''
+ if 'HTTP' not in http:
+ # Not a combined access log, so we can bail completely
+ return []
- try:
- service, rest = path.split('/', 1)
- except ValueError:
- # Root calls like "GET /identity"
- service = path
- rest = ''
+ # Tempest's User-Agent is the urllib default, but client libraries
+ # and inter-service API calls set proper strings, so assume
+ # 'python-urllib' is tempest in order to tell them apart.
+ if 'python-urllib' in fields['agent'].lower():
+ agent = 'tempest'
+ else:
+ agent = fields['agent'].split(' ')[0]
+ if agent.startswith('python-'):
+ agent = agent.replace('python-', '')
+ if '/' in agent:
+ agent = agent.split('/')[0]
- stats.setdefault(service, {'largest': 0})
- stats[service].setdefault(method, 0)
- stats[service][method] += 1
- stats[service]['largest'] = max(stats[service]['largest'], size)
+ if agent in ignore_agents:
+ continue
+
+ try:
+ service, rest = url.strip('/').split('/', 1)
+ except ValueError:
+ # Root calls like "GET /identity"
+ service = url.strip('/')
+ rest = ''
+
+ method_key = '%s-%s' % (agent, method)
+ try:
+ length = int(fields['length'])
+ except ValueError:
+ LOG.warning('[%s] Failed to parse length %r from line %r' % (
+ logfile, fields['length'], line))
+ length = 0
+ stats.setdefault(service, {'largest': 0})
+ stats[service].setdefault(method_key, 0)
+ stats[service][method_key] += 1
+ stats[service]['largest'] = max(stats[service]['largest'],
+ length)
# Flatten this for ES
return [{'service': service, 'log': os.path.basename(logfile),
@@ -133,6 +165,7 @@
return {
'timestamp': datetime.datetime.now().isoformat(),
'hostname': socket.gethostname(),
+ 'version': 2,
}
@@ -154,6 +187,8 @@
'(default is %s)' % ','.join(process_defaults)))
args = parser.parse_args()
+ logging.basicConfig(level=logging.WARNING)
+
data = {
'services': get_services_stats(),
'db': pymysql and args.db_pass and get_db_stats(args.db_host,
diff --git a/unstack.sh b/unstack.sh
index 813f9a8..a36af3f 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -185,4 +185,4 @@
# Clean any safe.directory items we wrote into the global
# gitconfig. We can identify the relevant ones by checking that they
# point to somewhere in our $DEST directory.
-sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig