Merge "Fix doc and user create script to set homedir permissions"
diff --git a/.zuul.yaml b/.zuul.yaml
index 1c517f1..001ac84 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -17,6 +17,16 @@
- controller
- nodeset:
+ name: openstack-single-node-jammy
+ nodes:
+ - name: controller
+ label: ubuntu-jammy
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-focal
nodes:
- name: controller
@@ -57,16 +67,6 @@
- controller
- nodeset:
- name: devstack-single-node-centos-8-stream
- nodes:
- - name: controller
- label: centos-8-stream
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
name: devstack-single-node-centos-9-stream
nodes:
- name: controller
@@ -107,16 +107,6 @@
- controller
- nodeset:
- name: devstack-single-node-openeuler-20.03-sp2
- nodes:
- - name: controller
- label: openEuler-20-03-LTS-SP2
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
name: openstack-two-node
nodes:
- name: controller
@@ -147,36 +137,6 @@
- compute1
- nodeset:
- name: openstack-two-node-centos-8-stream
- nodes:
- - name: controller
- label: centos-8-stream
- - name: compute1
- label: centos-8-stream
- groups:
- # Node where tests are executed and test results collected
- - name: tempest
- nodes:
- - controller
- # Nodes running the compute service
- - name: compute
- nodes:
- - controller
- - compute1
- # Nodes that are not the controller
- - name: subnode
- nodes:
- - compute1
- # Switch node for multinode networking setup
- - name: switch
- nodes:
- - controller
- # Peer nodes for multinode networking setup
- - name: peers
- nodes:
- - compute1
-
-- nodeset:
name: openstack-two-node-centos-9-stream
nodes:
- name: controller
@@ -419,6 +379,7 @@
'{{ devstack_log_dir }}/worlddump-latest.txt': logs
'{{ devstack_full_log}}': logs
'{{ stage_dir }}/verify_tempest_conf.log': logs
+ '{{ stage_dir }}/performance.json': logs
'{{ stage_dir }}/apache': logs
'{{ stage_dir }}/apache_config': logs
'{{ stage_dir }}/etc': logs
@@ -437,6 +398,7 @@
'{{ stage_dir }}/rpm-qa.txt': logs
'{{ stage_dir }}/core': logs
'{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/services.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
/etc/ceph: logs
@@ -676,10 +638,7 @@
This job runs the devstack with scope checks enabled.
vars:
devstack_localrc:
- # Keep enabeling the services here to run with system scope
- CINDER_ENFORCE_SCOPE: true
- GLANCE_ENFORCE_SCOPE: true
- NEUTRON_ENFORCE_SCOPE: true
+ ENFORCE_SCOPE: true
- job:
name: devstack-multinode
@@ -694,16 +653,6 @@
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
- name: devstack-platform-centos-8-stream
- parent: tempest-full-py3
- description: CentOS 8 Stream platform test
- nodeset: devstack-single-node-centos-8-stream
- voting: false
- timeout: 9000
- vars:
- configure_swap_size: 4096
-
-- job:
name: devstack-platform-centos-9-stream
parent: tempest-full-py3
description: CentOS 9 Stream platform test
@@ -754,18 +703,69 @@
q-agt: true
- job:
- name: devstack-platform-openEuler-20.03-SP2
+ name: devstack-platform-ubuntu-jammy
parent: tempest-full-py3
- description: openEuler 20.03 SP2 platform test
- nodeset: devstack-single-node-openeuler-20.03-sp2
- voting: false
+ description: Ubuntu 22.04 LTS (jammy) platform test
+ nodeset: openstack-single-node-jammy
timeout: 9000
vars:
configure_swap_size: 4096
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovn-source
+ parent: devstack-platform-ubuntu-jammy
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
+ voting: false
+ vars:
devstack_localrc:
- # NOTE(wxy): OVN package is not supported by openEuler yet. Build it
- # from source instead.
OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovs
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+ nodeset: openstack-single-node-jammy
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 8192
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
- job:
name: devstack-no-tls-proxy
@@ -876,9 +876,11 @@
- devstack-ipv6
- devstack-enforce-scope
- devstack-platform-fedora-latest
- - devstack-platform-centos-8-stream
- devstack-platform-centos-9-stream
- devstack-platform-debian-bullseye
+ - devstack-platform-ubuntu-jammy
+ - devstack-platform-ubuntu-jammy-ovn-source
+ - devstack-platform-ubuntu-jammy-ovs
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
@@ -923,6 +925,7 @@
- devstack
- devstack-ipv6
- devstack-platform-centos-9-stream
+ - devstack-platform-ubuntu-jammy
- devstack-enforce-scope
- devstack-multinode
- devstack-unit-tests
@@ -977,7 +980,6 @@
experimental:
jobs:
- - devstack-platform-openEuler-20.03-SP2
- nova-multi-cell
- nova-next
- neutron-fullstack-with-uwsgi
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index dd8f21f..40a8725 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -279,7 +279,7 @@
::
- LOGDAYS=1
+ LOGDAYS=2
Some coloring is used during the DevStack runs to make it easier to
see what is going on. This can be disabled with::
diff --git a/doc/source/index.rst b/doc/source/index.rst
index a79a7e6..0434d68 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,7 +38,7 @@
Start with a clean and minimal install of a Linux system. DevStack
attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler.
+latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE.
If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
most tested, and will probably go the smoothest.
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 93b5746..33a55f8 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
ceph # NOPRIME
-redhat-lsb-core # not:rhel9,openEuler-20.03
+redhat-lsb-core # not:rhel9
xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 163a7c8..7697513 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -16,7 +16,7 @@
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
-make # dist:openEuler-20.03
+mod_ssl # required for tls-proxy on centos 9 stream computes
net-tools
openssh-server
openssl
@@ -28,8 +28,7 @@
python3-devel
python3-pip
python3-systemd
-redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # dist:openEuler-20.03
+redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
tar
tcpdump
unzip
diff --git a/files/rpms/nova b/files/rpms/nova
index 9e8621c..9522e57 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -6,7 +6,7 @@
genisoimage # not:rhel9 required for config_drive
iptables
iputils
-kernel-modules # not:openEuler-20.03
+kernel-modules
kpartx
parted
polkit
diff --git a/files/rpms/swift b/files/rpms/swift
index a838d78..7d906aa 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,5 +1,5 @@
curl
-liberasurecode-devel # not:openEuler-20.03
+liberasurecode-devel
memcached
rsync-daemon
sqlite
diff --git a/functions-common b/functions-common
index 8651604..be966e9 100644
--- a/functions-common
+++ b/functions-common
@@ -399,7 +399,7 @@
elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
sudo zypper -n install lsb-release
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
- sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb
+ sudo dnf install -y redhat-lsb-core
else
die $LINENO "Unable to find or auto-install lsb_release"
fi
@@ -471,10 +471,6 @@
# Drop the . release as we assume it's compatible
# XXX re-evaluate when we get RHEL10
DISTRO="rhel${os_RELEASE::1}"
- elif [[ "$os_VENDOR" =~ (openEuler) ]]; then
- # The DISTRO here is `openEuler-20.03`. While, actually only openEuler
- # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs.
- DISTRO="openEuler-$os_RELEASE"
else
# We can't make a good choice here. Setting a sensible DISTRO
# is part of the problem, but not the major issue -- we really
@@ -526,7 +522,6 @@
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
- [ "$os_VENDOR" = "openEuler" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "RedHatEnterprise" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
@@ -576,12 +571,6 @@
[ "$os_PACKAGE" = "deb" ]
}
-function is_openeuler {
- if [[ -z "$os_PACKAGE" ]]; then
- GetOSVersion
- fi
- [ "$os_VENDOR" = "openEuler" ]
-}
# Git Functions
# =============
@@ -1166,7 +1155,7 @@
}
function is_ironic_enforce_scope {
- is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+ is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0
return 1
}
diff --git a/inc/python b/inc/python
index 9382d35..3eb3efe 100644
--- a/inc/python
+++ b/inc/python
@@ -186,15 +186,11 @@
$xtrace
- # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
- # the same behaviour of setuptools before version 25.0.0.
- # related issue: https://github.com/pypa/pip/issues/3874
$sudo_pip \
http_proxy="${http_proxy:-}" \
https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
- SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
$cmd_pip $upgrade \
$@
result=$?
diff --git a/lib/apache b/lib/apache
index 02827d1..94f3cfc 100644
--- a/lib/apache
+++ b/lib/apache
@@ -95,7 +95,7 @@
# didn't fix Python 3.10 compatibility before release. Should be
# fixed in uwsgi 4.9.0; can remove this when packages available
# or we drop this release
- elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then
+ elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then
# Note httpd comes with mod_proxy_uwsgi and it is loaded by
# default; the mod_proxy_uwsgi package actually conflicts now.
# See:
diff --git a/lib/cinder b/lib/cinder
index b029fa0..52818a8 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -380,7 +380,7 @@
iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
- if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $CINDER_CONF oslo_policy enforce_scope true
iniset $CINDER_CONF oslo_policy enforce_new_defaults true
fi
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0f45273..b292da2 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -150,6 +150,19 @@
iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
fi
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ echo "enabling MySQL performance counting"
+
+ # Install our sqlalchemy plugin
+ pip_install ${TOP_DIR}/tools/dbcounter
+
+ # Create our stats database for accounting
+ recreate_database stats
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+ "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+ count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
+ fi
+
restart_service $MYSQL_SERVICE_NAME
}
@@ -209,7 +222,17 @@
function database_connection_url_mysql {
local db=$1
- echo "$BASE_SQL_CONN/$db?charset=utf8"
+ local plugin
+
+ # NOTE(danms): We don't enable perf on subnodes yet because the
+ # plugin is not installed there
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ if is_service_enabled mysql; then
+ plugin="&plugin=dbcounter"
+ fi
+ fi
+
+ echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
}
diff --git a/lib/glance b/lib/glance
index b94c06d..ba98f41 100644
--- a/lib/glance
+++ b/lib/glance
@@ -432,7 +432,7 @@
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
- if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $GLANCE_API_CONF oslo_policy enforce_scope true
iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
diff --git a/lib/keystone b/lib/keystone
index a4c8a52..80a136f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -265,7 +265,7 @@
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
fi
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $KEYSTONE_CONF oslo_policy enforce_scope true
iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
diff --git a/lib/neutron b/lib/neutron
index e7719d4..f24ccfb 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -632,7 +632,7 @@
# configure_rbac_policies() - Configure Neutron to enforce new RBAC
# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
iniset $NEUTRON_CONF oslo_policy enforce_scope True
else
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index b906a1b..88ac991 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -500,7 +500,7 @@
# configure_rbac_policies() - Configure Neutron to enforce new RBAC
# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
iniset $NEUTRON_CONF oslo_policy enforce_scope True
else
@@ -931,6 +931,9 @@
configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
+ # Configuration for placement client
+ configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
+
# Configure plugin
neutron_plugin_configure_service
}
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 927896b..9022f2d 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -169,6 +169,17 @@
# Utility Functions
# -----------------
+function wait_for_db_file {
+ local count=0
+ while [ ! -f $1 ]; do
+ sleep 1
+ count=$((count+1))
+ if [ "$count" -gt 5 ]; then
+ die $LINENO "DB File $1 not found"
+ fi
+ done
+}
+
function wait_for_sock_file {
local count=0
while [ ! -S $1 ]; do
@@ -695,8 +706,11 @@
fi
# Wait for the service to be ready
+ # Check for socket and db files for both OVN NB and SB
wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+ wait_for_db_file $OVN_DATADIR/ovnnb_db.db
+ wait_for_db_file $OVN_DATADIR/ovnsb_db.db
if is_service_enabled tls-proxy; then
sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index c0d74c7..fbd4692 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -403,7 +403,10 @@
ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
- # Configure interface for public bridge
+ # Configure interface for public bridge by setting the interface
+ # to "up" in case the job is running entirely private network based
+ # testing.
+ sudo ip link set $ext_gw_interface up
sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
# Any IPv6 private subnet that uses the default IPV6 subnet pool
# and that is plugged into the default router (Q_ROUTER_NAME) will
diff --git a/lib/nova b/lib/nova
index 4c14374..da3a10e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -324,11 +324,7 @@
# set chap algorithms. The default chap_algorithm is md5 which will
# not work under FIPS.
- # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in
- # and determine the correct solution for openeuler here
- if ! is_openeuler; then
- iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
- fi
+ iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
# ensure that iscsid is started, even when disabled by default
restart_service iscsid
diff --git a/lib/tempest b/lib/tempest
index 4504663..206b37b 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -71,6 +71,17 @@
TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them. In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
+
# Neutron/Network variables
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -127,6 +138,48 @@
fi
}
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images. Takes 3 arguments, an array and two
+# variables. The array will contain the list of active image UUIDs;
+# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of *both* other parameters.
+function get_active_images {
+ declare -n img_array=$1
+ declare -n img_id=$2
+ declare -n img_id_alt=$3
+
+ # start with a fresh array in case we are called multiple times
+ img_array=()
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ img_id="$IMAGE_UUID"
+ img_id_alt="$IMAGE_UUID"
+ fi
+ img_array+=($IMAGE_UUID)
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
+
+function poll_glance_images {
+ declare -n image_array=$1
+ declare -n image_id=$2
+ declare -n image_id_alt=$3
+ local -i poll_count
+
+ poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+ while (( poll_count-- > 0 )) ; do
+ sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+ get_active_images image_array image_id image_id_alt
+ if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+ return
+ fi
+ done
+ local msg
+ msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+ msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+ warn $LINENO "$msg"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -168,13 +221,21 @@
declare -a images
if is_service_enabled glance; then
- while read -r IMAGE_NAME IMAGE_UUID; do
- if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
- image_uuid="$IMAGE_UUID"
- image_uuid_alt="$IMAGE_UUID"
+ get_active_images images image_uuid image_uuid_alt
+
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ # Glance image import is asynchronous and may be configured
+ # to do image conversion. If image import is being used,
+ # it's possible that this code is being executed before the
+ # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ poll_glance_images images image_uuid image_uuid_alt
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+ exit 1
+ fi
fi
- images+=($IMAGE_UUID)
- done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ fi
case "${#images[*]}" in
0)
@@ -607,14 +668,19 @@
# If services enable the enforce_scope for their policy
# we need to enable the same on Tempest side so that
# test can be run with scoped token.
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $TEMPEST_CONFIG enforce_scope keystone true
iniset $TEMPEST_CONFIG auth admin_system 'all'
iniset $TEMPEST_CONFIG auth admin_project_name ''
fi
- iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
- iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope glance true
+ fi
+
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope cinder true
+ fi
if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
# libvirt-lxc does not support boot from volume or attaching volumes
diff --git a/lib/tls b/lib/tls
index 5a7f5ae..b8758cd 100644
--- a/lib/tls
+++ b/lib/tls
@@ -557,7 +557,7 @@
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
</VirtualHost>
EOF
if is_suse ; then
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 9e66f20..d8d5f68 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -20,6 +20,9 @@
roles:
- export-devstack-journal
- apache-logs-conf
+ # This should run as early as possible to make sure we don't skew
+ # the post-tempest results with other activities.
+ - capture-performance-data
- devstack-project-conf
# capture-system-logs should be the last role before stage-output
- capture-system-logs
diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml
index 6b7ea37..bd64574 100644
--- a/roles/apache-logs-conf/tasks/main.yaml
+++ b/roles/apache-logs-conf/tasks/main.yaml
@@ -64,7 +64,6 @@
'Debian': '/etc/apache2/sites-enabled/'
'Suse': '/etc/apache2/conf.d/'
'RedHat': '/etc/httpd/conf.d/'
- 'openEuler': '/etc/httpd/conf.d/'
- name: Discover configurations
find:
diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst
new file mode 100644
index 0000000..b7a37c2
--- /dev/null
+++ b/roles/capture-performance-data/README.rst
@@ -0,0 +1,25 @@
+Generate performance logs for staging
+
+Captures usage information from mysql, systemd, apache logs, and other
+parts of the system and generates a performance.json file in the
+staging directory.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory
+
+.. zuul:rolevar:: devstack_conf_dir
+ :default: /opt/stack
+
+ The base devstack destination directory
+
+.. zuul:rolevar:: debian_suse_apache_deref_logs
+
+ The apache logs found in the debian/suse locations
+
+.. zuul:rolevar:: redhat_apache_deref_logs
+
+ The apache logs found in the redhat locations
diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/roles/capture-performance-data/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_conf_dir: "{{ devstack_base_dir }}"
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
new file mode 100644
index 0000000..f9bb0f7
--- /dev/null
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -0,0 +1,16 @@
+- name: Generate statistics
+ shell:
+ executable: /bin/bash
+ cmd: |
+ source {{ devstack_conf_dir }}/stackrc
+ python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+ --db-user="$DATABASE_USER" \
+ --db-pass="$DATABASE_PASSWORD" \
+ --db-host="$DATABASE_HOST" \
+ {{ apache_logs }} > {{ stage_dir }}/performance.json
+ vars:
+ apache_logs: >-
+ {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %}
+ --apache-log="{{ i.stat.path }}"
+ {% endfor %}
+ ignore_errors: yes
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
- coredumps
- dns resolver
- listen53
+- services
- unbound.log
- deprecation messages
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # Services status
+    sudo systemctl status --all > {{ stage_dir }}/services.txt 2>/dev/null
+
# NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
# failed to start due to denials from SELinux — useful for CentOS
# and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
diff --git a/samples/local.conf b/samples/local.conf
index 8b76137..55b7298 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -49,7 +49,7 @@
# path of the destination log file. A timestamp will be appended to the given name.
LOGFILE=$DEST/logs/stack.sh.log
-# Old log files are automatically removed after 7 days to keep things neat. Change
+# Old log files are automatically removed after 2 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
diff --git a/stack.sh b/stack.sh
index 6e9ced9..df283bb 100755
--- a/stack.sh
+++ b/stack.sh
@@ -229,7 +229,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
+SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -280,13 +280,6 @@
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
-# TODO(wxy): Currently some base packages are not installed by default in
-# openEuler. Remove the code below once the packaged are installed by default
-# in the future.
-if [[ $DISTRO == "openEuler-20.03" ]]; then
- install_package hostname
-fi
-
# Configure Distro Repositories
# -----------------------------
@@ -1512,6 +1505,19 @@
time_totals
async_print_timing
+if is_service_enabled mysql; then
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+ echo ""
+ echo ""
+ echo "Post-stack database query stats:"
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'SELECT * FROM queries' -t 2>/dev/null
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'DELETE FROM queries' 2>/dev/null
+ fi
+fi
+
+
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
index d22fa88..0c76de0 100644
--- a/stackrc
+++ b/stackrc
@@ -179,6 +179,10 @@
# TODO(frickler): Drop this when plugins no longer need it
IDENTITY_API_VERSION=3
+# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides
+# each service's ${SERVICE}_ENFORCE_SCOPE variable
+ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
+
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
# an entry in the array will be installed into the named venv.
@@ -193,6 +197,10 @@
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
+# This can be used to turn on various non-default items in the
+# performance_schema that are of interest to us
+MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
+
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
# in the format for timeout(1);
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..5057f0f
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,120 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.db_name = url.database
+ LOG.info('Registered counter for database %s' % self.db_name)
+ new_url = sqlalchemy.engine.URL.create(url.drivername,
+ url.username,
+ url.password,
+ url.host,
+ url.port,
+ 'stats')
+
+ self.engine = sqlalchemy.create_engine(new_url)
+ self.queue = queue.Queue()
+ self.thread = None
+
+ def engine_created(self, engine):
+ """Hook the engine creation process.
+
+ This is the plug point for the sqlalchemy plugin. Using
+ plugin=$this in the URL causes this method to be called when
+ the engine is created, giving us a chance to hook it below.
+ """
+ event.listen(engine, "before_cursor_execute", self._log_event)
+
+ def ensure_writer_thread(self):
+ self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+ self.thread.start()
+
+ def _log_event(self, conn, cursor, statement, parameters, context,
+ executemany):
+ """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation by the first word of the
+ statement, or 'OTHER' if it cannot be determined.
+ """
+
+ # Start our thread if not running. If we were forked after the
+ # engine was created and this plugin was associated, our
+ # writer thread is gone, so respawn.
+ if not self.thread or not self.thread.is_alive():
+ self.ensure_writer_thread()
+
+ try:
+ op = statement.strip().split(' ', 1)[0] or 'OTHER'
+ except Exception:
+ op = 'OTHER'
+
+ self.queue.put((self.db_name, op))
+
+ def do_incr(self, db, op, count):
+ """Increment the counter for (db,op) by count."""
+
+ query = ('INSERT INTO queries (db, op, count) '
+ ' VALUES (%s, %s, %s) '
+ ' ON DUPLICATE KEY UPDATE count=count+%s')
+ try:
+ with self.engine.begin() as conn:
+ r = conn.execute(query, (db, op, count, count))
+ except Exception as e:
+ LOG.error('Failed to account for access to database %r: %s',
+ db, e)
+
+ def stat_writer(self):
+ """Consume messages from the queue and write them in batches.
+
+        This reads "hits" from a queue fed by _log_event() and
+ writes (db,op)+=count stats to the database after ten seconds
+ of no activity to avoid triggering a write for every SELECT
+ call. Write no less often than every thirty seconds and/or 100
+ pending hits to avoid being starved by constant activity.
+ """
+ LOG.debug('[%i] Writer thread running' % os.getpid())
+ while True:
+ to_write = {}
+ total = 0
+ last = time.time()
+ while time.time() - last < 30 and total < 100:
+ try:
+ item = self.queue.get(timeout=10)
+ to_write.setdefault(item, 0)
+ to_write[item] += 1
+ total += 1
+ except queue.Empty:
+ break
+
+ if to_write:
+ LOG.debug('[%i] Writing DB stats %s' % (
+ os.getpid(),
+ ','.join(['%s:%s=%i' % (db, op, count)
+ for (db, op), count in to_write.items()])))
+
+ for (db, op), count in to_write.items():
+ self.do_incr(db, op, count)
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..12300bf
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+ [sqlalchemy.plugins]
+ dbcounter = dbcounter:LogCursorEventsPlugin
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index f24ac40..daa1bc6 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -153,32 +153,8 @@
sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
}
-function fixup_openeuler {
- if ! is_openeuler; then
- return
- fi
-
- if is_arch "x86_64"; then
- arch="x86_64"
- elif is_arch "aarch64"; then
- arch="aarch64"
- fi
-
- # Some packages' version in openEuler are too old, use the newer ones we
- # provide in oepkg. (oepkg is an openEuler third part yum repo which is
- # endorsed by openEuler community)
- (echo '[openstack-ci]'
- echo 'name=openstack'
- echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/'
- echo 'enabled=1'
- echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null
-
- yum_install liberasurecode-devel
-}
-
function fixup_all {
fixup_ubuntu
fixup_fedora
fixup_suse
- fixup_openeuler
}
diff --git a/tools/get-stats.py b/tools/get-stats.py
new file mode 100755
index 0000000..e0c20f2
--- /dev/null
+++ b/tools/get-stats.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python3
+
+import argparse
+import csv
+import datetime
+import glob
+import itertools
+import json
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+
+try:
+ import psutil
+except ImportError:
+ psutil = None
+ print('No psutil, process information will not be included',
+ file=sys.stderr)
+
+try:
+ import pymysql
+except ImportError:
+ pymysql = None
+ print('No pymysql, database information will not be included',
+ file=sys.stderr)
+
+LOG = logging.getLogger('perf')
+
+# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
+
+
+def tryint(value):
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+
+
+def get_service_stats(service):
+ stats = {'MemoryCurrent': 0}
+ output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] +
+ ['-p%s' % stat for stat in stats])
+ for line in output.decode().split('\n'):
+ if not line:
+ continue
+ stat, val = line.split('=')
+ stats[stat] = tryint(val)
+
+ return stats
+
+
+def get_services_stats():
+ services = [os.path.basename(s) for s in
+ glob.glob('/etc/systemd/system/devstack@*.service')]
+ return [dict(service=service, **get_service_stats(service))
+ for service in services]
+
+
+def get_process_stats(proc):
+ cmdline = proc.cmdline()
+ if 'python' in cmdline[0]:
+ cmdline = cmdline[1:]
+ return {'cmd': cmdline[0],
+ 'pid': proc.pid,
+ 'args': ' '.join(cmdline[1:]),
+ 'rss': proc.memory_info().rss}
+
+
+def get_processes_stats(matches):
+ me = os.getpid()
+ procs = psutil.process_iter()
+
+ def proc_matches(proc):
+ return me != proc.pid and any(
+ re.search(match, ' '.join(proc.cmdline()))
+ for match in matches)
+
+ return [
+ get_process_stats(proc)
+ for proc in procs
+ if proc_matches(proc)]
+
+
+def get_db_stats(host, user, passwd):
+ dbs = []
+ try:
+ db = pymysql.connect(host=host, user=user, password=passwd,
+ database='stats',
+ cursorclass=pymysql.cursors.DictCursor)
+ except pymysql.err.OperationalError as e:
+ if 'Unknown database' in str(e):
+ print('No stats database; assuming devstack failed',
+ file=sys.stderr)
+ return []
+ raise
+
+ with db:
+ with db.cursor() as cur:
+ cur.execute('SELECT db,op,count FROM queries')
+ for row in cur:
+ dbs.append({k: tryint(v) for k, v in row.items()})
+ return dbs
+
+
+def get_http_stats_for_log(logfile):
+ stats = {}
+ apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+ 'length', 'c', 'agent')
+ ignore_agents = ('curl', 'uwsgi', 'nova-status')
+ for line in csv.reader(open(logfile), delimiter=' '):
+ fields = dict(zip(apache_fields, line))
+ if len(fields) != len(apache_fields):
+ # Not a combined access log, so we can bail completely
+ return []
+ try:
+ method, url, http = fields['request'].split(' ')
+ except ValueError:
+ method = url = http = ''
+ if 'HTTP' not in http:
+ # Not a combined access log, so we can bail completely
+ return []
+
+ # Tempest's User-Agent is unchanged, but client libraries and
+ # inter-service API calls use proper strings. So assume
+ # 'python-urllib' is tempest so we can tell it apart.
+ if 'python-urllib' in fields['agent'].lower():
+ agent = 'tempest'
+ else:
+ agent = fields['agent'].split(' ')[0]
+ if agent.startswith('python-'):
+ agent = agent.replace('python-', '')
+ if '/' in agent:
+ agent = agent.split('/')[0]
+
+ if agent in ignore_agents:
+ continue
+
+ try:
+ service, rest = url.strip('/').split('/', 1)
+ except ValueError:
+ # Root calls like "GET /identity"
+ service = url.strip('/')
+ rest = ''
+
+ method_key = '%s-%s' % (agent, method)
+ try:
+ length = int(fields['length'])
+ except ValueError:
+ LOG.warning('[%s] Failed to parse length %r from line %r' % (
+ logfile, fields['length'], line))
+ length = 0
+ stats.setdefault(service, {'largest': 0})
+ stats[service].setdefault(method_key, 0)
+ stats[service][method_key] += 1
+ stats[service]['largest'] = max(stats[service]['largest'],
+ length)
+
+ # Flatten this for ES
+ return [{'service': service, 'log': os.path.basename(logfile),
+ **vals}
+ for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+ return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+ for log in logfiles))
+
+
+def get_report_info():
+ return {
+ 'timestamp': datetime.datetime.now().isoformat(),
+ 'hostname': socket.gethostname(),
+ 'version': 2,
+ }
+
+
+if __name__ == '__main__':
+ process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--db-user', default='root',
+ help=('MySQL user for collecting stats '
+ '(default: "root")'))
+ parser.add_argument('--db-pass', default=None,
+ help='MySQL password for db-user')
+ parser.add_argument('--db-host', default='localhost',
+ help='MySQL hostname')
+ parser.add_argument('--apache-log', action='append', default=[],
+ help='Collect API call stats from this apache log')
+ parser.add_argument('--process', action='append',
+ default=process_defaults,
+ help=('Include process stats for this cmdline regex '
+ '(default is %s)' % ','.join(process_defaults)))
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.WARNING)
+
+ data = {
+ 'services': get_services_stats(),
+ 'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+ args.db_user,
+ args.db_pass) or [],
+ 'processes': psutil and get_processes_stats(args.process) or [],
+ 'api': get_http_stats(args.apache_log),
+ 'report': get_report_info(),
+ }
+
+ print(json.dumps(data, indent=2))
diff --git a/unstack.sh b/unstack.sh
index 813f9a8..a36af3f 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -185,4 +185,4 @@
# Clean any safe.directory items we wrote into the global
# gitconfig. We can identify the relevant ones by checking that they
# point to somewhere in our $DEST directory.
-sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig