Merge "Create /opt/stack and make it home directory"
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 7793d8e..f03304f 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -130,8 +130,8 @@
 DevStack master tracks the upstream master of all the projects. If you
 would like to run a stable branch of OpenStack, you should use the
 corresponding stable branch of DevStack as well. For instance the
-``stable/kilo`` version of DevStack will already default to all the
-projects running at ``stable/kilo`` levels.
+``stable/ocata`` version of DevStack will already default to all the
+projects running at ``stable/ocata`` levels.
 
 Note: it's also possible to manually adjust the ``*_BRANCH`` variables
 further if you would like to test specific milestones, or even custom
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 84dc273..1284360 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -7,7 +7,7 @@
 </Directory>
 
 <VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-public
     WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public
     WSGIApplicationGroup %{GLOBAL}
@@ -21,7 +21,7 @@
 </VirtualHost>
 
 <VirtualHost *:%ADMINPORT%>
-    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-admin
     WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
     WSGIApplicationGroup %{GLOBAL}
diff --git a/files/debs/q-agt b/files/debs/neutron-agent
similarity index 100%
rename from files/debs/q-agt
rename to files/debs/neutron-agent
diff --git a/files/debs/q-l3 b/files/debs/neutron-l3
similarity index 100%
rename from files/debs/q-l3
rename to files/debs/neutron-l3
diff --git a/files/debs/q-agt b/files/debs/q-agt
new file mode 120000
index 0000000..99fe353
--- /dev/null
+++ b/files/debs/q-agt
@@ -0,0 +1 @@
+neutron-agent
\ No newline at end of file
diff --git a/files/debs/q-l3 b/files/debs/q-l3
new file mode 120000
index 0000000..0a5ca2a
--- /dev/null
+++ b/files/debs/q-l3
@@ -0,0 +1 @@
+neutron-l3
\ No newline at end of file
diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/neutron-agent
similarity index 100%
rename from files/rpms-suse/q-agt
rename to files/rpms-suse/neutron-agent
diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/neutron-l3
similarity index 100%
rename from files/rpms-suse/q-l3
rename to files/rpms-suse/neutron-l3
diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt
new file mode 120000
index 0000000..99fe353
--- /dev/null
+++ b/files/rpms-suse/q-agt
@@ -0,0 +1 @@
+neutron-agent
\ No newline at end of file
diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3
new file mode 120000
index 0000000..0a5ca2a
--- /dev/null
+++ b/files/rpms-suse/q-l3
@@ -0,0 +1 @@
+neutron-l3
\ No newline at end of file
diff --git a/files/rpms/q-agt b/files/rpms/neutron-agent
similarity index 100%
rename from files/rpms/q-agt
rename to files/rpms/neutron-agent
diff --git a/files/rpms/q-l3 b/files/rpms/neutron-l3
similarity index 100%
rename from files/rpms/q-l3
rename to files/rpms/neutron-l3
diff --git a/files/rpms/q-agt b/files/rpms/q-agt
new file mode 120000
index 0000000..99fe353
--- /dev/null
+++ b/files/rpms/q-agt
@@ -0,0 +1 @@
+neutron-agent
\ No newline at end of file
diff --git a/files/rpms/q-l3 b/files/rpms/q-l3
new file mode 120000
index 0000000..0a5ca2a
--- /dev/null
+++ b/files/rpms/q-l3
@@ -0,0 +1 @@
+neutron-l3
\ No newline at end of file
diff --git a/functions b/functions
index 1aa7517..872f216 100644
--- a/functions
+++ b/functions
@@ -666,11 +666,7 @@
 
 # running_in_container - Returns true otherwise false
 function running_in_container {
-    if grep -q lxc /proc/1/cgroup; then
-        return 0
-    fi
-
-    return 1
+    [[ $(systemd-detect-virt --container) != 'none' ]]
 }
 
 
diff --git a/lib/dstat b/lib/dstat
index b705948..62795f5 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -21,16 +21,22 @@
     # A better kind of sysstat, with the top process per time slice
     run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR"
 
-    # To enable peakmem_tracker add:
-    #    enable_service peakmem_tracker
+    # To enable memory_tracker add:
+    #    enable_service memory_tracker
     # to your localrc
-    run_process peakmem_tracker "$TOP_DIR/tools/peakmem_tracker.sh"
+    run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh"
+
+    # remove support for the old name when it's no longer used (sometime in Queens)
+    if is_service_enabled peakmem_tracker; then
+        deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead"
+        run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh"
+    fi
 }
 
 # stop_dstat() stop dstat process
 function stop_dstat {
     stop_process dstat
-    stop_process peakmem_tracker
+    stop_process memory_tracker
 }
 
 # Restore xtrace
diff --git a/lib/glance b/lib/glance
index 6125f45..2f4aa5f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -55,8 +55,6 @@
 GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
 GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
 GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
-GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf
-GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini
 GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False}
 
 if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then
@@ -72,8 +70,6 @@
 GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
 GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
-GLANCE_GLARE_PORT=${GLANCE_GLARE_PORT:-9494}
-GLANCE_GLARE_HOSTPORT=${GLANCE_GLARE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_GLARE_PORT}
 
 # Functions
 # ---------
@@ -98,9 +94,6 @@
     sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
 
     # Copy over our glance configurations and update them
-    if is_service_enabled g-glare; then
-        cp $GLANCE_DIR/etc/glance-glare.conf $GLANCE_GLARE_CONF
-    fi
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
@@ -143,9 +136,6 @@
 
     # Store specific configs
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
-    if is_service_enabled g-glare; then
-        iniset $GLANCE_GLARE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
-    fi
     iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
 
     iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
@@ -172,22 +162,6 @@
 
         iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
 
-        # Store the glare in swift if enabled.
-        if is_service_enabled g-glare; then
-            iniset $GLANCE_GLARE_CONF glance_store default_store swift
-            iniset $GLANCE_GLARE_CONF glance_store swift_store_create_container_on_put True
-
-            iniset $GLANCE_GLARE_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
-            iniset $GLANCE_GLARE_CONF glance_store default_swift_reference ref1
-            iniset $GLANCE_GLARE_CONF glance_store stores "file, http, swift"
-            iniset $GLANCE_GLARE_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
-
-            # commenting is not strictly necessary but it's confusing to have bad values in conf
-            inicomment $GLANCE_GLARE_CONF glance_store swift_store_user
-            inicomment $GLANCE_GLARE_CONF glance_store swift_store_key
-            inicomment $GLANCE_GLARE_CONF glance_store swift_store_auth_address
-        fi
-
         iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
         if python3_enabled; then
             # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag
@@ -266,29 +240,6 @@
         iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
         iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
     fi
-
-    # Configure GLANCE_GLARE (Glance Glare)
-    if is_service_enabled g-glare; then
-        local dburl
-        dburl=`database_connection_url glance`
-        setup_logging $GLANCE_GLARE_CONF
-        iniset $GLANCE_GLARE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $GLANCE_GLARE_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
-        iniset $GLANCE_GLARE_CONF DEFAULT bind_port $GLANCE_GLARE_PORT
-        inicomment $GLANCE_GLARE_CONF DEFAULT log_file
-        iniset $GLANCE_GLARE_CONF DEFAULT workers "$API_WORKERS"
-
-        iniset $GLANCE_GLARE_CONF database connection $dburl
-        iniset $GLANCE_GLARE_CONF paste_deploy flavor keystone
-        configure_auth_token_middleware $GLANCE_GLARE_CONF glare $GLANCE_AUTH_CACHE_DIR/artifact
-        # Register SSL certificates if provided
-        if is_ssl_enabled_service glance; then
-            ensure_certificates GLANCE
-            iniset $GLANCE_GLARE_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
-            iniset $GLANCE_GLARE_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
-        fi
-        cp $GLANCE_DIR/etc/glance-glare-paste.ini $GLANCE_GLARE_PASTE_INI
-    fi
 }
 
 # create_glance_accounts() - Set up common required glance accounts
@@ -298,7 +249,6 @@
 # SERVICE_PROJECT_NAME  glance          service
 # SERVICE_PROJECT_NAME  glance-swift    ResellerAdmin (if Swift is enabled)
 # SERVICE_PROJECT_NAME  glance-search   search (if Search is enabled)
-# SERVICE_PROJECT_NAME  glare           service (if enabled)
 
 function create_glance_accounts {
     if is_service_enabled g-api; then
@@ -321,16 +271,6 @@
         iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
         iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
     fi
-
-    # Add glance-glare service and endpoints
-    if is_service_enabled g-glare; then
-        create_service_user "glare"
-        get_or_create_service "glare" "artifact" "Glance Artifact Service"
-
-        get_or_create_endpoint "artifact" \
-            "$REGION_NAME" \
-            "$GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT"
-    fi
 }
 
 # create_glance_cache_dir() - Part of the init_glance() process
@@ -400,15 +340,6 @@
     if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then
         die $LINENO "g-api did not start"
     fi
-
-    #Start g-glare after g-reg/g-api
-    if is_service_enabled g-glare; then
-        run_process g-glare "$GLANCE_BIN_DIR/glance-glare --config-file=$GLANCE_CONF_DIR/glance-glare.conf"
-        echo "Waiting for Glare [g-glare] ($GLANCE_GLARE_HOSTPORT) to start..."
-        if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_GLARE_HOSTPORT; then
-            die $LINENO " Glare [g-glare] did not start"
-        fi
-    fi
 }
 
 # stop_glance() - Stop running processes
@@ -416,10 +347,6 @@
     # Kill the Glance screen windows
     stop_process g-api
     stop_process g-reg
-
-    if is_service_enabled g-glare; then
-        stop_process g-glare
-    fi
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index e87a30c..07974fe 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -197,8 +197,8 @@
             if [ -z $SUBNETPOOL_V6_ID ]; then
                 fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
             fi
-            SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2)
-            die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
+            IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID --subnet-range $fixed_range_v6 | grep ' id ' | get_field 2)
+            die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
 
         if [[ $Q_AGENT == "openvswitch" ]]; then
diff --git a/lib/tempest b/lib/tempest
index a9461d4..cf7eb6f 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -223,7 +223,7 @@
             # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values.
             # Some resize instance in tempest tests depends on this.
             for f in ${flavors[@]:1}; do
-                if [[ $f -ne $flavor_ref ]]; then
+                if [[ "$f" != "$flavor_ref" ]]; then
                     flavor_ref_alt=$f
                     break
                 fi
diff --git a/lib/tls b/lib/tls
index f9ef554..fb2fa3a 100644
--- a/lib/tls
+++ b/lib/tls
@@ -457,29 +457,30 @@
 # MaxClients: maximum number of simultaneous client connections
 # MaxRequestsPerChild: maximum number of requests a server process serves
 #
-# The apache defaults are too conservative if we want reliable tempest
-# testing. Bump these values up from ~400 max clients to 1024 max clients.
+# We want to be memory thrifty so tune down apache to allow 256 total
+# connections. This should still be plenty for a dev env yet lighter than
+# apache defaults.
 <IfModule mpm_worker_module>
 # Note that the next three conf values must be changed together.
 # MaxClients = ServerLimit * ThreadsPerChild
-ServerLimit          32
+ServerLimit           8
 ThreadsPerChild      32
-MaxClients         1024
-StartServers          3
-MinSpareThreads      96
-MaxSpareThreads     192
+MaxClients          256
+StartServers          2
+MinSpareThreads      32
+MaxSpareThreads      96
 ThreadLimit          64
 MaxRequestsPerChild   0
 </IfModule>
 <IfModule mpm_event_module>
 # Note that the next three conf values must be changed together.
 # MaxClients = ServerLimit * ThreadsPerChild
-ServerLimit          32
+ServerLimit           8
 ThreadsPerChild      32
-MaxClients         1024
-StartServers          3
-MinSpareThreads      96
-MaxSpareThreads     192
+MaxClients          256
+StartServers          2
+MinSpareThreads      32
+MaxSpareThreads      96
 ThreadLimit          64
 MaxRequestsPerChild   0
 </IfModule>
diff --git a/stack.sh b/stack.sh
index f08d56f..20cdc1d 100755
--- a/stack.sh
+++ b/stack.sh
@@ -192,7 +192,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|sid|testing|jessie|f24|f25|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
new file mode 100755
index 0000000..dac0267
--- /dev/null
+++ b/tools/memory_tracker.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+# time to sleep between checks
+SLEEP_TIME=20
+
+# MemAvailable is the best estimation and has built-in heuristics
+# around reclaimable memory.  However, it is not available until 3.14
+# kernel (i.e. Ubuntu LTS Trusty misses it).  In that case, we fall
+# back to free+buffers+cache as the available memory.
+USE_MEM_AVAILABLE=0
+if grep -q '^MemAvailable:' /proc/meminfo; then
+    USE_MEM_AVAILABLE=1
+fi
+
+function get_mem_unevictable {
+    awk '/^Unevictable:/ {print $2}' /proc/meminfo
+}
+
+function get_mem_available {
+    if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then
+        awk '/^MemAvailable:/ {print $2}' /proc/meminfo
+    else
+        awk '/^MemFree:/ {free=$2}
+            /^Buffers:/ {buffers=$2}
+            /^Cached:/  {cached=$2}
+            END { print free+buffers+cached }' /proc/meminfo
+    fi
+}
+
+function tracker {
+    local low_point
+    local unevictable_point
+    low_point=$(get_mem_available)
+    # log mlocked memory at least on first iteration
+    unevictable_point=0
+    while [ 1 ]; do
+
+        local mem_available
+        mem_available=$(get_mem_available)
+
+        local unevictable
+        unevictable=$(get_mem_unevictable)
+
+        if [ $mem_available -lt $low_point -o $unevictable -ne $unevictable_point ]; then
+            echo "[[["
+            date
+
+            # whenever we see less memory available than last time, dump the
+            # snapshot of current usage; i.e. checking the latest entry in the file
+            # will give the peak-memory usage
+            if [[ $mem_available -lt $low_point ]]; then
+                low_point=$mem_available
+                echo "---"
+                # always available greppable output; given difference in
+                # meminfo output as described above...
+                echo "memory_tracker low_point: $mem_available"
+                echo "---"
+                cat /proc/meminfo
+                echo "---"
+                # would a hierarchical view be more useful (-H)?  output is
+                # not sorted by usage then, however, and the first
+                # question is "what's using up the memory"
+                #
+                # there are a lot of kernel threads, especially on a 8-cpu
+                # system.  do a best-effort removal to improve
+                # signal/noise ratio of output.
+                ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 |
+                    grep -v ']$'
+            fi
+            echo "---"
+
+            # list processes that lock memory from swap
+            if [[ $unevictable -ne $unevictable_point ]]; then
+                unevictable_point=$unevictable
+                sudo ./tools/mlock_report.py
+            fi
+
+            echo "]]]"
+        fi
+        sleep $SLEEP_TIME
+    done
+}
+
+function usage {
+    echo "Usage: $0 [-x] [-s N]" 1>&2
+    exit 1
+}
+
+while getopts ":s:x" opt; do
+    case $opt in
+        s)
+            SLEEP_TIME=$OPTARG
+            ;;
+        x)
+            set -o xtrace
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+shift $((OPTIND-1))
+
+tracker
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
new file mode 100755
index 0000000..1d23af9
--- /dev/null
+++ b/tools/mlock_report.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# This tool lists processes that lock memory pages from swapping to disk.
+
+import re
+import subprocess
+
+import psutil
+
+
+SUMMARY_REGEX = re.compile(r".*\s+(?P<locked>[\d]+)\s+KB")
+
+
+def main():
+    try:
+        print _get_report()
+    except Exception as e:
+        print "Failure listing processes locking memory: %s" % str(e)
+
+
+def _get_report():
+    mlock_users = []
+    for proc in psutil.process_iter():
+        pid = proc.pid
+        # sadly psutil does not expose locked pages info, that's why we
+        # call to pmap and parse the output here
+        try:
+            out = subprocess.check_output(['pmap', '-XX', str(pid)])
+        except subprocess.CalledProcessError as e:
+            # 42 means process just vanished, which is ok
+            if e.returncode == 42:
+                continue
+            raise
+        last_line = out.splitlines()[-1]
+
+        # some processes don't provide a memory map, for example those
+        # running as kernel services, so we need to skip those that don't
+        # match
+        result = SUMMARY_REGEX.match(last_line)
+        if result:
+            locked = int(result.group('locked'))
+            if locked:
+                mlock_users.append({'name': proc.name(),
+                                    'pid': pid,
+                                    'locked': locked})
+
+    # produce a single line log message with per process mlock stats
+    if mlock_users:
+        return "; ".join(
+            "[%(name)s (pid:%(pid)s)]=%(locked)dKB" % args
+            # log heavy users first
+            for args in sorted(mlock_users, key=lambda d: d['locked'])
+        )
+    else:
+        return "no locked memory"
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh
deleted file mode 100755
index ecbd79a..0000000
--- a/tools/peakmem_tracker.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -o errexit
-
-# time to sleep between checks
-SLEEP_TIME=20
-
-# MemAvailable is the best estimation and has built-in heuristics
-# around reclaimable memory.  However, it is not available until 3.14
-# kernel (i.e. Ubuntu LTS Trusty misses it).  In that case, we fall
-# back to free+buffers+cache as the available memory.
-USE_MEM_AVAILBLE=0
-if grep -q '^MemAvailable:' /proc/meminfo; then
-    USE_MEM_AVAILABLE=1
-fi
-
-function get_mem_available {
-    if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then
-        awk '/^MemAvailable:/ {print $2}' /proc/meminfo
-    else
-        awk '/^MemFree:/ {free=$2}
-            /^Buffers:/ {buffers=$2}
-            /^Cached:/  {cached=$2}
-            END { print free+buffers+cached }' /proc/meminfo
-    fi
-}
-
-# whenever we see less memory available than last time, dump the
-# snapshot of current usage; i.e. checking the latest entry in the
-# file will give the peak-memory usage
-function tracker {
-    local low_point
-    low_point=$(get_mem_available)
-    while [ 1 ]; do
-
-        local mem_available
-        mem_available=$(get_mem_available)
-
-        if [[ $mem_available -lt $low_point ]]; then
-            low_point=$mem_available
-            echo "[[["
-            date
-            echo "---"
-            # always available greppable output; given difference in
-            # meminfo output as described above...
-            echo "peakmem_tracker low_point: $mem_available"
-            echo "---"
-            cat /proc/meminfo
-            echo "---"
-            # would hierarchial view be more useful (-H)?  output is
-            # not sorted by usage then, however, and the first
-            # question is "what's using up the memory"
-            #
-            # there are a lot of kernel threads, especially on a 8-cpu
-            # system.  do a best-effort removal to improve
-            # signal/noise ratio of output.
-            ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 |
-                grep -v ']$'
-            echo "]]]"
-        fi
-
-        sleep $SLEEP_TIME
-    done
-}
-
-function usage {
-    echo "Usage: $0 [-x] [-s N]" 1>&2
-    exit 1
-}
-
-while getopts ":s:x" opt; do
-    case $opt in
-        s)
-            SLEEP_TIME=$OPTARG
-            ;;
-        x)
-            set -o xtrace
-            ;;
-        *)
-            usage
-            ;;
-    esac
-done
-shift $((OPTIND-1))
-
-tracker
diff --git a/tools/xen/functions b/tools/xen/functions
index 93f3413..bc0c515 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -294,6 +294,18 @@
     # Assert ithas a numeric nonzero value
     expr "$cpu_count" + 0
 
+    # 8 VCPUs should be enough for devstack VM; avoid using too
+    # many VCPUs:
+    # 1. too many VCPUs may trigger a kernel bug which result VM
+    #    not able to boot:
+    #    https://kernel.googlesource.com/pub/scm/linux/kernel/git/wsa/linux/+/e2e004acc7cbe3c531e752a270a74e95cde3ea48
+    # 2. The remaining CPUs can be used for other purpose:
+    #    e.g. boot test VMs.
+    MAX_VCPUS=8
+    if [ $cpu_count -ge $MAX_VCPUS ]; then
+        cpu_count=$MAX_VCPUS
+    fi
+
     xe vm-param-set uuid=$vm VCPUs-max=$cpu_count
     xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
 }