Merge "Remove proxy-sendcl from mod_proxy_uwsgi apache path"
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..bb7239a
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,79 @@
+- nodeset:
+    name: openstack-single-node
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-two-node
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+      - name: compute1
+        label: ubuntu-xenial
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+
+- job:
+    name: devstack
+    parent: multinode
+    description: Base devstack job
+    nodeset: openstack-single-node
+    required-projects:
+      - openstack-dev/devstack
+      - openstack/cinder
+      - openstack/glance
+      - openstack/keystone
+      - openstack/neutron
+      - openstack/nova
+      - openstack/requirements
+      - openstack/swift
+    timeout: 7200
+    vars:
+      devstack_localrc:
+        DATABASE_PASSWORD: secretdatabase
+        RABBIT_PASSWORD: secretrabbit
+        ADMIN_PASSWORD: secretadmin
+        SERVICE_PASSWORD: secretservice
+        NETWORK_GATEWAY: 10.1.0.1
+        Q_USE_DEBUG_COMMAND: True
+        FIXED_RANGE: 10.1.0.0/20
+        IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+        FLOATING_RANGE: 172.24.5.0/24
+        PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+        FLOATING_HOST_PREFIX: 172.24.4
+        FLOATING_HOST_MASK: 23
+        SWIFT_REPLICAS: 1
+        SWIFT_START_ALL_SERVICES: False
+        LOGFILE: /opt/stack/logs/devstacklog.txt
+        LOG_COLOR: False
+        VERBOSE: True
+        # NOTE: duplicate NETWORK_GATEWAY entry removed; it is set above
+        NOVNC_FROM_PACKAGE: True
+        ERROR_ON_CLONE: True
+        # NOTE(dims): etcd 3.x is not available in debian/ubuntu
+        # etc. As a stop gap measure, devstack uses wget to download
+        # from the location below for all the CI jobs.
+        ETCD_DOWNLOAD_URL: "http://tarballs.openstack.org/etcd/"
+      devstack_services:
+        horizon: False
+        tempest: False
+    pre-run: playbooks/pre
+    post-run: playbooks/post
+
+
+- project:
+    name: openstack-dev/devstack
+    check:
+      jobs:
+        - devstack
diff --git a/HACKING.rst b/HACKING.rst
index fc67f09..d5d6fbc 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -20,7 +20,7 @@
 contains the usual links for blueprints, bugs, etc.
 
 __ contribute_
-.. _contribute: http://docs.openstack.org/infra/manual/developers.html
+.. _contribute: https://docs.openstack.org/infra/manual/developers.html
 
 __ lp_
 .. _lp: https://launchpad.net/~devstack
@@ -255,7 +255,7 @@
 * The ``OS_*`` environment variables should be the only ones used for all
   authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
 
-.. _CLIAuth: http://wiki.openstack.org/CLIAuth
+.. _CLIAuth: https://wiki.openstack.org/CLIAuth
 
 * The exercise MUST clean up after itself if successful.  If it is not successful,
   it is assumed that state will be left behind; this allows a chance for developers
diff --git a/README.rst b/README.rst
index adbf59a..6885546 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,5 @@
-DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
+DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud
+from git source trees.
 
 Goals
 =====
@@ -27,9 +28,9 @@
 The DevStack master branch generally points to trunk versions of OpenStack
 components.  For older, stable versions, look for branches named
 stable/[release] in the DevStack repo.  For example, you can do the
-following to create a Newton OpenStack cloud::
+following to create a Pike OpenStack cloud::
 
-    git checkout stable/newton
+    git checkout stable/pike
     ./stack.sh
 
 You can also pick specific OpenStack project releases by setting the appropriate
@@ -54,7 +55,7 @@
 endpoints, like so:
 
 * Horizon: http://myhost/
-* Keystone: http://myhost:5000/v2.0/
+* Keystone: http://myhost/identity/v2.0/
 
 We also provide an environment file that you can use to interact with your
 cloud via CLI::
diff --git a/clean.sh b/clean.sh
index 9ffe3be..2333596 100755
--- a/clean.sh
+++ b/clean.sh
@@ -88,6 +88,7 @@
 cleanup_glance
 cleanup_keystone
 cleanup_nova
+cleanup_placement
 cleanup_neutron
 cleanup_swift
 cleanup_horizon
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 064bf51..23f680a 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -136,7 +136,7 @@
 
     ::
 
-        OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0
+        OS_AUTH_URL=http://$SERVICE_HOST:5000/v3
 
 KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG
     Set command-line client log level to ``DEBUG``. These are commented
@@ -779,9 +779,15 @@
     DOWNLOAD_DEFAULT_IMAGES=False
     IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img"
 
+    # Provide a custom etcd3 binary download URL and its sha256.
+    # The binary must be located under '/<etcd version>/etcd-<etcd-version>-linux-s390x.tar.gz'
+    # on this URL.
+    # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd
+    ETCD_DOWNLOAD_URL=<your-etcd-download-url>
+    ETCD_SHA256=<your-etcd3-sha256>
+
     enable_service n-sproxy
     disable_service n-novnc
-    disable_service etcd3  # https://bugs.launchpad.net/devstack/+bug/1693192
 
     [[post-config|$NOVA_CONF]]
 
@@ -803,8 +809,11 @@
   needed if you want to use the *serial console* outside of the all-in-one
   setup.
 
-* The service ``etcd3`` needs to be disabled as long as bug report
-  https://bugs.launchpad.net/devstack/+bug/1693192 is not resolved.
+* A link to an etcd3 binary and its sha256 needs to be provided as the
+  binary for s390x is not hosted on github like it is for other
+  architectures. For more details see
+  https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be
+  built along https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd.
 
 .. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need
    to use a guest image which is smaller than 1GB when uncompressed.
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index a186336..ed9b4da 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -32,9 +32,9 @@
 `git.openstack.org
 <https://git.openstack.org/cgit/openstack-dev/devstack>`__ and bug
 reports go to `LaunchPad
-<http://bugs.launchpad.net/devstack/>`__. Contributions follow the
+<https://bugs.launchpad.net/devstack/>`__. Contributions follow the
 usual process as described in the `developer guide
-<http://docs.openstack.org/infra/manual/developers.html>`__. This
+<https://docs.openstack.org/infra/manual/developers.html>`__. This
 Sphinx documentation is housed in the doc directory.
 
 Why not use packages?
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 4ed64bf..3592844 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -39,7 +39,6 @@
     LOGFILE=$DEST/logs/stack.sh.log
     VERBOSE=True
     LOG_COLOR=True
-    SCREEN_LOGDIR=$DEST/logs
     # Pre-requisite
     ENABLED_SERVICES=rabbit,mysql,key
     # Horizon
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 6bbab53..0f105d7 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -66,5 +66,5 @@
 <https://github.com/openstack/nova/blob/master/nova/conf/serial_console.py>`_.
 
 For more information on OpenStack configuration see the `OpenStack
-Configuration Reference
-<https://docs.openstack.org/ocata/config-reference/compute.html>`_
+Compute Service Configuration Reference
+<https://docs.openstack.org/nova/latest/admin/configuration/index.html>`_
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index bdbeaaa..74010cd 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -69,7 +69,7 @@
 
    This is not a recommended configuration. Because of interactions
    between ovs and bridging, if you reboot your box with active
-   networking you may loose network connectivity to your system.
+   networking you may lose network connectivity to your system.
 
 If you need your guests accessible on the network, but only have 1
 interface (using something like a NUC), you can share your one
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index f9ca055..5fd6697 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -39,6 +39,7 @@
 collectd-ceilometer-plugin             `git://git.openstack.org/openstack/collectd-ceilometer-plugin <https://git.openstack.org/cgit/openstack/collectd-ceilometer-plugin>`__
 congress                               `git://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
 cue                                    `git://git.openstack.org/openstack/cue <https://git.openstack.org/cgit/openstack/cue>`__
+cyborg                                 `git://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
 designate                              `git://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
 devstack-plugin-additional-pkg-repos   `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
 devstack-plugin-amqp1                  `git://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
@@ -99,6 +100,7 @@
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
 networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
+networking-baremetal                   `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
 networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
 networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
 networking-calico                      `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
@@ -109,13 +111,16 @@
 networking-generic-switch              `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
 networking-hpe                         `git://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
 networking-huawei                      `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
+networking-hyperv                      `git://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
 networking-infoblox                    `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
 networking-l2gw                        `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
+networking-lagopus                     `git://git.openstack.org/openstack/networking-lagopus <https://git.openstack.org/cgit/openstack/networking-lagopus>`__
 networking-midonet                     `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
 networking-mlnx                        `git://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
 networking-nec                         `git://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
 networking-odl                         `git://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
 networking-onos                        `git://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
+networking-opencontrail                `git://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
 networking-ovn                         `git://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
 networking-ovs-dpdk                    `git://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
 networking-plumgrid                    `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
@@ -138,11 +143,14 @@
 oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
 octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
 octavia-dashboard                      `git://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
+omni                                   `git://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
 os-xenapi                              `git://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
 osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
+oswin-tempest-plugin                   `git://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
 panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
 patrole                                `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
 picasso                                `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
+qinling                                `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
 sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
@@ -152,6 +160,7 @@
 senlin                                 `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
 solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
 stackube                               `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
+storlets                               `git://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
 tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
 tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
 tap-as-a-service-dashboard             `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 5b3c6cf..fae1a1d 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -12,6 +12,15 @@
 be sure that they will continue to work in the future as DevStack
 evolves.
 
+Prerequisites
+=============
+
+If you are planning to create a plugin that is going to host a service in the
+service catalog (that is, your plugin will use the command
+``get_or_create_service``) please make sure that you apply to the `service
+types authority`_ to reserve a valid service-type. This will help to make sure
+that all deployments of your service use the same service-type.
+
 Plugin Interface
 ================
 
@@ -250,3 +259,5 @@
 
 For additional inspiration on devstack plugins you can check out the
 `Plugin Registry <plugin-registry.html>`_.
+
+.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 60a7719..9cc4017 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -94,33 +94,95 @@
 
 Follow logs for a specific service::
 
-  journalctl -f --unit devstack@n-cpu.service
+  sudo journalctl -f --unit devstack@n-cpu.service
 
 Following logs for multiple services simultaneously::
 
-  journalctl -f --unit devstack@n-cpu.service --unit
-  devstack@n-cond.service
+  sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service
 
 or you can even do wild cards to follow all the nova services::
 
-  journalctl -f --unit devstack@n-*
+  sudo journalctl -f --unit devstack@n-*
 
 Use higher precision time stamps::
 
-  journalctl -f -o short-precise --unit devstack@n-cpu.service
+  sudo journalctl -f -o short-precise --unit devstack@n-cpu.service
 
 By default, journalctl strips out "unprintable" characters, including
 ASCII color codes. To keep the color codes (which can be interpreted by
 an appropriate terminal/pager - e.g. ``less``, the default)::
 
-  journalctl -a --unit devstack@n-cpu.service
+  sudo journalctl -a --unit devstack@n-cpu.service
 
 When outputting to the terminal using the default pager, long lines
-appear to be truncated, but horizontal scrolling is supported via the
-left/right arrow keys.
+will be truncated, but horizontal scrolling is supported via the
+left/right arrow keys. You can override this by setting the
+``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``.
+
+You can pipe the output to another tool, such as ``grep``. For
+example, to find a server instance UUID in the nova logs::
+
+  sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6
 
 See ``man 1 journalctl`` for more.
 
+Debugging
+=========
+
+Using pdb
+---------
+
+In order to break into a regular pdb session on a systemd-controlled
+service, you need to invoke the process manually - that is, take it out
+of systemd's control.
+
+Discover the command systemd is using to run the service::
+
+  systemctl show devstack@n-sch.service -p ExecStart --no-pager
+
+Stop the systemd service::
+
+  sudo systemctl stop devstack@n-sch.service
+
+Inject your breakpoint in the source, e.g.::
+
+  import pdb; pdb.set_trace()
+
+Invoke the command manually::
+
+  /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+Using remote-pdb
+----------------
+
+`remote-pdb`_ works while the process is under systemd control.
+
+Make sure you have remote-pdb installed::
+
+  sudo pip install remote-pdb
+
+Inject your breakpoint in the source, e.g.::
+
+  import remote_pdb; remote_pdb.set_trace()
+
+Restart the relevant service::
+
+  sudo systemctl restart devstack@n-api.service
+
+The remote-pdb code configures the telnet port when ``set_trace()`` is
+invoked.  Do whatever it takes to hit the instrumented code path, and
+inspect the logs for a message displaying the listening port::
+
+  Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ...
+
+Telnet to that port to enter the pdb session::
+
+  telnet 127.0.0.1 46771
+
+See the `remote-pdb`_ home page for more options.
+
+.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb
+
 Known Issues
 ============
 
diff --git a/files/debs/general b/files/debs/general
index 1dde03b..8e0018d 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -29,7 +29,6 @@
 python2.7
 python-dev
 python-gdbm # needed for testr
-screen
 tar
 tcpdump
 unzip
diff --git a/files/debs/neutron b/files/debs/neutron-common
similarity index 100%
rename from files/debs/neutron
rename to files/debs/neutron-common
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
index 2b643b8..0d9da44 100644
--- a/files/rpms-suse/dstat
+++ b/files/rpms-suse/dstat
@@ -1 +1,2 @@
 dstat
+python-psutil
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 370f240..0c1a281 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -24,7 +24,6 @@
 python-cmd2 # dist:opensuse-12.3
 python-devel  # pyOpenSSL
 python-xml
-screen
 systemd-devel # for systemd-python
 tar
 tcpdump
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron-common
similarity index 100%
rename from files/rpms-suse/neutron
rename to files/rpms-suse/neutron-common
diff --git a/files/rpms/general b/files/rpms/general
index 2443cc8..f3f8708 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -28,7 +28,6 @@
 pyOpenSSL # version in pip uses too much memory
 python-devel
 redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
-screen
 systemd-devel # for systemd-python
 tar
 tcpdump
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 1703083..5f19c6f 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,4 +1,3 @@
 memcached
 mod_ssl
-MySQL-python
 sqlite
diff --git a/files/rpms/neutron b/files/rpms/neutron-common
similarity index 94%
rename from files/rpms/neutron
rename to files/rpms/neutron-common
index a4e029a..0cc8d11 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron-common
@@ -6,7 +6,6 @@
 iptables
 iputils
 mysql-devel
-MySQL-python
 mysql-server # NOPRIME
 openvswitch # NOPRIME
 rabbitmq-server # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 632e796..64ed480 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -12,7 +12,6 @@
 libxml2-python
 m2crypto
 mysql-devel
-MySQL-python
 mysql-server # NOPRIME
 numpy # needed by websockify for spice console
 parted
diff --git a/functions b/functions
index 6f2164a..8b69c73 100644
--- a/functions
+++ b/functions
@@ -45,6 +45,37 @@
 # export it so child shells have access to the 'short_source' function also.
 export -f short_source
 
+# Download a file from a URL
+#
+# Will check cache (in $FILES) or download given URL.
+#
+# Argument is the URL to the remote file
+#
+# Will echo the local path to the file as the output.  Will die on
+# failure to download.
+#
+# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS
+# and tools/image_list.sh
+function get_extra_file {
+    local file_url=$1
+
+    file_name=$(basename "$file_url")
+    if [[ $file_url != file* ]]; then
+        # If the file isn't cached, download it
+        if [[ ! -f $FILES/$file_name ]]; then
+            wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name
+            if [[ $? -ne 0 ]]; then
+                die "$file_url could not be downloaded"
+            fi
+        fi
+        echo "$FILES/$file_name"
+        return
+    else
+        # just strip the file:// bit and that's the path to the file
+        echo $file_url | sed 's|^file://||'
+    fi
+}
+
 
 # Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
@@ -407,6 +438,26 @@
     return $rval
 }
 
+function wait_for_compute {
+    local timeout=$1
+    local rval=0
+    time_start "wait_for_service"
+    timeout $timeout bash -x <<EOF || rval=$?
+        ID=""
+        while [[ "\$ID" == "" ]]; do
+            sleep 1
+            ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname` --service nova-compute -c ID -f value)
+        done
+EOF
+    time_stop "wait_for_service"
+    # Figure out what's happening on platforms where this doesn't work
+    if [[ "$rval" != 0 ]]; then
+        echo "Didn't find service registered by hostname after $timeout seconds"
+        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list
+    fi
+    return $rval
+}
+
 
 # ping check
 # Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK``
diff --git a/functions-common b/functions-common
index 660df79..030ff8c 100644
--- a/functions-common
+++ b/functions-common
@@ -45,6 +45,7 @@
 declare -A -g GITDIR
 
 TRACK_DEPENDS=${TRACK_DEPENDS:-False}
+KILL_PATH="$(which kill)"
 
 # Save these variables to .stackenv
 STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
@@ -319,10 +320,7 @@
     if [[ -x $(command -v apt-get 2>/dev/null) ]]; then
         sudo apt-get install -y lsb-release
     elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
-        # XXX: old code paths seem to have assumed SUSE platforms also
-        # had "yum".  Keep this ordered above yum so we don't try to
-        # install the rh package.  suse calls it just "lsb"
-        sudo zypper -n install lsb
+        sudo zypper -n install lsb-release
     elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
         sudo dnf install -y redhat-lsb-core
     elif [[ -x $(command -v yum 2>/dev/null) ]]; then
@@ -519,7 +517,7 @@
         if [[ ! -d $git_dest ]]; then
             if [[ "$ERROR_ON_CLONE" = "True" ]]; then
                 echo "The $git_dest project was not found; if this is a gate job, add"
-                echo "the project to the \$PROJECTS variable in the job definition."
+                echo "the project to 'required-projects' in the job definition."
                 die $LINENO "Cloning not allowed in this configuration"
             fi
             git_timed clone $git_clone_flags $git_remote $git_dest
@@ -1211,9 +1209,9 @@
             if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/keystone"
             fi
-        elif [[ $service == q-* ]]; then
-            if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then
-                file_to_parse="${file_to_parse} ${package_dir}/neutron"
+        elif [[ $service == q-* || $service == neutron-* ]]; then
+            if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then
+                file_to_parse="${file_to_parse} ${package_dir}/neutron-common"
             fi
         elif [[ $service == ir-* ]]; then
             if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then
@@ -1380,62 +1378,6 @@
         zypper --non-interactive install --auto-agree-with-licenses "$@"
 }
 
-
-# Process Functions
-# =================
-
-# _run_process() is designed to be backgrounded by run_process() to simulate a
-# fork.  It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it().  The log filename is derived
-# from the service name.
-# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
-# If an optional group is provided sg will be used to set the group of
-# the command.
-# _run_process service "command-line" [group]
-function _run_process {
-    # disable tracing through the exec redirects, it's just confusing in the logs.
-    xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-
-    local service=$1
-    local command="$2"
-    local group=$3
-
-    # Undo logging redirections and close the extra descriptors
-    exec 1>&3
-    exec 2>&3
-    exec 3>&-
-    exec 6>&-
-
-    local logfile="${service}.log.${CURRENT_LOG_TIME}"
-    local real_logfile="${LOGDIR}/${logfile}"
-    if [[ -n ${LOGDIR} ]]; then
-        exec 1>&"$real_logfile" 2>&1
-        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            # Drop the backward-compat symlink
-            ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
-        fi
-
-        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
-        export PYTHONUNBUFFERED=1
-    fi
-
-    # reenable xtrace before we do *real* work
-    $xtrace
-
-    # Run under ``setsid`` to force the process to become a session and group leader.
-    # The pid saved can be used with pkill -g to get the entire process group.
-    if [[ -n "$group" ]]; then
-        setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
-    else
-        setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
-    fi
-
-    # Just silently exit this process
-    exit 0
-}
-
 function write_user_unit_file {
     local service=$1
     local command="$2"
@@ -1451,6 +1393,9 @@
     iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
     iniset -sudo $unitfile "Service" "User" "$user"
     iniset -sudo $unitfile "Service" "ExecStart" "$command"
+    iniset -sudo $unitfile "Service" "KillMode" "process"
+    iniset -sudo $unitfile "Service" "TimeoutStopSec" "infinity"
+    iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
     fi
@@ -1472,8 +1417,9 @@
     iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service"
     iniset -sudo $unitfile "Service" "User" "$user"
     iniset -sudo $unitfile "Service" "ExecStart" "$command"
+    iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
     iniset -sudo $unitfile "Service" "Type" "notify"
-    iniset -sudo $unitfile "Service" "KillSignal" "SIGQUIT"
+    iniset -sudo $unitfile "Service" "KillMode" "process"
     iniset -sudo $unitfile "Service" "Restart" "always"
     iniset -sudo $unitfile "Service" "NotifyAccess" "all"
     iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
@@ -1535,21 +1481,6 @@
     $SYSTEMCTL start $systemd_service
 }
 
-# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
-# This is used for ``service_check`` when all the ``screen_it`` are called finished
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# init_service_check
-function init_service_check {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
-    fi
-
-    rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
-}
-
 # Find out if a process exists by partial name.
 # is_running name
 function is_running {
@@ -1564,7 +1495,6 @@
 # If the command includes shell metachatacters (;<>*) it must be run using a shell
 # If an optional group is provided sg will be used to run the
 # command as that group.
-# Uses globals ``USE_SCREEN``
 # run_process service "command-line" [group] [user]
 function run_process {
     local service=$1
@@ -1576,140 +1506,16 @@
 
     time_start "run_process"
     if is_service_enabled $service; then
-        if [[ "$USE_SYSTEMD" = "True" ]]; then
-            _run_under_systemd "$name" "$command" "$group" "$user"
-        elif [[ "$USE_SCREEN" = "True" ]]; then
-            if [[ "$user" == "root" ]]; then
-                command="sudo $command"
-            fi
-            screen_process "$name" "$command" "$group"
-        else
-            # Spawn directly without screen
-            if [[ "$user" == "root" ]]; then
-                command="sudo $command"
-            fi
-            _run_process "$name" "$command" "$group" &
-        fi
+        _run_under_systemd "$name" "$command" "$group" "$user"
     fi
     time_stop "run_process"
 }
 
-# Helper to launch a process in a named screen
-# Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``,
-# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING``
-# screen_process name "command-line" [group]
-# Run a command in a shell in a screen window, if an optional group
-# is provided, use sg to set the group of the command.
-function screen_process {
-    local name=$1
-    local command="$2"
-    local group=$3
-
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-    screen -S $SCREEN_NAME -X screen -t $name
-
-    local logfile="${name}.log.${CURRENT_LOG_TIME}"
-    local real_logfile="${LOGDIR}/${logfile}"
-    echo "LOGDIR: $LOGDIR"
-    echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
-    echo "log: $real_logfile"
-    if [[ -n ${LOGDIR} ]]; then
-        if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
-            screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
-            screen -S $SCREEN_NAME -p $name -X log on
-        fi
-        # If logging isn't active then avoid a broken symlink
-        touch "$real_logfile"
-        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log"
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            # Drop the backward-compat symlink
-            ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
-        fi
-    fi
-
-    # sleep to allow bash to be ready to be send the command - we are
-    # creating a new window in screen and then sends characters, so if
-    # bash isn't running by the time we send the command, nothing
-    # happens.  This sleep was added originally to handle gate runs
-    # where we needed this to be at least 3 seconds to pass
-    # consistently on slow clouds. Now this is configurable so that we
-    # can determine a reasonable value for the local case which should
-    # be much smaller.
-    sleep ${SCREEN_SLEEP:-3}
-
-    NL=`echo -ne '\015'`
-    # This fun command does the following:
-    # - the passed server command is backgrounded
-    # - the pid of the background process is saved in the usual place
-    # - the server process is brought back to the foreground
-    # - if the server process exits prematurely the fg command errors
-    # and a message is written to stdout and the process failure file
-    #
-    # The pid saved can be used in stop_process() as a process group
-    # id to kill off all child processes
-    if [[ -n "$group" ]]; then
-        command="sg $group '$command'"
-    fi
-
-    # Append the process to the screen rc file
-    screen_rc "$name" "$command"
-
-    screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start. Exit code: \$?\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL"
-}
-
-# Screen rc file builder
-# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING``
-# screen_rc service "command-line"
-function screen_rc {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-    if [[ ! -e $SCREENRC ]]; then
-        # Name the screen session
-        echo "sessionname $SCREEN_NAME" > $SCREENRC
-        # Set a reasonable statusbar
-        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
-        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
-        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
-        echo "screen -t shell bash" >> $SCREENRC
-    fi
-    # If this service doesn't already exist in the screenrc file
-    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
-        NL=`echo -ne '\015'`
-        echo "screen -t $1 bash" >> $SCREENRC
-        echo "stuff \"$2$NL\"" >> $SCREENRC
-
-        if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
-            echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
-            echo "log on" >>$SCREENRC
-        fi
-    fi
-}
-
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# screen_stop_service service
-function screen_stop_service {
-    local service=$1
-
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-    if is_service_enabled $service; then
-        # Clean up the screen window
-        screen -S $SCREEN_NAME -p $service -X kill || true
-    fi
-}
-
 # Stop a service process
 # If a PID is available use it, kill the whole process group via TERM
 # If screen is being used kill the screen window; this will catch processes
 # that did not leave a PID behind
-# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
+# Uses globals ``SERVICE_DIR``
 # stop_process service
 function stop_process {
     local service=$1
@@ -1724,149 +1530,27 @@
             $SYSTEMCTL stop devstack@$service.service
             $SYSTEMCTL disable devstack@$service.service
         fi
-
-        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
-            pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
-            # oslo.service tends to stop actually shutting down
-            # reliably in between releases because someone believes it
-            # is dying too early due to some inflight work they
-            # have. This is a tension. It happens often enough we're
-            # going to just account for it in devstack and assume it
-            # doesn't work.
-            #
-            # Set OSLO_SERVICE_WORKS=True to skip this block
-            if [[ -z "$OSLO_SERVICE_WORKS" ]]; then
-                # TODO(danms): Remove this double-kill when we have
-                # this fixed in all services:
-                # https://bugs.launchpad.net/oslo-incubator/+bug/1446583
-                sleep 1
-                # /bin/true because pkill on a non existent process returns an error
-                pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true
-            fi
-            rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
-        fi
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            # Clean up the screen window
-            screen_stop_service $service
-        fi
     fi
 }
 
-# Helper to get the status of each running service
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# service_check
+# use systemctl to check service status
 function service_check {
     local service
-    local failures
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        echo "No service status directory found"
-        return
-    fi
-
-    # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
-    # make this -o errexit safe
-    failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
-
-    for service in $failures; do
-        service=`basename $service`
-        service=${service%.failure}
-        echo "Error: Service $service is not running"
-    done
-
-    if [ -n "$failures" ]; then
-        die $LINENO "More details about the above errors can be found with screen"
-    fi
-}
-
-# Tail a log file in a screen if USE_SCREEN is true.
-# Uses globals ``USE_SCREEN``
-function tail_log {
-    local name=$1
-    local logfile=$2
-
-    if [[ "$USE_SCREEN" = "True" ]]; then
-        screen_process "$name" "sudo tail -f $logfile | sed -u 's/\\\\\\\\x1b/\o033/g'"
-    fi
-}
-
-
-# Deprecated Functions
-# --------------------
-
-# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
-# fork.  It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it().  The log filename is derived
-# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
-# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
-# _old_run_process service "command-line"
-function _old_run_process {
-    local service=$1
-    local command="$2"
-
-    # Undo logging redirections and close the extra descriptors
-    exec 1>&3
-    exec 2>&3
-    exec 3>&-
-    exec 6>&-
-
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1
-        ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log
-
-        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
-        export PYTHONUNBUFFERED=1
-    fi
-
-    exec /bin/bash -c "$command"
-    die "$service exec failure: $command"
-}
-
-# old_run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command.  This is meant to duplicate the semantics
-# of screen_it() without screen.  PIDs are written to
-# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
-# old_run_process service "command-line"
-function old_run_process {
-    local service=$1
-    local command="$2"
-
-    # Spawn the child process
-    _old_run_process "$service" "$command" &
-    echo $!
-}
-
-# Compatibility for existing start_XXXX() functions
-# Uses global ``USE_SCREEN``
-# screen_it service "command-line"
-function screen_it {
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            screen_process "$1" "$2"
-        else
-            # Spawn directly without screen
-            old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+    for service in ${ENABLED_SERVICES//,/ }; do
+        # because some things got renamed like key => keystone
+        if $SYSTEMCTL is-enabled devstack@$service.service; then
+            # no-pager is needed because otherwise status dumps to a
+            # pager when in interactive mode, which will stop a manual
+            # devstack run.
+            $SYSTEMCTL status devstack@$service.service --no-pager
         fi
-    fi
+    done
 }
 
-# Compatibility for existing stop_XXXX() functions
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# screen_stop service
-function screen_stop {
-    # Clean up the screen window
-    stop_process $1
-}
 
+function tail_log {
+    deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens"
+}
 
 # Plugin Functions
 # =================
@@ -1882,7 +1566,7 @@
     local name=$1
     local url=$2
     local branch=${3:-master}
-    if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then
+    if is_plugin_enabled $name; then
         die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}"
     fi
     DEVSTACK_PLUGINS+=",$name"
@@ -1891,6 +1575,19 @@
     GITBRANCH[$name]=$branch
 }
 
+# is_plugin_enabled <name>
+#
+# Check if the plugin was enabled, e.g. using enable_plugin
+#
+# ``name`` The name with which the plugin was enabled
+function is_plugin_enabled {
+    local name=$1
+    if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then
+        return 0
+    fi
+    return 1
+}
+
 # fetch_plugins
 #
 # clones all plugins
@@ -2316,7 +2013,7 @@
 # Check if this is a valid ipv4 address string
 function is_ipv4_address {
     local address=$1
-    local regex='([0-9]{1,3}.){3}[0-9]{1,3}'
+    local regex='([0-9]{1,3}\.){3}[0-9]{1,3}'
     # TODO(clarkb) make this more robust
     if [[ "$address" =~ $regex ]] ; then
         return 0
@@ -2380,13 +2077,31 @@
 }
 
 
+# Return just the <major>.<minor> for the given python interpreter
+function _get_python_version {
+    local interp=$1
+    local version
+    # disable erroring out here, otherwise if python 3 doesn't exist we fail hard.
+    if [[ -x $(which $interp 2> /dev/null) ]]; then
+        version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+    fi
+    echo ${version}
+}
+
 # Return the current python as "python<major>.<minor>"
 function python_version {
     local python_version
-    python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+    python_version=$(_get_python_version python2)
     echo "python${python_version}"
 }
 
+function python3_version {
+    local python3_version
+    python3_version=$(_get_python_version python3)
+    echo "python${python3_version}"
+}
+
+
 # Service wrapper to restart services
 # restart_service service-name
 function restart_service {
@@ -2574,11 +2289,13 @@
 function time_totals {
     local elapsed_time
     local end_time
-    local len=15
+    local len=20
     local xtrace
+    local unaccounted_time
 
     end_time=$(date +%s)
     elapsed_time=$(($end_time - $_TIME_BEGIN))
+    unaccounted_time=$elapsed_time
 
     # pad 1st column this far
     for t in ${!_TIME_TOTAL[*]}; do
@@ -2595,16 +2312,19 @@
     echo
     echo "========================="
     echo "DevStack Component Timing"
+    echo " (times are in seconds)  "
     echo "========================="
-    printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time"
-    echo
     for t in ${!_TIME_TOTAL[*]}; do
         local v=${_TIME_TOTAL[$t]}
         # because we're recording in milliseconds
         v=$(($v / 1000))
         printf "%-${len}s %3d\n" "$t" "$v"
+        unaccounted_time=$(($unaccounted_time - $v))
     done
+    echo "-------------------------"
+    printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time"
     echo "========================="
+    printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time"
 
     $xtrace
 }
diff --git a/inc/python b/inc/python
index f388f48..9c810ec 100644
--- a/inc/python
+++ b/inc/python
@@ -219,7 +219,8 @@
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
 # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
-# pip_install package [package ...]
+# Usage:
+#  pip_install pip_arguments
 function pip_install {
     local xtrace result
     xtrace=$(set +o | grep xtrace)
@@ -241,6 +242,26 @@
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
     fi
+
+    # Try to extract the path of the package we are installing into
+    # package_dir.  We need this to check for test-requirements.txt,
+    # at least.
+    #
+    # ${!#} expands to the last positional argument to this function.
+    # With "extras" syntax included, our arguments might be something
+    # like:
+    #  -e /path/to/fooproject[extra]
+    # Thus this magic line grabs just the path without extras
+    #
+    # Note that this makes no sense if this is a pypi (rather than
+    # local path) install; ergo you must check this path exists before
+    # use.  Also, if we had multiple or mixed installs, we would also
+    # likely break.  But for historical reasons, it's basically only
+    # the other wrapper functions in here calling this to install
+    # local packages, and they do so with single call per install.  So
+    # this works (for now...)
+    local package_dir=${!#%\[*\]}
+
     if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
         # TRACK_DEPENDS=True installation creates a circular dependency when
         # we attempt to install virtualenv into a virtualenv, so we must global
@@ -261,7 +282,6 @@
                 # versions supported, and if we find the version of
                 # python3 we've been told to use, use that instead of the
                 # default pip
-                local package_dir=${!#}
                 local python_versions
 
                 # Special case some services that have experimental
@@ -323,7 +343,7 @@
 
     # Also install test requirements
     local install_test_reqs=""
-    local test_req="${!#}/test-requirements.txt"
+    local test_req="${package_dir}/test-requirements.txt"
     if [[ -e "$test_req" ]]; then
         install_test_reqs="-r $test_req"
     fi
@@ -346,6 +366,9 @@
 }
 
 function pip_uninstall {
+    # Skip uninstall if offline
+    [[ "${OFFLINE}" = "True" ]] && return
+
     local name=$1
     if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
         local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
@@ -383,7 +406,20 @@
 # determine if a package was installed from git
 function lib_installed_from_git {
     local name=$1
-    pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git'
+    # Note "pip freeze" doesn't always work here, because it tries to
+    # be smart about finding the remote of the git repo the package
+    # was installed from.  This doesn't work with zuul which clones
+    # repos with no remote.
+    #
+    # The best option seems to be to use "pip list" which will tell
+    # you the path an editable install was installed from; for example
+    # in response to something like
+    #  pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate'
+    # pip list shows
+    #  bashate (0.5.2.dev19, /tmp/env/src/bashate)
+    # Thus we look for "path after a comma" to indicate we were
+    # installed from some local place
+    pip list 2>/dev/null | grep -- "$name" | grep -q -- ', .*)$'
 }
 
 # check that everything that's in LIBS_FROM_GIT was actually installed
diff --git a/lib/apache b/lib/apache
index 5838a4d..3af3411 100644
--- a/lib/apache
+++ b/lib/apache
@@ -259,11 +259,16 @@
     iniset "$file" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
     iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload true
+    iniset "$file" uwsgi exit-on-reload false
+    # Set worker-reload-mercy so that worker will not exit till the time
+    # configured after graceful shutdown
+    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
     iniset "$file" uwsgi enable-threads true
     iniset "$file" uwsgi plugins python
     # uwsgi recommends this to prevent thundering herd on accept.
     iniset "$file" uwsgi thunder-lock true
+    # Set hook to trigger graceful shutdown on SIGTERM
+    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
     # Override the default size for headers from the 4k default.
     iniset "$file" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
@@ -310,11 +315,16 @@
     iniset "$file" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
     iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload true
+    iniset "$file" uwsgi exit-on-reload false
     iniset "$file" uwsgi enable-threads true
     iniset "$file" uwsgi plugins python
     # uwsgi recommends this to prevent thundering herd on accept.
     iniset "$file" uwsgi thunder-lock true
+    # Set hook to trigger graceful shutdown on SIGTERM
+    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    # Set worker-reload-mercy so that worker will not exit till the time
+    # configured after graceful shutdown
+    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
     # Override the default size for headers from the 4k default.
     iniset "$file" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
diff --git a/lib/cinder b/lib/cinder
index 4274be7..07f82a1 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -70,12 +70,11 @@
 CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 
 # What type of LVM device should Cinder use for LVM backend
-# Defaults to default, which is thick, the other valid choice
-# is thin, which as the name implies utilizes lvm thin provisioning.
-# Thinly provisioned LVM volumes may be more efficient when using the Cinder
-# image cache, but there are also known race failures with volume snapshots
-# and thinly provisioned LVM volumes, see bug 1642111 for details.
-CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
+# Defaults to auto, which will do thin provisioning if it's a fresh
+# volume group, otherwise it will do thick. The other valid choices are
+# default, which is thick, or thin, which as the name implies utilizes lvm
+# thin provisioning.
+CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto}
 
 # Default backends
 # The backend format is type:name where type is one of the supported backend
@@ -230,16 +229,6 @@
 
     configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
 
-    # Change the default nova_catalog_info and nova_catalog_admin_info values in
-    # cinder so that the service name cinder is searching for matches that set for
-    # nova in keystone.
-    if [[ -n "$CINDER_NOVA_CATALOG_INFO" ]]; then
-        iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO
-    fi
-    if [[ -n "$CINDER_NOVA_CATALOG_ADMIN_INFO" ]]; then
-        iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO
-    fi
-
     iniset $CINDER_CONF DEFAULT auth_strategy keystone
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
@@ -254,9 +243,7 @@
     iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
     iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
 
-    iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
-
-    iniset $CINDER_CONF key_manager api_class cinder.keymgr.conf_key_mgr.ConfKeyManager
+    iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
@@ -296,8 +283,7 @@
             # Set the service port for a proxy to take the original
             if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
                 iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
-                iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST
-                iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST
+                iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True
             else
                 iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
                 iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
@@ -333,10 +319,9 @@
         iniset $CINDER_CONF DEFAULT glance_api_version 2
     fi
 
-    # Set os_privileged_user credentials (used for os-assisted-snapshots)
-    iniset $CINDER_CONF DEFAULT os_privileged_user_name nova
-    iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
-    iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME"
+    # Set nova credentials (used for os-assisted-snapshots)
+    configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova
+    iniset $CINDER_CONF nova region_name "$REGION_NAME"
     iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
     if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
@@ -473,12 +458,12 @@
     fi
 }
 
-# start_cinder() - Start running processes, including screen
+# start_cinder() - Start running processes
 function start_cinder {
     local service_port=$CINDER_SERVICE_PORT
     local service_protocol=$CINDER_SERVICE_PROTOCOL
     local cinder_url
-    if is_service_enabled tls-proxy && ["$CINDER_USE_MOD_WSGI" == "False"]; then
+    if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
         service_port=$CINDER_SERVICE_PORT_INT
         service_protocol="http"
     fi
@@ -506,12 +491,12 @@
         if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
             run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
             cinder_url=$service_protocol://$SERVICE_HOST:$service_port
-            # Start proxy if tsl enabled
-            if is_service_enabled tls_proxy; then
-                start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_POR_INT
+            # Start proxy if tls enabled
+            if is_service_enabled tls-proxy; then
+                start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
             fi
         else
-            run_process "c-api" "$CINDER_BIN_DIR/uwsgi --ini $CINDER_UWSGI_CONF"
+            run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
             cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
         fi
     fi
@@ -533,12 +518,9 @@
 # stop_cinder() - Stop running processes
 function stop_cinder {
     stop_process c-api
-
-    # Kill the cinder screen windows
-    local serv
-    for serv in c-bak c-sch c-vol; do
-        stop_process $serv
-    done
+    stop_process c-bak
+    stop_process c-sch
+    stop_process c-vol
 }
 
 # create_volume_types() - Create Cinder's configured volume types
diff --git a/lib/dstat b/lib/dstat
index 982b703..fe38d75 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -16,7 +16,7 @@
 _XTRACE_DSTAT=$(set +o | grep xtrace)
 set +o xtrace
 
-# start_dstat() - Start running processes, including screen
+# start_dstat() - Start running processes
 function start_dstat {
     # A better kind of sysstat, with the top process per time slice
     run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR"
diff --git a/lib/etcd3 b/lib/etcd3
index 0e1fbd5..51df8e4 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -24,15 +24,9 @@
 # --------
 
 # Set up default values for etcd
-ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
-ETCD_VERSION=${ETCD_VERSION:-v3.1.7}
-ETCD_DATA_DIR="$DEST/data/etcd"
+ETCD_DATA_DIR="$DATA_DIR/etcd"
 ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
 ETCD_BIN_DIR="$DEST/bin"
-ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52"
-# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does.
-ETCD_SHA256_ARM64=""
-ETCD_SHA256_PPC64=""
 ETCD_PORT=2379
 
 if is_ubuntu ; then
@@ -46,9 +40,13 @@
     cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01"
     cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380"
     cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380"
-    cmd+=" --advertise-client-urls http://${HOST_IP}:$ETCD_PORT"
-    cmd+=" --listen-peer-urls http://0.0.0.0:2380 "
-    cmd+=" --listen-client-urls http://${HOST_IP}:$ETCD_PORT"
+    cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT"
+    if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then
+        cmd+=" --listen-peer-urls http://[::]:2380 "
+    else
+        cmd+=" --listen-peer-urls http://0.0.0.0:2380 "
+    fi
+    cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
 
     local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
     write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root"
@@ -57,6 +55,9 @@
     iniset -sudo $unitfile "Service" "Type" "notify"
     iniset -sudo $unitfile "Service" "Restart" "on-failure"
     iniset -sudo $unitfile "Service" "LimitNOFILE" "65536"
+    if is_arch "aarch64"; then
+        iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64"
+    fi
 
     $SYSTEMCTL daemon-reload
     $SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE
@@ -92,37 +93,19 @@
 function install_etcd3 {
     echo "Installing etcd"
 
-    # Make sure etcd3 downloads the correct architecture
-    if is_arch "x86_64"; then
-        ETCD_ARCH="amd64"
-        ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64}
-    elif is_arch "aarch64"; then
-        ETCD_ARCH="arm64"
-        ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64}
-    elif is_arch "ppc64le"; then
-        ETCD_ARCH="ppc64le"
-        ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64}
-    else
-        exit_distro_not_supported "invalid hardware type - $ETCD_ARCH"
-    fi
-
-    ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
-
     # Create the necessary directories
     sudo mkdir -p $ETCD_BIN_DIR
     sudo mkdir -p $ETCD_DATA_DIR
 
     # Download and cache the etcd tgz for subsequent use
+    local etcd_file
+    etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)"
     if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then
-        ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
-        if [ ! -f "$FILES/$ETCD_DOWNLOAD_FILE" ]; then
-            wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE
-        fi
-        echo "${ETCD_SHA256} $FILES/${ETCD_DOWNLOAD_FILE}" > $FILES/etcd.sha256sum
-        # NOTE(sdague): this should go fatal if this fails
-        sha256sum -c $FILES/etcd.sha256sum
+        echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum
+        # NOTE(yuanke wei): rm the damaged file when checksum fails
+        sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1)
 
-        tar xzvf $FILES/$ETCD_DOWNLOAD_FILE -C $FILES
+        tar xzvf $etcd_file -C $FILES
         sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd
     fi
     if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then
diff --git a/lib/glance b/lib/glance
index 0a5b9f5..ad286ba 100644
--- a/lib/glance
+++ b/lib/glance
@@ -105,6 +105,11 @@
 function configure_glance {
     sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
 
+    # We run this here as this configures cache dirs for the auth middleware
+    # which is used in the api server and not in the registry. The api
+    # server is configured through this function and not init_glance.
+    create_glance_cache_dir
+
     # Copy over our glance configurations and update them
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -279,7 +284,7 @@
     fi
 }
 
-# create_glance_cache_dir() - Part of the init_glance() process
+# create_glance_cache_dir() - Part of the configure_glance() process
 function create_glance_cache_dir {
     # Create cache dir
     sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact
@@ -306,8 +311,6 @@
     # Load metadata definitions
     $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
     time_stop "dbsync"
-
-    create_glance_cache_dir
 }
 
 # install_glanceclient() - Collect source and prepare
@@ -333,7 +336,7 @@
     setup_develop $GLANCE_DIR
 }
 
-# start_glance() - Start running processes, including screen
+# start_glance() - Start running processes
 function start_glance {
     local service_protocol=$GLANCE_SERVICE_PROTOCOL
     if is_service_enabled tls-proxy; then
@@ -345,7 +348,7 @@
 
     run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
-        run_process g-api "$GLANCE_BIN_DIR/uwsgi --ini $GLANCE_UWSGI_CONF"
+        run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
     else
         run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     fi
@@ -358,7 +361,6 @@
 
 # stop_glance() - Stop running processes
 function stop_glance {
-    # Kill the Glance screen windows
     stop_process g-api
     stop_process g-reg
 }
diff --git a/lib/horizon b/lib/horizon
index becc5a0..3d2f68d 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -181,13 +181,12 @@
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
 }
 
-# start_horizon() - Start running processes, including screen
+# start_horizon() - Start running processes
 function start_horizon {
     restart_apache_server
-    tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
 }
 
-# stop_horizon() - Stop running processes (non-screen)
+# stop_horizon() - Stop running processes
 function stop_horizon {
     stop_apache_server
 }
diff --git a/lib/keystone b/lib/keystone
index 1061081..714f089 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -148,16 +148,18 @@
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    # TODO: remove admin at pike-2
-    # These files will be created if we are running WSGI_MODE="uwsgi"
-    remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
-    remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
-    sudo rm -f $(apache_site_config_for keystone-wsgi-public)
-    sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
-
-    # These files will be created if we are running WSGI_MODE="mod_wsgi"
-    disable_apache_site keystone
-    sudo rm -f $(apache_site_config_for keystone)
+    if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
+        # These files will be created if we are running WSGI_MODE="mod_wsgi"
+        disable_apache_site keystone
+        sudo rm -f $(apache_site_config_for keystone)
+    else
+        stop_process "keystone"
+        # TODO: remove admin at pike-2
+        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
+        remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
+        sudo rm -f $(apache_site_config_for keystone-wsgi-public)
+        sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
+    fi
 }
 
 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
@@ -350,7 +352,7 @@
     # The Member role is used by Horizon and Swift so we need to keep it:
     local member_role="member"
 
-    # Captial Member role is legacy hard coded in Horizon / Swift
+    # Capital Member role is legacy hard coded in Horizon / Swift
     # configs. Keep it around.
     get_or_create_role "Member"
 
@@ -448,7 +450,7 @@
 
     iniset $conf_file $section cafile $SSL_BUNDLE_FILE
     iniset $conf_file $section signing_dir $signing_dir
-    iniset $conf_file $section memcached_servers $SERVICE_HOST:11211
+    iniset $conf_file $section memcached_servers localhost:11211
 }
 
 # init_keystone() - Initialize databases, etc.
@@ -534,7 +536,7 @@
     fi
 }
 
-# start_keystone() - Start running processes, including screen
+# start_keystone() - Start running processes
 function start_keystone {
     # Get right service port for testing
     local service_port=$KEYSTONE_SERVICE_PORT
@@ -547,10 +549,8 @@
     if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
         enable_apache_site keystone
         restart_apache_server
-        tail_log key /var/log/$APACHE_NAME/keystone.log
-        tail_log key-access /var/log/$APACHE_NAME/keystone_access.log
     else # uwsgi
-        run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
+        run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
     fi
 
     echo "Waiting for keystone to start..."
@@ -582,12 +582,7 @@
         restart_apache_server
     else
         stop_process keystone
-        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
-        # TODO(remove in at pike-2)
-        remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
     fi
-    # Kill the Keystone screen window
-    stop_process key
 }
 
 # bootstrap_keystone() - Initialize user, role and project
@@ -626,12 +621,6 @@
     iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap"
 
     # LDAP settings for Users domain
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_delete "False"
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_update "False"
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_allow_create "False"
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_delete "False"
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_update "False"
-    iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_allow_create "False"
     iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
     iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson"
     iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn"
diff --git a/lib/libraries b/lib/libraries
index 4ceb804..6d52f64 100644
--- a/lib/libraries
+++ b/lib/libraries
@@ -30,6 +30,7 @@
 GITDIR["futurist"]=$DEST/futurist
 GITDIR["os-client-config"]=$DEST/os-client-config
 GITDIR["osc-lib"]=$DEST/osc-lib
+GITDIR["osc-placement"]=$DEST/osc-placement
 GITDIR["oslo.cache"]=$DEST/oslo.cache
 GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
 GITDIR["oslo.config"]=$DEST/oslo.config
@@ -91,6 +92,7 @@
     _install_lib_from_source "debtcollector"
     _install_lib_from_source "futurist"
     _install_lib_from_source "osc-lib"
+    _install_lib_from_source "osc-placement"
     _install_lib_from_source "os-client-config"
     _install_lib_from_source "oslo.cache"
     _install_lib_from_source "oslo.concurrency"
diff --git a/lib/neutron b/lib/neutron
index 2a660ec..21c8d4c 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -72,7 +72,8 @@
 NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone}
 NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
 NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
+NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
+NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
 
 # This is needed because _neutron_ovs_base_configure_l3_agent will set
 # external_network_bridge
@@ -125,6 +126,13 @@
     done
 }
 
+# configure_root_helper_options() - Configure agent rootwrap helper options
+function configure_root_helper_options {
+    local conffile=$1
+    iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD"
+    iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD"
+}
+
 # configure_neutron() - Set config files, create data dirs, etc
 function configure_neutron_new {
     sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
@@ -185,6 +193,7 @@
     if is_service_enabled neutron-agent; then
         iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
         iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF
 
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
@@ -208,7 +217,7 @@
         # make it so we have working DNS from guests
         iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
 
-        iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+        configure_root_helper_options $NEUTRON_DHCP_CONF
         iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
         neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
     fi
@@ -217,9 +226,16 @@
         cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
         iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
         neutron_service_plugin_class_add router
-        iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+        configure_root_helper_options $NEUTRON_L3_CONF
         iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
+
+        # Configure the neutron agent to serve external network ports
+        if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
+        else
+            iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
+        fi
     fi
 
     # Metadata
@@ -229,7 +245,8 @@
         iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
         iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
-        iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+        # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
+        configure_root_helper_options $NEUTRON_META_CONF
 
         # TODO(dtroyer): remove the v2.0 hard code below
         iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
@@ -242,6 +259,7 @@
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
+        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
     fi
 
     # Metering
@@ -253,12 +271,6 @@
 
 # configure_neutron_rootwrap() - configure Neutron's rootwrap
 function configure_neutron_rootwrap {
-    # Set the paths of certain binaries
-    neutron_rootwrap=$(get_rootwrap_location neutron)
-
-    # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
-    local rootwrap_sudoer_cmd="${neutron_rootwrap} $NEUTRON_CONF_DIR/rootwrap.conf"
-
     # Deploy new rootwrap filters files (owned by root).
     # Wipe any existing rootwrap.d files first
     if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then
@@ -275,7 +287,8 @@
 
     # Set up the rootwrap sudoers for Neutron
     tempfile=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd *" >$tempfile
+    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile
+    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile
     chmod 0440 $tempfile
     sudo chown root:root $tempfile
     sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap
@@ -409,7 +422,7 @@
     fi
 }
 
-# start_neutron() - Start running processes, including screen
+# start_neutron() - Start running processes
 function start_neutron_new {
     # Start up the neutron agents if enabled
     # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
@@ -442,11 +455,11 @@
     fi
 
     if is_service_enabled neutron-metering; then
-        run_process neutron-metering "$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
+        run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
     fi
 }
 
-# stop_neutron() - Stop running processes (non-screen)
+# stop_neutron() - Stop running processes
 function stop_neutron_new {
     for serv in neutron-api neutron-agent neutron-l3; do
         stop_process $serv
@@ -493,6 +506,13 @@
     _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
 }
 
+# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
+function neutron_deploy_rootwrap_filters_new {
+    local srcdir=$1
+    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
+    sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
+}
+
 # Dispatch functions
 # These are needed for compatibility between the old and new implementations
 # where there are function name overlaps.  These will be removed when
@@ -607,5 +627,14 @@
     fi
 }
 
+function neutron_deploy_rootwrap_filters {
+    if is_neutron_legacy_enabled; then
+        # Call back to old function
+        _neutron_deploy_rootwrap_filters "$@"
+    else
+        neutron_deploy_rootwrap_filters_new "$@"
+    fi
+}
+
 # Restore xtrace
 $XTRACE
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 784f3a8..0ccb17c 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -168,7 +168,7 @@
 #
 Q_DVR_MODE=${Q_DVR_MODE:-legacy}
 if [[ "$Q_DVR_MODE" != "legacy" ]]; then
-    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
 fi
 
 # Provider Network Configurations
@@ -455,7 +455,7 @@
     fi
 }
 
-# Start running processes, including screen
+# Start running processes
 function start_neutron_service_and_check {
     local service_port=$Q_PORT
     local service_protocol=$Q_PROTOCOL
@@ -524,7 +524,7 @@
     stop_process q-agt
 }
 
-# stop_mutnauq_other() - Stop running processes (non-screen)
+# stop_mutnauq_other() - Stop running processes
 function stop_mutnauq_other {
     if is_service_enabled q-dhcp; then
         stop_process q-dhcp
@@ -718,6 +718,7 @@
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
     fi
 
     _neutron_setup_rootwrap
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 07974fe..98315b7 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -87,7 +87,8 @@
 
 # Subnetpool defaults
 USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
-SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
+SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"}
+SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"}
 
 SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
 SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
@@ -169,10 +170,10 @@
     if is_networking_extension_supported "auto-allocated-topology"; then
         if [[ "$USE_SUBNETPOOL" == "True" ]]; then
             if [[ "$IP_VERSION" =~ 4.* ]]; then
-                SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2)
+                SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id)
             fi
             if [[ "$IP_VERSION" =~ .*6 ]]; then
-                SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2)
+                SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id)
             fi
         fi
     fi
diff --git a/lib/nova b/lib/nova
index 8311a54..ea0d2f7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -101,7 +101,7 @@
 
 # The following FILTERS contains SameHostFilter and DifferentHostFilter with
 # the default filters.
-FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
@@ -221,7 +221,10 @@
         instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
         if [ ! "$instances" = "" ]; then
             echo $instances | xargs -n1 sudo virsh destroy || true
-            echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
+            if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then
+                # If undefine with the --nvram flag fails, retry without it
+                xargs -n1 sudo virsh undefine --managed-save <<< $instances
+            fi
         fi
 
         # Logout and delete iscsi sessions
@@ -440,6 +443,9 @@
             local db="nova_cell1"
         else
             local db="nova_cell0"
+            # When in superconductor mode, nova-compute can't send instance
+            # info updates to the scheduler, so just disable it.
+            iniset $NOVA_CONF filter_scheduler track_instance_changes False
         fi
 
         iniset $NOVA_CONF database connection `database_connection_url $db`
@@ -514,8 +520,8 @@
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN"
-        iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+        iniset $NOVA_CONF vnc server_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     else
@@ -536,7 +542,7 @@
     # Set the oslo messaging driver to the typical default. This does not
     # enable notifications, but it will allow them to function when enabled.
     iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2"
-    iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_transport_url)
+    iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url)
     iniset_rpc_backend nova $NOVA_CONF
     iniset $NOVA_CONF glance api_servers "$GLANCE_URL"
 
@@ -549,6 +555,7 @@
 
     if is_service_enabled tls-proxy; then
         iniset $NOVA_CONF DEFAULT glance_protocol https
+        iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
     fi
 
     if is_service_enabled n-sproxy; then
@@ -567,10 +574,6 @@
         if [[ -n ${LOGDIR} ]]; then
             bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
             iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
-            if [[ -n ${SCREEN_LOGDIR} ]]; then
-                # Drop the backward-compat symlink
-                ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
-            fi
         fi
 
         iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
@@ -596,6 +599,8 @@
                 rpc_backend_add_vhost $vhost
                 iniset_rpc_backend nova $conf DEFAULT $vhost
             fi
+            # Format logging
+            setup_logging $conf
         done
     fi
 }
@@ -799,7 +804,7 @@
             start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
         fi
     else
-        run_process "n-api" "$NOVA_BIN_DIR/uwsgi --ini $NOVA_UWSGI_CONF"
+        run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
         nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
     fi
 
@@ -843,6 +848,9 @@
         cp $compute_cell_conf $NOVA_CPU_CONF
         # FIXME(danms): Should this be configurable?
         iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
+        # Since the nova-compute service cannot reach nova-scheduler over
+        # RPC, we also disable track_instance_changes.
+        iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False
         iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
     fi
 
@@ -875,7 +883,7 @@
     export PATH=$old_path
 }
 
-# start_nova() - Start running processes, including screen
+# start_nova() - Start running processes
 function start_nova_rest {
     # Hack to set the path for rootwrap
     local old_path=$PATH
@@ -903,7 +911,7 @@
     if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
         run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
     else
-        run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --ini $NOVA_METADATA_UWSGI_CONF"
+        run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
     fi
 
     run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
@@ -944,6 +952,28 @@
     done
 }
 
+function is_nova_ready {
+    # NOTE(sdague): with cells v2 all the compute services must be up
+    # and checked into the database before discover_hosts is run. This
+    # happens in all in one installs by accident, because > 30 seconds
+    # happen between here and the script ending. However, in multinode
+    # tests this can very often not be the case. So ensure that the
+    # compute is up before we move on.
+    if is_service_enabled n-cell; then
+        # cells v1 can't complete the check below because it munges
+        # hostnames with cell information (grumble grumble).
+        return
+    fi
+    # TODO(sdague): honestly, this probably should be a plug point for
+    # an external system.
+    if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
+        # xenserver encodes information in the hostname of the compute
+        # because of the dom0/domU split. Just ignore for now.
+        return
+    fi
+    wait_for_compute 60
+}
+
 function start_nova {
     # this catches the cells v1 case early
     _set_singleconductor
@@ -979,6 +1009,11 @@
 }
 
 function stop_nova_conductor {
+    if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+        stop_process n-cond
+        return
+    fi
+
     enable_nova_fleet
     for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do
         if is_service_enabled $srv; then
@@ -987,7 +1022,7 @@
     done
 }
 
-# stop_nova() - Stop running processes (non-screen)
+# stop_nova() - Stop running processes
 function stop_nova {
     stop_nova_rest
     stop_nova_conductor
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index d59473c..ee1a0e0 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -41,14 +41,15 @@
 
     iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
     iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-    iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
 
     if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then
+        iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
         iniset $NOVA_CONF filter_scheduler use_baremetal_filters True
+        iniset $NOVA_CONF filter_scheduler host_subset_size 999
+        iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+        iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     fi
 
-    iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
-    iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
     iniset $NOVA_CONF ironic auth_type password
     iniset $NOVA_CONF ironic username admin
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 0c08a0f..3d676b9 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -71,8 +71,8 @@
         iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
         iniset $NOVA_CONF libvirt images_type "ploop"
         iniset $NOVA_CONF DEFAULT force_raw_images  "False"
-        iniset $NOVA_CONF vnc vncserver_proxyclient_address  $HOST_IP
-        iniset $NOVA_CONF vnc vncserver_listen $HOST_IP
+        iniset $NOVA_CONF vnc server_proxyclient_address  $HOST_IP
+        iniset $NOVA_CONF vnc server_listen $HOST_IP
         iniset $NOVA_CONF vnc keymap
     elif [[ "$NOVA_BACKEND" == "LVM" ]]; then
         iniset $NOVA_CONF libvirt images_type "lvm"
diff --git a/lib/placement b/lib/placement
index 8adbbde..d3fb8c8 100644
--- a/lib/placement
+++ b/lib/placement
@@ -159,12 +159,15 @@
 # install_placement() - Collect source and prepare
 function install_placement {
     install_apache_wsgi
+    # Install the openstackclient placement client plugin for CLI
+    # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r.
+    pip_install osc-placement
 }
 
 # start_placement_api() - Start the API processes ahead of other things
 function start_placement_api {
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
-        run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --ini $PLACEMENT_UWSGI_CONF"
+        run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
     else
         enable_apache_site placement-api
         restart_apache_server
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3177e88..44d0717 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -97,6 +97,8 @@
 
             break
         done
+        # NOTE(frickler): Remove the default guest user
+        sudo rabbitmqctl delete_user guest || true
     fi
 }
 
@@ -114,7 +116,7 @@
     fi
 }
 
-# builds transport url string
+# Returns the address of the RPC backend in URL format.
 function get_transport_url {
     local virtual_host=$1
     if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
@@ -122,8 +124,9 @@
     fi
 }
 
-# Repeat the definition, in case get_transport_url is overriden for RPC purpose.
-# get_notification_url can then be used to talk to rabbit for notifications.
+# Returns the address of the Notification backend in URL format.  This
+# should be used to set the transport_url option in the
+# oslo_messaging_notifications group.
 function get_notification_url {
     local virtual_host=$1
     if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
diff --git a/lib/stack b/lib/stack
index f09ddce..bada26f 100644
--- a/lib/stack
+++ b/lib/stack
@@ -33,5 +33,8 @@
         if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then
             unset PIP_VIRTUAL_ENV
         fi
+    else
+        echo "No function declared with name 'install_${service}'."
+        exit 1
     fi
 }
diff --git a/lib/swift b/lib/swift
index 455740e..1601e2b 100644
--- a/lib/swift
+++ b/lib/swift
@@ -7,7 +7,7 @@
 #
 # - ``functions`` file
 # - ``apache`` file
-# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined
+# - ``DEST``, `SWIFT_HASH` must be defined
 # - ``STACK_USER`` must be defined
 # - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
 # - ``lib/keystone`` file
@@ -464,6 +464,9 @@
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH"
 
+    # Allow both reseller prefixes to be used with domain_remap
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH"
+
     if is_service_enabled swift3; then
         cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
 [filter:s3token]
@@ -608,15 +611,13 @@
     # create all of the directories needed to emulate a few different servers
     local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
-        local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
-        local node=${SWIFT_DATA_DIR}/${node_number}/node
-        local node_device=${node}/sdb1
-        [[ -d $node ]] && continue
-        [[ -d $drive ]] && continue
-        sudo install -o ${STACK_USER} -g $user_group -d $drive
-        sudo install -o ${STACK_USER} -g $user_group -d $node_device
-        sudo chown -R ${STACK_USER}: ${node}
+        # node_devices must match *.conf devices option
+        local node_devices=${SWIFT_DATA_DIR}/${node_number}
+        local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number
+        sudo ln -sf $real_devices $node_devices;
+        local device=${real_devices}/sdb1
+        [[ -d $device ]] && continue
+        sudo install -o ${STACK_USER} -g $user_group -d $device
     done
 }
 
@@ -780,7 +781,7 @@
     fi
 }
 
-# start_swift() - Start running processes, including screen
+# start_swift() - Start running processes
 function start_swift {
     # (re)start memcached to make sure we have a clean memcache.
     restart_service memcached
@@ -799,13 +800,6 @@
         restart_apache_server
         # The rest of the services should be started in backgroud
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
-        # Be we still want the logs of Swift Proxy in our screen session
-        tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
-        if [[ ${SWIFT_REPLICAS} == 1 ]]; then
-            for type in object container account; do
-                tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1
-            done
-        fi
         return 0
     fi
 
@@ -859,7 +853,7 @@
     fi
 }
 
-# stop_swift() - Stop running processes (non-screen)
+# stop_swift() - Stop running processes
 function stop_swift {
     local type
 
diff --git a/lib/tempest b/lib/tempest
index cc65ec7..bdbaaa5 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -293,9 +293,11 @@
         iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True
     fi
 
-    # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed in
-    # Newton and Ocata. This option can be removed after Mitaka is end of life.
-    iniset $TEMPEST_CONFIG identity-feature-enabled forbid_global_implied_dsr True
+    # When LDAP is enabled domain specific drivers are also enabled and the users
+    # and groups identity tests must adapt to this scenario
+    if is_service_enabled ldap; then
+        iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True
+    fi
 
     # Image
     # We want to be able to override this variable in the gate to avoid
@@ -430,6 +432,12 @@
         TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True}
     fi
     iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME)
+    # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends
+    # in Cinder and the libvirt driver in Nova.
+    if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then
+        TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
+    fi
+    iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
     # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
     iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
     iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1)
@@ -574,6 +582,11 @@
         DISABLE_NETWORK_API_EXTENSIONS+=", metering"
     fi
 
+    # disable l3_agent_scheduler if we didn't enable L3 agent
+    if ! is_service_enabled q-l3; then
+        DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
+    fi
+
     local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then
         # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
@@ -608,7 +621,7 @@
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install tox
+    pip_install 'tox!=2.8.0'
     pushd $TEMPEST_DIR
     tox -r --notest -efull
     # NOTE(mtreinish) Respect constraints in the tempest full venv, things that
diff --git a/lib/template b/lib/template
index 25d653c..e6d0032 100644
--- a/lib/template
+++ b/lib/template
@@ -81,7 +81,7 @@
     :
 }
 
-# start_XXXX() - Start running processes, including screen
+# start_XXXX() - Start running processes
 function start_XXXX {
     # The quoted command must be a single command and not include an
     # shell metacharacters, redirections or shell builtins.
@@ -89,7 +89,7 @@
     :
 }
 
-# stop_XXXX() - Stop running processes (non-screen)
+# stop_XXXX() - Stop running processes
 function stop_XXXX {
     # for serv in serv-a serv-b; do
     #     stop_process $serv
diff --git a/lib/tls b/lib/tls
index 7bde5e6..0baf86c 100644
--- a/lib/tls
+++ b/lib/tls
@@ -487,7 +487,7 @@
 }
 
 # Starts the TLS proxy for the given IP/ports
-# start_tls_proxy front-host front-port back-host back-port
+# start_tls_proxy service-name front-host front-port back-host back-port
 function start_tls_proxy {
     local b_service="$1-tls-proxy"
     local f_host=$2
@@ -527,6 +527,7 @@
     # for swift functional testing to work with tls enabled. It is 2 bytes
     # larger than the apache default of 8190.
     LimitRequestFieldSize $f_header_size
+    RequestHeader set X-Forwarded-Proto "https"
 
     <Location />
         ProxyPass http://$b_host:$b_port/ retry=0 nocanon
@@ -541,7 +542,7 @@
     if is_suse ; then
         sudo a2enflag SSL
     fi
-    for mod in ssl proxy proxy_http; do
+    for mod in headers ssl proxy proxy_http; do
         enable_apache_mod $mod
     done
     enable_apache_site $b_service
diff --git a/openrc b/openrc
index 23c173c..37724c5 100644
--- a/openrc
+++ b/openrc
@@ -84,7 +84,7 @@
 # We currently recommend using the version 3 *identity api*.
 #
 
-# If you don't have a working .stackenv, this is the backup possition
+# If you don't have a working .stackenv, this is the backup position
 KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
 KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP}
 
diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml
new file mode 100644
index 0000000..ede8382
--- /dev/null
+++ b/playbooks/devstack.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+  roles:
+    - run-devstack
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
new file mode 100644
index 0000000..6f5126f
--- /dev/null
+++ b/playbooks/post.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  roles:
+    - export-devstack-journal
+    - fetch-devstack-log-dir
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
new file mode 100644
index 0000000..4d07960
--- /dev/null
+++ b/playbooks/pre.yaml
@@ -0,0 +1,22 @@
+- hosts: all
+  roles:
+    - configure-swap
+    - setup-stack-user
+    - setup-tempest-user
+    - setup-devstack-source-dirs
+    - setup-devstack-log-dir
+    - setup-devstack-cache
+    - start-fresh-logging
+    - write-devstack-local-conf
+  # TODO(jeblair): remove when configure-mirrors is fixed
+  tasks:
+    - name: Hack mirror_info
+      shell:
+        _raw_params: |
+          mkdir /etc/ci
+          cat << "EOF" > /etc/ci/mirror_info.sh
+          export NODEPOOL_UCA_MIRROR=http://mirror.dfw.rax.openstack.org/ubuntu-cloud-archive
+          EOF
+      args:
+        executable: /bin/bash
+      become: true
diff --git a/roles/configure-swap/README.rst b/roles/configure-swap/README.rst
new file mode 100644
index 0000000..eaba5cf
--- /dev/null
+++ b/roles/configure-swap/README.rst
@@ -0,0 +1,11 @@
+Configure a swap partition
+
+Creates a swap partition on the ephemeral block device (the rest of which
+will be mounted on /opt).
+
+**Role Variables**
+
+.. zuul:rolevar:: configure_swap_size
+   :default: 8192
+
+   The size of the swap partition, in MiB.
diff --git a/roles/configure-swap/defaults/main.yaml b/roles/configure-swap/defaults/main.yaml
new file mode 100644
index 0000000..4d62232
--- /dev/null
+++ b/roles/configure-swap/defaults/main.yaml
@@ -0,0 +1 @@
+configure_swap_size: 8192
diff --git a/roles/configure-swap/tasks/ephemeral.yaml b/roles/configure-swap/tasks/ephemeral.yaml
new file mode 100644
index 0000000..c2316ea
--- /dev/null
+++ b/roles/configure-swap/tasks/ephemeral.yaml
@@ -0,0 +1,110 @@
+# Configure attached ephemeral devices for storage and swap
+
+- assert:
+    that:
+      - "ephemeral_device is defined"
+
+- name: Set partition names
+  set_fact:
+    swap_partition: "{{ ephemeral_device}}1"
+    opt_partition: "{{ ephemeral_device}}2"
+
+- name: Ensure ephemeral device is unmounted
+  become: yes
+  mount:
+    name: "{{ ephemeral_device }}"
+    state: unmounted
+
+- name: Get existing partitions
+  become: yes
+  parted:
+    device: "{{ ephemeral_device }}"
+    unit: MiB
+  register: ephemeral_partitions
+
+- name: Remove any existing partitions
+  become: yes
+  parted:
+    device: "{{ ephemeral_device }}"
+    number: "{{ item.num }}"
+    state: absent
+  with_items:
+    - "{{ ephemeral_partitions.partitions }}"
+
+- name: Create new disk label
+  become: yes
+  parted:
+    label: msdos
+    device: "{{ ephemeral_device }}"
+
+- name: Create swap partition
+  become: yes
+  parted:
+    device: "{{ ephemeral_device }}"
+    number: 1
+    state: present
+    part_start: '0%'
+    part_end: "{{ configure_swap_size }}MiB"
+
+- name: Create opt partition
+  become: yes
+  parted:
+    device: "{{ ephemeral_device }}"
+    number: 2
+    state: present
+    part_start: "{{ configure_swap_size }}MiB"
+    part_end: "100%"
+
+- name: Make swap on partition
+  become: yes
+  command: "mkswap {{ swap_partition }}"
+
+- name: Write swap to fstab
+  become: yes
+  mount:
+    path: none
+    src: "{{ swap_partition }}"
+    fstype: swap
+    opts: sw
+    passno: 0
+    dump: 0
+    state: present
+
+# XXX: does "parted" plugin ensure the partition is available
+# before moving on?  No udev settles here ...
+
+- name: Add all swap
+  become: yes
+  command: swapon -a
+
+- name: Create /opt filesystem
+  become: yes
+  filesystem:
+    fstype: ext4
+    dev: "{{ opt_partition }}"
+
+# Rackspace at least does not have enough room for two devstack
+# installs on the primary partition.  We copy in the existing /opt to
+# the new partition on the ephemeral device, and then overmount /opt
+# to there for the test runs.
+#
+# NOTE(ianw): the existing "mount" touches fstab.  There is currently (Sep2017)
+# work in [1] to split mount & fstab into separate parts, but for now we bundle
+# it into an atomic shell command
+# [1] https://github.com/ansible/ansible/pull/27174
+- name: Copy old /opt
+  become: yes
+  shell: |
+    mount {{ opt_partition }} /mnt
+    find /opt/ -mindepth 1 -maxdepth 1 -exec mv {} /mnt/ \;
+    umount /mnt
+
+# This overmounts any existing /opt
+- name: Add opt to fstab and mount
+  become: yes
+  mount:
+    path: /opt
+    src: "{{ opt_partition }}"
+    fstype: ext4
+    opts: noatime
+    state: mounted
diff --git a/roles/configure-swap/tasks/main.yaml b/roles/configure-swap/tasks/main.yaml
new file mode 100644
index 0000000..8960c72
--- /dev/null
+++ b/roles/configure-swap/tasks/main.yaml
@@ -0,0 +1,63 @@
+# On RAX hosts, we have a small root partition and a large,
+# unallocated ephemeral device attached at /dev/xvde
+- name: Set ephemeral device if /dev/xvde exists
+  when: ansible_devices["xvde"] is defined
+  set_fact:
+    ephemeral_device: "/dev/xvde"
+
+# On other providers, we have a device called "ephemeral0".
+#
+# NOTE(ianw): Once [1] is in our ansible (2.4 era?), we can figure
+# this out more directly by walking the device labels in the facts
+#
+# [1] https://github.com/ansible/ansible/commit/d46dd99f47c0ee5081d15bc5b741e9096d8bfd3e
+- name: Set ephemeral device by label
+  when: ephemeral_device is undefined
+  block:
+    - name: Get ephemeral0 device node
+      command: /sbin/blkid -L ephemeral0
+      register: ephemeral0
+      # If this doesn't exist, returns !0
+      ignore_errors: yes
+      changed_when: False
+
+    - name: Set ephemeral device if LABEL exists
+      when: "ephemeral0.rc == 0"
+      set_fact:
+        ephemeral_device: "{{ ephemeral0.stdout }}"
+
+# If we have ephemeral storage and we don't appear to have setup swap,
+# we will create a swap and move /opt to a large data partition there.
+- include: ephemeral.yaml
+  static: no
+  when:
+    - ephemeral_device is defined
+    - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size
+
+# If no ephemeral device and no swap, then we will setup some swap
+# space on the root device to ensure all hosts a consistent memory
+# environment.
+- include: root.yaml
+  static: no
+  when:
+    - ephemeral_device is undefined
+    - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size
+
+# ensure a standard level of swappiness.  Some platforms
+# (rax+centos7) come with swappiness of 0 (presumably because the
+# vm doesn't come with swap setup ... but we just did that above),
+# which depending on the kernel version can lead to the OOM killer
+# kicking in on some processes despite swap being available;
+# particularly things like mysql which have very high ratio of
+# anonymous-memory to file-backed mappings.
+#
+# This sets swappiness low; we really don't want to be relying on
+# cloud I/O based swap during our runs if we can help it
+- name: Set swappiness
+  become: yes
+  sysctl:
+    name: vm.swappiness
+    value: 30
+    state: present
+
+- debug:  var=ephemeral_device
diff --git a/roles/configure-swap/tasks/root.yaml b/roles/configure-swap/tasks/root.yaml
new file mode 100644
index 0000000..f22b537
--- /dev/null
+++ b/roles/configure-swap/tasks/root.yaml
@@ -0,0 +1,63 @@
+# If no ephemeral devices are available, use root filesystem
+
+- name: Calculate required swap
+  set_fact:
+    swap_required: "{{ configure_swap_size - ansible_memory_mb['swap']['total'] | int }}"
+
+- block:
+    - name: Get root filesystem
+      shell: df --output='fstype' /root | tail -1
+      register: root_fs
+
+    - name: Save root filesystem
+      set_fact:
+        root_filesystem: "{{ root_fs.stdout }}"
+
+    - debug: var=root_filesystem
+
+# Note, we don't use a sparse device to avoid wedging when disk space
+# and memory are both unavailable.
+
+# Cannot fallocate on filesystems like XFS, so use slower dd
+- name: Create swap backing file for non-EXT fs
+  when: '"ext" not in root_filesystem'
+  become: yes
+  command: dd if=/dev/zero of=/root/swapfile bs=1M count={{ swap_required }}
+  args:
+    creates: /root/swapfile
+
+- name: Create sparse swap backing file for EXT fs
+  when: '"ext" in root_filesystem'
+  become: yes
+  command: fallocate -l {{ swap_required }}M /root/swapfile
+  args:
+    creates: /root/swapfile
+
+- name: Ensure swapfile perms
+  become: yes
+  file:
+    path: /root/swapfile
+    owner: root
+    group: root
+    mode: 0600
+
+- name: Make swapfile
+  become: yes
+  command: mkswap /root/swapfile
+
+- name: Write swap to fstab
+  become: yes
+  mount:
+    path: none
+    src: /root/swapfile
+    fstype: swap
+    opts: sw
+    passno: 0
+    dump: 0
+    state: present
+
+- name: Add all swap
+  become: yes
+  command: swapon -a
+
+- debug: var=swap_required
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
new file mode 100644
index 0000000..5f00592
--- /dev/null
+++ b/roles/export-devstack-journal/README.rst
@@ -0,0 +1,15 @@
+Export journal files from devstack services
+
+Export the systemd journal for every devstack service in native
+journal format as well as text.  Also, export a syslog-style file with
+kernel and sudo messages.
+
+Writes the output to the ``logs/`` subdirectory of
+``devstack_base_dir``.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/export-devstack-journal/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
new file mode 100644
index 0000000..b9af02a
--- /dev/null
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -0,0 +1,29 @@
+# TODO: convert this to ansible
+- name: Export journal files
+  become: true
+  shell:
+    cmd: |
+      u=""
+      name=""
+      for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do
+        name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
+        journalctl -o short-precise --unit $u | tee {{ devstack_base_dir }}/logs/$name.txt > /dev/null
+      done
+
+      # Export the journal in export format to make it downloadable
+      # for later searching. It can then be rewritten to a journal native
+      # format locally using systemd-journal-remote. This makes a class of
+      # debugging much easier. We don't do the native conversion here as
+      # some distros do not package that tooling.
+      journalctl -u 'devstack@*' -o export | \
+          xz --threads=0 - > {{ devstack_base_dir }}/logs/devstack.journal.xz
+
+      # The journal contains everything running under systemd, we'll
+      # build an old school version of the syslog with just the
+      # kernel and sudo messages.
+      journalctl \
+          -t kernel \
+          -t sudo \
+          --no-pager \
+          --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
+        | tee {{ devstack_base_dir }}/logs/syslog.txt > /dev/null
diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst
new file mode 100644
index 0000000..360a2e3
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/README.rst
@@ -0,0 +1,10 @@
+Fetch content from the devstack log directory
+
+Copy logs from every host back to the zuul executor.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
new file mode 100644
index 0000000..5a198b2
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Collect devstack logs
+  synchronize:
+    dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+    mode: pull
+    src: "{{ devstack_base_dir }}/logs"
diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst
new file mode 100644
index 0000000..d77eb15
--- /dev/null
+++ b/roles/run-devstack/README.rst
@@ -0,0 +1,8 @@
+Run devstack
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/run-devstack/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml
new file mode 100644
index 0000000..bafebaf
--- /dev/null
+++ b/roles/run-devstack/tasks/main.yaml
@@ -0,0 +1,6 @@
+- name: Run devstack
+  command: ./stack.sh
+  args:
+    chdir: "{{devstack_base_dir}}/devstack"
+  become: true
+  become_user: stack
diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst
new file mode 100644
index 0000000..b8938c3
--- /dev/null
+++ b/roles/setup-devstack-cache/README.rst
@@ -0,0 +1,15 @@
+Set up the devstack cache directory
+
+If the node has a cache of devstack image files, copy it into place.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: devstack_cache_dir
+   :default: /opt/cache
+
+   The directory with the cached files.
diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml
new file mode 100644
index 0000000..c56720b
--- /dev/null
+++ b/roles/setup-devstack-cache/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_cache_dir: /opt/cache
diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml
new file mode 100644
index 0000000..84f33f0
--- /dev/null
+++ b/roles/setup-devstack-cache/tasks/main.yaml
@@ -0,0 +1,14 @@
+- name: Copy cached devstack files
+  # This uses hard links to avoid using extra space.
+  command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;"
+  become: true
+
+- name: Set ownership of cached files
+  file:
+    path: '{{ devstack_base_dir }}/devstack/files'
+    state: directory
+    recurse: true
+    owner: stack
+    group: stack
+    mode: a+r
+  become: yes
diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst
new file mode 100644
index 0000000..9d8dba3
--- /dev/null
+++ b/roles/setup-devstack-log-dir/README.rst
@@ -0,0 +1,11 @@
+Set up the devstack log directory
+
+Create a log directory on the ephemeral disk partition to save space
+on the root device.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-devstack-log-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml
new file mode 100644
index 0000000..b9f38df
--- /dev/null
+++ b/roles/setup-devstack-log-dir/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Create logs directory
+  file:
+    path: '{{ devstack_base_dir }}/logs'
+    state: directory
+  become: yes
diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst
new file mode 100644
index 0000000..4ebf839
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/README.rst
@@ -0,0 +1,11 @@
+Set up the devstack source directories
+
+Ensure that the base directory exists, and then move the source repos
+into it.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
new file mode 100644
index 0000000..e6bbae2
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -0,0 +1,22 @@
+- name: Find all source repos used by this job
+  find:
+    paths:
+      - src/git.openstack.org/openstack
+      - src/git.openstack.org/openstack-dev
+      - src/git.openstack.org/openstack-infra
+    file_type: directory
+  register: found_repos
+
+- name: Copy Zuul repos into devstack working directory
+  command: rsync -a {{ item.path }} {{ devstack_base_dir }}
+  with_items: '{{ found_repos.files }}'
+  become: yes
+
+- name: Set ownership of repos
+  file:
+    path: '{{ devstack_base_dir }}'
+    state: directory
+    recurse: true
+    owner: stack
+    group: stack
+  become: yes
diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst
new file mode 100644
index 0000000..80c4d39
--- /dev/null
+++ b/roles/setup-stack-user/README.rst
@@ -0,0 +1,16 @@
+Set up the `stack` user
+
+Create the stack user, set up its home directory, and allow it to
+sudo.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: devstack_stack_home_dir
+   :default: {{ devstack_base_dir }}
+
+   The home directory for the stack user.
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml
new file mode 100644
index 0000000..6d0be66
--- /dev/null
+++ b/roles/setup-stack-user/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_stack_home_dir: '{{ devstack_base_dir }}'
diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh
new file mode 100644
index 0000000..4c6b46b
--- /dev/null
+++ b/roles/setup-stack-user/files/50_stack_sh
@@ -0,0 +1 @@
+stack ALL=(root) NOPASSWD:ALL
diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml
new file mode 100644
index 0000000..8384515
--- /dev/null
+++ b/roles/setup-stack-user/tasks/main.yaml
@@ -0,0 +1,45 @@
+- name: Create stack group
+  group:
+    name: stack
+  become: yes
+
+# NOTE(andreaf) Creating a user home_dir is not safe via
+# the user module since it will fail if the containing
+# folder does not exist. If the folder does exist and
+# it's empty, the skeleton is set up and ownership set.
+- name: Create the stack user home folder
+  file:
+    path: '{{ devstack_stack_home_dir }}'
+    state: directory
+  become: yes
+
+- name: Create stack user
+  user:
+    name: stack
+    shell: /bin/bash
+    home: '{{ devstack_stack_home_dir }}'
+    group: stack
+  become: yes
+
+- name: Set stack user home directory permissions
+  file:
+    path: '{{ devstack_stack_home_dir }}'
+    mode: 0755
+  become: yes
+
+- name: Copy 50_stack_sh file to /etc/sudoers.d
+  copy:
+    src: 50_stack_sh
+    dest: /etc/sudoers.d
+    mode: 0440
+    owner: root
+    group: root
+  become: yes
+
+- name: Create new/.cache folder within BASE
+  file:
+    path: '{{ devstack_stack_home_dir }}/.cache'
+    state: directory
+    owner: stack
+    group: stack
+  become: yes
diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst
new file mode 100644
index 0000000..bb29c50
--- /dev/null
+++ b/roles/setup-tempest-user/README.rst
@@ -0,0 +1,10 @@
+Set up the `tempest` user
+
+Create the tempest user and allow it to sudo.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh
new file mode 100644
index 0000000..f88ff9f
--- /dev/null
+++ b/roles/setup-tempest-user/files/51_tempest_sh
@@ -0,0 +1,3 @@
+tempest ALL=(root) NOPASSWD:/sbin/ip
+tempest ALL=(root) NOPASSWD:/sbin/iptables
+tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client
diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml
new file mode 100644
index 0000000..892eaf6
--- /dev/null
+++ b/roles/setup-tempest-user/tasks/main.yaml
@@ -0,0 +1,20 @@
+- name: Create tempest group
+  group:
+    name: tempest
+  become: yes
+
+- name: Create tempest user
+  user:
+    name: tempest
+    shell: /bin/bash
+    group: tempest
+  become: yes
+
+- name: Copy 51_tempest_sh to /etc/sudoers.d
+  copy:
+    src: 51_tempest_sh
+    dest: /etc/sudoers.d
+    owner: root
+    group: root
+    mode: 0440
+  become: yes
diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst
new file mode 100644
index 0000000..11b029e
--- /dev/null
+++ b/roles/start-fresh-logging/README.rst
@@ -0,0 +1,11 @@
+Restart logging on all hosts
+
+Restart syslog so that the system logs only include output from the
+job.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/start-fresh-logging/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml
new file mode 100644
index 0000000..6c7ba66
--- /dev/null
+++ b/roles/start-fresh-logging/tasks/main.yaml
@@ -0,0 +1,56 @@
+- name: Check for /bin/journalctl file
+  command: which journalctl
+  changed_when: False
+  failed_when: False
+  register: which_out
+
+- block:
+    - name: Get current date
+      command: date +"%Y-%m-%d %H:%M:%S"
+      register: date_out
+
+    - name: Copy current date to log-start-timestamp.txt
+      copy:
+        dest: "{{ devstack_base_dir }}/log-start-timestamp.txt"
+        content: "{{ date_out.stdout }}"
+  when: which_out.rc == 0
+  become: yes
+
+- block:
+    - name: Stop rsyslog
+      service: name=rsyslog state=stopped
+
+    - name: Save syslog file prior to devstack run
+      command: mv /var/log/syslog /var/log/syslog-pre-devstack
+
+    - name: Save kern.log file prior to devstack run
+      command: mv /var/log/kern.log /var/log/kern_log-pre-devstack
+
+    - name: Recreate syslog file
+      file: name=/var/log/syslog state=touch
+
+    - name: Recreate syslog file owner and group
+      command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+    - name: Recreate syslog file permissions
+      command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+    - name: Add read permissions to all on syslog file
+      file: name=/var/log/syslog mode=a+r
+
+    - name: Recreate kern.log file
+      file: name=/var/log/kern.log state=touch
+
+    - name: Recreate kern.log file owner and group
+      command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+    - name: Recreate kern.log file permissions
+      command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+    - name: Add read permissions to all on kern.log file
+      file: name=/var/log/kern.log mode=a+r
+
+    - name: Start rsyslog
+      service: name=rsyslog state=started
+  when: which_out.rc == 1
+  become: yes
diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst
new file mode 100644
index 0000000..e30dfa1
--- /dev/null
+++ b/roles/write-devstack-local-conf/README.rst
@@ -0,0 +1,63 @@
+Write the local.conf file for use by devstack
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: devstack_local_conf_path
+   :default: {{ devstack_base_dir }}/devstack/local.conf
+
+   The path of the local.conf file.
+
+.. zuul:rolevar:: devstack_localrc
+   :type: dict
+
+   A dictionary of variables that should be written to the localrc
+   section of local.conf.  The values (which are strings) may contain
+   bash shell variables, and will be ordered so that variables used by
+   later entries appear first.
+
+.. zuul:rolevar:: devstack_local_conf
+   :type: dict
+
+   A complex argument consisting of nested dictionaries which combine
+   to form the meta-sections of the local_conf file.  The top level is
+   a dictionary of phases, followed by dictionaries of filenames, then
+   sections, which finally contain key-value pairs for the INI file
+   entries in those sections.
+
+   The keys in this dictionary are the devstack phases.
+
+   .. zuul:rolevar:: [phase]
+      :type: dict
+
+      The keys in this dictionary are the filenames for this phase.
+
+      .. zuul:rolevar:: [filename]
+         :type: dict
+
+         The keys in this dictionary are the INI sections in this file.
+
+         .. zuul:rolevar:: [section]
+            :type: dict
+
+            This is a dictionary of key-value pairs which comprise
+            this section of the INI file.
+
+.. zuul:rolevar:: devstack_services
+   :type: dict
+
+   A dictionary mapping service names to boolean values.  If the
+   boolean value is ``false``, a ``disable_service`` line will be
+   emitted for the service name.  If it is ``true``, then
+   ``enable_service`` will be emitted.  All other values are ignored.
+
+.. zuul:rolevar:: devstack_plugins
+   :type: dict
+
+   A dictionary mapping a plugin name to a git repo location.  If the
+   location is a non-empty string, then an ``enable_plugin`` line will
+be emitted for the plugin name.
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml
new file mode 100644
index 0000000..491fa0f
--- /dev/null
+++ b/roles/write-devstack-local-conf/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf"
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py
new file mode 100644
index 0000000..4134beb
--- /dev/null
+++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py
@@ -0,0 +1,185 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+
+class VarGraph(object):
+    # This is based on the JobGraph from Zuul.
+
+    def __init__(self, vars):
+        self.vars = {}
+        self._varnames = set()
+        self._dependencies = {}  # dependent_var_name -> set(parent_var_names)
+        for k, v in vars.items():
+            self._varnames.add(k)
+        for k, v in vars.items():
+            self._addVar(k, str(v))
+
+    bash_var_re = re.compile(r'\$\{?(\w+)')
+    def getDependencies(self, value):
+        return self.bash_var_re.findall(value)
+
+    def _addVar(self, key, value):
+        if key in self.vars:
+            raise Exception("Variable {} already added".format(key))
+        self.vars[key] = value
+        # Append the dependency information
+        self._dependencies.setdefault(key, set())
+        try:
+            for dependency in self.getDependencies(value):
+                if dependency == key:
+                    # A variable is allowed to reference itself; no
+                    # dependency link needed in that case.
+                    continue
+                if dependency not in self._varnames:
+                    # It's not necessary to create a link for an
+                    # external variable.
+                    continue
+                # Make sure a circular dependency is never created
+                ancestor_vars = self._getParentVarNamesRecursively(
+                    dependency, soft=True)
+                ancestor_vars.add(dependency)
+                if any((key == anc_var) for anc_var in ancestor_vars):
+                    raise Exception("Dependency cycle detected in var {}".
+                                    format(key))
+                self._dependencies[key].add(dependency)
+        except Exception:
+            del self.vars[key]
+            del self._dependencies[key]
+            raise
+
+    def getVars(self):
+        ret = []
+        keys = sorted(self.vars.keys())
+        seen = set()
+        for key in keys:
+            dependencies = self.getDependentVarsRecursively(key)
+            for var in dependencies + [key]:
+                if var not in seen:
+                    ret.append((var, self.vars[var]))
+                    seen.add(var)
+        return ret
+
+    def getDependentVarsRecursively(self, parent_var):
+        dependent_vars = []
+
+        current_dependent_vars = self._dependencies[parent_var]
+        for current_var in current_dependent_vars:
+            if current_var not in dependent_vars:
+                dependent_vars.append(current_var)
+            for dep in self.getDependentVarsRecursively(current_var):
+                if dep not in dependent_vars:
+                    dependent_vars.append(dep)
+        return dependent_vars
+
+    def _getParentVarNamesRecursively(self, dependent_var, soft=False):
+        all_parent_vars = set()
+        vars_to_iterate = set([dependent_var])
+        while len(vars_to_iterate) > 0:
+            current_var = vars_to_iterate.pop()
+            current_parent_vars = self._dependencies.get(current_var)
+            if current_parent_vars is None:
+                if soft:
+                    current_parent_vars = set()
+                else:
+                    raise Exception("Dependent var {} not found: ".format(
+                                    dependent_var))
+            new_parent_vars = current_parent_vars - all_parent_vars
+            vars_to_iterate |= new_parent_vars
+            all_parent_vars |= new_parent_vars
+        return all_parent_vars
+
+
+class LocalConf(object):
+
+    def __init__(self, localrc, localconf, services, plugins):
+        self.localrc = []
+        self.meta_sections = {}
+        if plugins:
+            self.handle_plugins(plugins)
+        if services:
+            self.handle_services(services)
+        if localrc:
+            self.handle_localrc(localrc)
+        if localconf:
+            self.handle_localconf(localconf)
+
+    def handle_plugins(self, plugins):
+        for k, v in plugins.items():
+            if v:
+                self.localrc.append('enable_plugin {} {}'.format(k, v))
+
+    def handle_services(self, services):
+        for k, v in services.items():
+            if v is False:
+                self.localrc.append('disable_service {}'.format(k))
+            elif v is True:
+                self.localrc.append('enable_service {}'.format(k))
+
+    def handle_localrc(self, localrc):
+        vg = VarGraph(localrc)
+        for k, v in vg.getVars():
+            self.localrc.append('{}={}'.format(k, v))
+
+    def handle_localconf(self, localconf):
+        for phase, phase_data in localconf.items():
+            for fn, fn_data in phase_data.items():
+                ms_name = '[[{}|{}]]'.format(phase, fn)
+                ms_data = []
+                for section, section_data in fn_data.items():
+                    ms_data.append('[{}]'.format(section))
+                    for k, v in section_data.items():
+                        ms_data.append('{} = {}'.format(k, v))
+                    ms_data.append('')
+                self.meta_sections[ms_name] = ms_data
+
+    def write(self, path):
+        with open(path, 'w') as f:
+            f.write('[[local|localrc]]\n')
+            f.write('\n'.join(self.localrc))
+            f.write('\n\n')
+            for section, lines in self.meta_sections.items():
+                f.write('{}\n'.format(section))
+                f.write('\n'.join(lines))
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            plugins=dict(type='dict'),
+            services=dict(type='dict'),
+            localrc=dict(type='dict'),
+            local_conf=dict(type='dict'),
+            path=dict(type='str'),
+        )
+    )
+
+    p = module.params
+    lc = LocalConf(p.get('localrc'),
+                   p.get('local_conf'),
+                   p.get('services'),
+                   p.get('plugins'))
+    lc.write(p['path'])
+
+    module.exit_json()
+
+
+from ansible.module_utils.basic import *  # noqa
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml
new file mode 100644
index 0000000..1d67616
--- /dev/null
+++ b/roles/write-devstack-local-conf/tasks/main.yaml
@@ -0,0 +1,9 @@
+- name: Write a job-specific local_conf file
+  become: true
+  become_user: stack
+  devstack_local_conf:
+    path: "{{ devstack_local_conf_path }}"
+    plugins: "{{ devstack_plugins|default(omit) }}"
+    services: "{{ devstack_services|default(omit) }}"
+    localrc: "{{ devstack_localrc|default(omit) }}"
+    local_conf: "{{ devstack_local_conf|default(omit) }}"
diff --git a/stack.sh b/stack.sh
index 015ee6e..c545c56 100755
--- a/stack.sh
+++ b/stack.sh
@@ -30,7 +30,7 @@
 # NOTE(sdague): why do we explicitly set locale when running stack.sh?
 #
 # Devstack is written in bash, and many functions used throughout
-# devstack process text comming off a command (like the ip command)
+# devstack process text coming off a command (like the ip command)
 # and do transforms using grep, sed, cut, awk on the strings that are
 # returned. Many of these programs are interationalized, which is
 # great for end users, but means that the strings that devstack
@@ -228,16 +228,6 @@
     fi
 fi
 
-# Check to see if we are already running DevStack
-# Note that this may fail if USE_SCREEN=False
-if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then
-    echo "You are already running a stack.sh session."
-    echo "To rejoin this session type 'screen -x stack'."
-    echo "To destroy this session, type './unstack.sh'."
-    exit 1
-fi
-
-
 # Local Settings
 # --------------
 
@@ -491,24 +481,6 @@
     exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
 fi
 
-# Set up logging of screen windows
-# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``, we will log to the file
-# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
-# ``screen-$SERVICE_NAME.log`` to the latest log file.
-# Logs are kept for as long specified in ``LOGDAYS``.
-# This is deprecated....logs go in ``LOGDIR``, only symlinks will be here now.
-if [[ -n "$SCREEN_LOGDIR" ]]; then
-
-    # We make sure the directory is created.
-    if [[ -d "$SCREEN_LOGDIR" ]]; then
-        # We cleanup the old logs
-        find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
-    else
-        mkdir -p $SCREEN_LOGDIR
-    fi
-fi
-
 # Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
 check_path_perm_sanity ${DEST}
 
@@ -537,14 +509,20 @@
 
     if [[ $r -ne 0 ]]; then
         echo "Error on exit"
-        generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
+        # If we error before we've installed os-testr, this will fail.
+        if type -p generate-subunit > /dev/null; then
+            generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
+        fi
         if [[ -z $LOGDIR ]]; then
             $TOP_DIR/tools/worlddump.py
         else
             $TOP_DIR/tools/worlddump.py -d $LOGDIR
         fi
     else
-        generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT}
+        # If we error before we've installed os-testr, this will fail.
+        if type -p generate-subunit > /dev/null; then
+            generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT}
+        fi
     fi
 
     exit $r
@@ -896,14 +874,12 @@
 if is_service_enabled nova; then
     # Compute service
     stack_install_service nova
-    cleanup_nova
     configure_nova
 fi
 
 if is_service_enabled placement; then
     # placement api
     stack_install_service placement
-    cleanup_placement
     configure_placement
 fi
 
@@ -1017,38 +993,6 @@
     configure_database
 fi
 
-
-# Configure screen
-# ----------------
-
-USE_SCREEN=$(trueorfalse True USE_SCREEN)
-if [[ "$USE_SCREEN" == "True" ]]; then
-    # Create a new named screen to run processes in
-    screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-    sleep 1
-
-    # Set a reasonable status bar
-    SCREEN_HARDSTATUS=${SCREEN_HARDSTATUS:-}
-    if [ -z "$SCREEN_HARDSTATUS" ]; then
-        SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
-    fi
-    screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
-    screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
-
-    if is_service_enabled tls-proxy; then
-        follow_tls_proxy
-    fi
-fi
-
-# Clear ``screenrc`` file
-SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
-    rm -f $SCREENRC
-fi
-
-# Initialize the directory for service status check
-init_service_check
-
 # Save configuration values
 save_stackenv $LINENO
 
@@ -1433,6 +1377,13 @@
 # Sanity checks
 # =============
 
+# Check that computes are all ready
+#
+# TODO(sdague): there should be some generic phase here.
+if is_service_enabled n-cpu; then
+    is_nova_ready
+fi
+
 # Check the status of running services
 service_check
 
diff --git a/stackrc b/stackrc
index 877da82..ffe4050 100644
--- a/stackrc
+++ b/stackrc
@@ -80,7 +80,7 @@
 # CELLSV2_SETUP - how we should configure services with cells v2
 #
 # - superconductor - this is one conductor for the api services, and
-#   one per cell managing the compute services. This is prefered
+#   one per cell managing the compute services. This is preferred
 # - singleconductor - this is one conductor for the whole deployment,
 #   this is not recommended, and will be removed in the future.
 CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"}
@@ -88,22 +88,9 @@
 # Set the root URL for Horizon
 HORIZON_APACHE_ROOT="/dashboard"
 
-# TODO(sdague): Queens
-#
-# All the non systemd paths should be removed in queens, they only
-# exist in Pike to support testing from grenade. Ensure that all this
-# is cleaned up and purged, which should dramatically simplify the
-# devstack codebase.
-
-# Whether to use 'dev mode' for screen windows. Dev mode works by
-# stuffing text into the screen windows so that a developer can use
-# ctrl-c, up-arrow, enter to restart the service. Starting services
-# this way is slightly unreliable, and a bit slower, so this can
-# be disabled for automated testing by setting this value to False.
-USE_SCREEN=$(trueorfalse False USE_SCREEN)
-
-# Whether to use SYSTEMD to manage services
-USE_SYSTEMD=$(trueorfalse False USE_SYSTEMD)
+# Whether to use SYSTEMD to manage services, we only do this from
+# Queens forward.
+USE_SYSTEMD="True"
 USER_UNITS=$(trueorfalse False USER_UNITS)
 if [[ "$USER_UNITS" == "True" ]]; then
     SYSTEMD_DIR="$HOME/.local/share/systemd/user"
@@ -117,21 +104,11 @@
 # Whether or not to enable Kernel Samepage Merging (KSM) if available.
 # This allows programs that mark their memory as mergeable to share
 # memory pages if they are identical. This is particularly useful with
-# libvirt backends. This reduces memory useage at the cost of CPU overhead
+# libvirt backends. This reduces memory usage at the cost of CPU overhead
 # to scan memory. We default to enabling it because we tend to be more
 # memory constrained than CPU bound.
 ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
 
-# When using screen, should we keep a log file on disk?  You might
-# want this False if you have a long-running setup where verbose logs
-# can fill-up the host.
-# XXX: Ideally screen itself would be configured to log but just not
-# activate.  This isn't possible with the screerc syntax.  Temporary
-# logging can still be used by a developer with:
-#    C-a : logfile foo
-#    C-a : log on
-SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING)
-
 # Passwords generated by interactive devstack runs
 if [[ -r $RC_DIR/.localrc.password ]]; then
     source $RC_DIR/.localrc.password
@@ -153,10 +130,12 @@
 # When Python 3 is supported by an application, adding the specific
 # version of Python 3 to this variable will install the app using that
 # version of the interpreter instead of 2.7.
-export PYTHON3_VERSION=${PYTHON3_VERSION:-3.5}
+_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
 
 # Just to be more explicit on the Python 2 version to use.
-export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7}
+_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
+export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
 
 # allow local overrides of env variables, including repo config
 if [[ -f $RC_DIR/localrc ]]; then
@@ -167,19 +146,6 @@
     source $RC_DIR/.localrc.auto
 fi
 
-# TODO(sdague): Delete all this in Queens.
-if [[ "$USE_SYSTEMD" == "True" ]]; then
-    USE_SCREEN=False
-fi
-# if we are forcing off USE_SCREEN (as we do in the gate), force on
-# systemd. This allows us to drop one of 3 paths through the code.
-if [[ "$USE_SCREEN" == "False" ]]; then
-    # Remove in Pike: this gets us through grenade upgrade
-    if [[ "$GRENADE_PHASE" != "target" ]]; then
-        USE_SYSTEMD="True"
-    fi
-fi
-
 # Default for log coloring is based on interactive-or-not.
 # Baseline assumption is that non-interactive invocations are for CI,
 # where logs are to be presented as browsable text files; hence color
@@ -218,7 +184,7 @@
 # will to be set to ``3`` in order to make DevStack register the Identity
 # endpoint as v3. This flag is experimental and will be used as basis to
 # identify the projects which still have issues to operate with Identity v3.
-ENABLE_IDENTITY_V2=$(trueorfalse True ENABLE_IDENTITY_V2)
+ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
 if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
     IDENTITY_API_VERSION=3
 fi
@@ -280,7 +246,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="pike"
+DEVSTACK_SERIES="queens"
 
 ##############
 #
@@ -391,6 +357,10 @@
 # this doesn't exist in a lib file, so set it here
 GITDIR["python-openstackclient"]=$DEST/python-openstackclient
 
+# placement-api CLI
+GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git}
+GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-master}
+
 
 ###################
 #
@@ -624,7 +594,7 @@
 IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
 
 # a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
 NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6}
 
 # a websockets/html5 or flash powered SPICE console for vm instances
@@ -731,10 +701,60 @@
             DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
             IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
             IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
+        fake)
+            # Use the same as the default for libvirt
+            DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
+            DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
+            IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
     esac
     DOWNLOAD_DEFAULT_IMAGES=False
 fi
 
+# This is a comma separated list of extra URLS to be listed for
+# download by the tools/image_list.sh script.  CI environments can
+# pre-download these URLS and place them in $FILES.  Later scripts can
+# then use "get_extra_file <url>" which will print out the path to the
+# file; it will either be downloaded on demand or acquired from the
+# cache if there.
+EXTRA_CACHE_URLS=""
+
+# etcd3 defaults
+ETCD_VERSION=${ETCD_VERSION:-v3.1.10}
+ETCD_SHA256_AMD64="2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be"
+# NOTE(sdague): etcd v3.1.10 doesn't have anything for these architectures, though 3.2.x does.
+ETCD_SHA256_ARM64=""
+ETCD_SHA256_PPC64=""
+ETCD_SHA256_S390X=""
+# Make sure etcd3 downloads the correct architecture
+if is_arch "x86_64"; then
+    ETCD_ARCH="amd64"
+    ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64}
+elif is_arch "aarch64"; then
+    ETCD_ARCH="arm64"
+    ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64}
+elif is_arch "ppc64le"; then
+    ETCD_ARCH="ppc64le"
+    ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64}
+elif is_arch "s390x"; then
+    # An etcd3 binary for s390x is not available on github like it is
+    # for other arches. Only continue if a custom download URL was
+    # provided.
+    if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then
+        ETCD_ARCH="s390x"
+        ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X}
+    else
+        exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided."
+    fi
+else
+    exit_distro_not_supported "invalid hardware type - $ETCD_ARCH"
+fi
+ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
+ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
+ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
+ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
+# etcd is always required, so place it into list of pre-cached downloads
+EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION"
+
 # Detect duplicate values in IMAGE_URLS
 for image_url in ${IMAGE_URLS//,/ }; do
     if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then
@@ -758,9 +778,6 @@
 
 PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""}
 
-# Set default screen name
-SCREEN_NAME=${SCREEN_NAME:-stack}
-
 # Allow the use of an alternate protocol (such as https) for service endpoints
 SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
 
@@ -781,6 +798,9 @@
 # Service graceful shutdown timeout
 SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
 
+# Timeout for service worker processes
+WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
+
 # Support alternative yum -- in future Fedora 'dnf' will become the
 # only supported installer, but for now 'yum' and 'dnf' are both
 # available in parallel with compatible CLIs.  Allow manual switching
@@ -880,15 +900,6 @@
 
 # Following entries need to be last items in file
 
-# Compatibility bits required by other callers like Grenade
-
-# Old way was using SCREEN_LOGDIR to locate those logs and LOGFILE for the stack.sh trace log.
-# LOGFILE       SCREEN_LOGDIR       output
-# not set       not set             no log files
-# set           not set             stack.sh log to LOGFILE
-# not set       set                 screen logs to SCREEN_LOGDIR
-# set           set                 stack.sh log to LOGFILE, screen logs to SCREEN_LOGDIR
-
 # New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR
 # LOGFILE       LOGDIR              output
 # not set       not set             (new) set LOGDIR from default
@@ -896,9 +907,6 @@
 # not set       set                 screen logs to LOGDIR
 # set           set                 stack.sh log to LOGFILE, screen logs to LOGDIR
 
-# For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR
-# symlinks to SCREEN_LOGDIR (compat)
-
 # Set up new logging defaults
 if [[ -z "${LOGDIR:-}" ]]; then
     default_logdir=$DEST/logs
@@ -913,12 +921,6 @@
             # LOGFILE had no path, set a default
             LOGDIR="$default_logdir"
         fi
-
-        # Check for duplication
-        if [[ "${SCREEN_LOGDIR:-}" == "${LOGDIR}" ]]; then
-            # We don't need the symlinks since it's the same directory
-            unset SCREEN_LOGDIR
-        fi
     fi
     unset default_logdir logfile
 fi
diff --git a/tests/run-process.sh b/tests/run-process.sh
deleted file mode 100755
index 301b9a0..0000000
--- a/tests/run-process.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-# tests/exec.sh - Test DevStack run_process() and stop_process()
-#
-# exec.sh start|stop|status
-#
-# Set USE_SCREEN True|False to change use of screen.
-#
-# This script emulates the basic exec environment in ``stack.sh`` to test
-# the process spawn and kill operations.
-
-if [[ -z $1 ]]; then
-    echo "$0 start|stop"
-    exit 1
-fi
-
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-source $TOP_DIR/functions
-
-USE_SCREEN=${USE_SCREEN:-False}
-
-ENABLED_SERVICES=fake-service
-
-SERVICE_DIR=/tmp
-SCREEN_NAME=test
-SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME}
-
-
-# Kill background processes on exit
-trap clean EXIT
-clean() {
-    local r=$?
-    jobs -p
-    kill >/dev/null 2>&1 $(jobs -p)
-    exit $r
-}
-
-
-# Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
-    local r=$?
-    jobs -p
-    kill >/dev/null 2>&1 $(jobs -p)
-    set +o xtrace
-    [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
-    exit $r
-}
-
-function status {
-    if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then
-        pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid)
-    fi
-    ps -ef | grep fake
-}
-
-function setup_screen {
-if [[ ! -d $SERVICE_DIR/$SCREEN_NAME ]]; then
-    rm -rf $SERVICE_DIR/$SCREEN_NAME
-    mkdir -p $SERVICE_DIR/$SCREEN_NAME
-fi
-
-if [[ "$USE_SCREEN" == "True" ]]; then
-    # Create a new named screen to run processes in
-    screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-    sleep 1
-
-    # Set a reasonable status bar
-    if [ -z "$SCREEN_HARDSTATUS" ]; then
-        SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
-    fi
-    screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
-fi
-
-# Clear screen rc file
-SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
-    echo -n > $SCREENRC
-fi
-}
-
-# Mimic logging
-    # Set up output redirection without log files
-    # Copy stdout to fd 3
-    exec 3>&1
-    if [[ "$VERBOSE" != "True" ]]; then
-        # Throw away stdout and stderr
-        #exec 1>/dev/null 2>&1
-        :
-    fi
-    # Always send summary fd to original stdout
-    exec 6>&3
-
-
-if [[ "$1" == "start" ]]; then
-    echo "Start service"
-    setup_screen
-    run_process fake-service "$TOP_DIR/tests/fake-service.sh"
-    sleep 1
-    status
-elif [[ "$1" == "stop" ]]; then
-    echo "Stop service"
-    stop_process fake-service
-    status
-elif [[ "$1" == "status" ]]; then
-    status
-else
-    echo "Unknown command"
-    exit 1
-fi
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 5b4ff32..0bd8d49 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -36,7 +36,8 @@
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
 ALL_LIBS+=" oslo.serialization django_openstack_auth"
-ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap"
+ALL_LIBS+=" python-openstackclient osc-lib osc-placement"
+ALL_LIBS+=" os-client-config oslo.rootwrap"
 ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service"
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 55cd725..efe0125 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -84,10 +84,10 @@
         # we can find local mirrors then use that mirror.
         source /etc/ci/mirror_info.sh
 
-        sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main"
+        sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main"
     else
         # Otherwise use upstream UCA
-        sudo add-apt-repository -y cloud-archive:ocata
+        sudo add-apt-repository -y cloud-archive:pike
     fi
 
     # Disable use of libvirt wheel since a cached wheel build might be
@@ -202,5 +202,22 @@
 # on python-virtualenv), first install the distro python-virtualenv
 # to satisfy any dependencies then use pip to overwrite it.
 
-install_package python-virtualenv
-pip_install -U --force-reinstall virtualenv
+# ... but, for infra builds, the pip-and-virtualenv [1] element has
+# already done this to ensure the latest pip, virtualenv and
+# setuptools on the base image for all platforms.  It has also added
+# the packages to the yum/dnf ignore list to prevent them being
+# overwritten with old versions.  F26 and dnf 2.0 has changed
+# behaviour that means re-installing python-virtualenv fails [2].
+# Thus we do a quick check if we're in the infra environment by
+# looking for the mirror config script before doing this, and just
+# skip it if so.
+
+# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \
+#        diskimage_builder/elements/pip-and-virtualenv/ \
+#            install.d/pip-and-virtualenv-source-install/04-install-pip
+# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
+
+if [[ ! -f /etc/ci/mirror_info.sh ]]; then
+    install_package python-virtualenv
+    pip_install -U --force-reinstall virtualenv
+fi
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 29b93ed..3a27c4a 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -1,5 +1,14 @@
 #!/bin/bash
 
+# Print out a list of image and other files to download for caching.
+# This is mostly used by the OpenStack infrastructure during daily
+# image builds to save the large images to /opt/cache/files (see [1])
+#
+# The two lists of URL's downloaded are the IMAGE_URLS and
+# EXTRA_CACHE_URLS, which are setup in stackrc
+#
+# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos
+
 # Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 
@@ -31,12 +40,20 @@
     ALL_IMAGES+=$URLS
 done
 
-# Make a nice list
-echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
-
 # Sanity check - ensure we have a minimum number of images
 num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l)
 if [[ "$num" -lt 4 ]]; then
     echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right."
     exit 1
 fi
+
+# This is extra non-image files that we want pre-cached.  This is kept
+# in a separate list because devstack loops over the IMAGE_LIST to
+# upload files to glance and these aren't images.  (This was a bit of an
+# after-thought which is why the naming around this is very
+# image-centric)
+URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS)
+ALL_IMAGES+=$URLS
+
+# Make a nice combined list
+echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 6189085..da59093 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -88,15 +88,6 @@
     export PYTHON=$(which python 2>/dev/null)
 fi
 
-if is_suse; then
-    # now reinstall cryptography from source, in order to rebuilt it against the
-    # system libssl rather than the bundled openSSL 1.1, which segfaults when combined
-    # with a system provided openSSL 1.0
-    # see https://github.com/pyca/cryptography/issues/3804 and followup issues
-    sudo pip install cryptography --no-binary :all:
-fi
-
-
 # Mark end of run
 # ---------------
 
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
index 2169cc2..07716b0 100755
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -3,12 +3,12 @@
 # This tool lists processes that lock memory pages from swapping to disk.
 
 import re
-import subprocess
 
 import psutil
 
 
-SUMMARY_REGEX = re.compile(b".*\s+(?P<locked>[\d]+)\s+KB")
+LCK_SUMMARY_REGEX = re.compile(
+    "^VmLck:\s+(?P<locked>[\d]+)\s+kB", re.MULTILINE)
 
 
 def main():
@@ -22,28 +22,21 @@
 def _get_report():
     mlock_users = []
     for proc in psutil.process_iter():
-        pid = proc.pid
         # sadly psutil does not expose locked pages info, that's why we
-        # call to pmap and parse the output here
+        # iterate over the /proc/%pid/status files manually
         try:
-            out = subprocess.check_output(['pmap', '-XX', str(pid)])
-        except subprocess.CalledProcessError as e:
-            # 42 means process just vanished, which is ok
-            if e.returncode == 42:
-                continue
-            raise
-        last_line = out.splitlines()[-1]
-
-        # some processes don't provide a memory map, for example those
-        # running as kernel services, so we need to skip those that don't
-        # match
-        result = SUMMARY_REGEX.match(last_line)
-        if result:
-            locked = int(result.group('locked'))
-            if locked:
-                mlock_users.append({'name': proc.name(),
-                                    'pid': pid,
-                                    'locked': locked})
+            s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
+        except EnvironmentError:
+            continue
+        with s:
+            for line in s:
+                result = LCK_SUMMARY_REGEX.search(line)
+                if result:
+                    locked = int(result.group('locked'))
+                    if locked:
+                        mlock_users.append({'name': proc.name(),
+                                            'pid': proc.pid,
+                                            'locked': locked})
 
     # produce a single line log message with per process mlock stats
     if mlock_users:
diff --git a/unstack.sh b/unstack.sh
index 77a151f..5d3672e 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -171,15 +171,6 @@
     stop_dstat
 fi
 
-# Clean up the remainder of the screen processes
-SCREEN=$(which screen)
-if [[ -n "$SCREEN" ]]; then
-    SESSION=$(screen -ls | awk "/[0-9]+.${SCREEN_NAME}/"'{ print $1 }')
-    if [[ -n "$SESSION" ]]; then
-        screen -X -S $SESSION quit
-    fi
-fi
-
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
 # enabled backends. So if Cinder is enabled, and installed successfully we are
 # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.