Merge "neutron-legacy: Remove no longer necessary vpnaas conditional"
diff --git a/.gitignore b/.gitignore
index d2c127d..8fe56ad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,11 @@
 *~
 .*.sw?
 *.log
-*.log.[1-9]
+*-log
+*.log.*
+*-log.*
 *.pem
+*.pyc
 .localrc.auto
 .localrc.password
 .prereqs
@@ -25,7 +28,7 @@
 files/ir-deploy*
 files/ironic-inspector*
 files/etcd*
-local.conf
+/local.conf
 local.sh
 localrc
 proto
diff --git a/.gitreview b/.gitreview
index 570d31a..e1bf63b 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=review.openstack.org
+host=review.opendev.org
 port=29418
-project=openstack-dev/devstack.git
+project=openstack/devstack.git
diff --git a/.zuul.yaml b/.zuul.yaml
index bb7239a..7a85266 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,3 +1,11 @@
+- pragma:
+    # NOTE(gtema): this is required so that changes on the SDK feature/r1
+    # branch can use devstack
+    # TODO(gtema): delete this once the r1 branch is merged into master
+    implied-branches:
+      - master
+      - feature/r1
+
 - nodeset:
     name: openstack-single-node
     nodes:
@@ -9,6 +17,86 @@
           - controller
 
 - nodeset:
+    name: openstack-single-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-xenial
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-centos-7
+    nodes:
+      - name: controller
+        label: centos-7
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-centos-8-stream
+    nodes:
+      - name: controller
+        label: centos-8-stream
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-opensuse-15
+    nodes:
+      - name: controller
+        label: opensuse-15
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-fedora-latest
+    nodes:
+      - name: controller
+        label: fedora-34
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: devstack-single-node-debian-bullseye
+    nodes:
+      - name: controller
+        label: debian-bullseye
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: openstack-two-node
     nodes:
       - name: controller
@@ -16,29 +104,207 @@
       - name: compute1
         label: ubuntu-xenial
     groups:
+      # Node where tests are executed and test results collected
       - name: tempest
         nodes:
           - controller
+      # Nodes running the compute service
       - name: compute
         nodes:
           - controller
           - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+      - name: compute1
+        label: ubuntu-focal
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-xenial
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+      - name: compute1
+        label: ubuntu-xenial
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-three-node-focal
+    nodes:
+      - name: controller
+        label: ubuntu-focal
+      - name: compute1
+        label: ubuntu-focal
+      - name: compute2
+        label: ubuntu-focal
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+          - compute2
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+          - compute2
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+          - compute2
+
+- nodeset:
+    name: openstack-three-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+      - name: compute2
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+          - compute2
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+          - compute2
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+          - compute2
 
 - job:
-    name: devstack
+    name: devstack-base
     parent: multinode
-    description: Base devstack job
-    nodeset: openstack-single-node
+    abstract: true
+    description: |
+      Base abstract Devstack job.
+
+      Defines plays and base variables, but it does not include any project
+      and it does not run any service by default. This is a common base for
+      all Devstack jobs, single or multinode.
+      Variables are defined in job.vars, which is used by single-node jobs
+      and by multinode jobs for the controller, as well as in
+      job.group-vars.subnode, which is used by multinode jobs for the
+      subnodes (everything but the controller).
     required-projects:
-      - openstack-dev/devstack
-      - openstack/cinder
-      - openstack/glance
-      - openstack/keystone
-      - openstack/neutron
-      - openstack/nova
-      - openstack/requirements
-      - openstack/swift
-    timeout: 7200
+      - opendev.org/openstack/devstack
+    roles:
+      - zuul: opendev.org/openstack/devstack-gate
+      - zuul: opendev.org/openstack/openstack-zuul-jobs
     vars:
       devstack_localrc:
         DATABASE_PASSWORD: secretdatabase
@@ -46,34 +312,585 @@
         ADMIN_PASSWORD: secretadmin
         SERVICE_PASSWORD: secretservice
         NETWORK_GATEWAY: 10.1.0.1
-        Q_USE_DEBUG_COMMAND: True
         FIXED_RANGE: 10.1.0.0/20
         IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
         FLOATING_RANGE: 172.24.5.0/24
         PUBLIC_NETWORK_GATEWAY: 172.24.5.1
-        FLOATING_HOST_PREFIX: 172.24.4
-        FLOATING_HOST_MASK: 23
-        SWIFT_REPLICAS: 1
-        SWIFT_START_ALL_SERVICES: False
         LOGFILE: /opt/stack/logs/devstacklog.txt
-        LOG_COLOR: False
-        VERBOSE: True
-        NETWORK_GATEWAY: 10.1.0.1
-        NOVNC_FROM_PACKAGE: True
-        ERROR_ON_CLONE: True
-        # NOTE(dims): etcd 3.x is not available in debian/ubuntu
-        # etc. As a stop gap measure, devstack uses wget to download
-        # from the location below for all the CI jobs.
-        ETCD_DOWNLOAD_URL: "http://tarballs.openstack.org/etcd/"
+        LOG_COLOR: false
+        VERBOSE: true
+        VERBOSE_NO_TIMESTAMP: true
+        NOVNC_FROM_PACKAGE: true
+        ERROR_ON_CLONE: true
+        # Gate jobs can't deal with nested virt. Disable it by default.
+        LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
       devstack_services:
-        horizon: False
-        tempest: False
-    pre-run: playbooks/pre
-    post-run: playbooks/post
+        # Ignore any default set by devstack. Emit a "disable_all_services".
+        base: false
+      zuul_copy_output:
+        '{{ devstack_conf_dir }}/local.conf': logs
+        '{{ devstack_conf_dir }}/localrc': logs
+        '{{ devstack_conf_dir }}/.localrc.auto': logs
+        '{{ devstack_conf_dir }}/.stackenv': logs
+        '{{ devstack_log_dir }}/dstat-csv.log': logs
+        '{{ devstack_log_dir }}/devstacklog.txt': logs
+        '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
+        '{{ devstack_log_dir }}/tcpdump.pcap': logs
+        '{{ devstack_log_dir }}/worlddump-latest.txt': logs
+        '{{ devstack_full_log }}': logs
+        '{{ stage_dir }}/verify_tempest_conf.log': logs
+        '{{ stage_dir }}/apache': logs
+        '{{ stage_dir }}/apache_config': logs
+        '{{ stage_dir }}/etc': logs
+        /var/log/rabbitmq: logs
+        /var/log/postgresql: logs
+        /var/log/mysql: logs
+        /var/log/libvirt: logs
+        /etc/libvirt: logs
+        /etc/sudoers: logs
+        /etc/sudoers.d: logs
+        '{{ stage_dir }}/iptables.txt': logs
+        '{{ stage_dir }}/df.txt': logs
+        '{{ stage_dir }}/pip2-freeze.txt': logs
+        '{{ stage_dir }}/pip3-freeze.txt': logs
+        '{{ stage_dir }}/dpkg-l.txt': logs
+        '{{ stage_dir }}/rpm-qa.txt': logs
+        '{{ stage_dir }}/core': logs
+        '{{ stage_dir }}/listen53.txt': logs
+        '{{ stage_dir }}/deprecations.log': logs
+        '{{ stage_dir }}/audit.log': logs
+        /etc/ceph: logs
+        /var/log/ceph: logs
+        /var/log/openvswitch: logs
+        /var/log/glusterfs: logs
+        /etc/glusterfs/glusterd.vol: logs
+        /etc/resolv.conf: logs
+        /var/log/unbound.log: logs
+      extensions_to_txt:
+        conf: true
+        log: true
+        localrc: true
+        stackenv: true
+        auto: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          DATABASE_PASSWORD: secretdatabase
+          RABBIT_PASSWORD: secretrabbit
+          ADMIN_PASSWORD: secretadmin
+          SERVICE_PASSWORD: secretservice
+          NETWORK_GATEWAY: 10.1.0.1
+          FIXED_RANGE: 10.1.0.0/20
+          IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+          FLOATING_RANGE: 172.24.5.0/24
+          PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+          LOGFILE: /opt/stack/logs/devstacklog.txt
+          LOG_COLOR: false
+          VERBOSE: true
+          VERBOSE_NO_TIMESTAMP: true
+          NOVNC_FROM_PACKAGE: true
+          ERROR_ON_CLONE: true
+          LIBVIRT_TYPE: qemu
+        devstack_services:
+          base: false
+    pre-run: playbooks/pre.yaml
+    run: playbooks/devstack.yaml
+    post-run: playbooks/post.yaml
+    irrelevant-files:
+      # Documentation related
+      - ^.*\.rst$
+      - ^api-ref/.*$
+      - ^doc/.*$
+      - ^releasenotes/.*$
+      # Translations
+      - ^.*/locale/.*po$
 
+- job:
+    name: devstack-minimal
+    parent: devstack-base
+    description: |
+      Minimal devstack base job, intended for use by jobs that need
+      less than the normal minimum set of required-projects.
+    nodeset: openstack-single-node-focal
+    required-projects:
+      - opendev.org/openstack/requirements
+    vars:
+      devstack_localrc:
+        # Multinode specific settings
+        SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+        HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+        PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
+      devstack_services:
+        # Shared services
+        dstat: true
+        etcd3: true
+        memory_tracker: true
+        mysql: true
+        rabbit: true
+    group-vars:
+      subnode:
+        devstack_services:
+          # Shared services
+          dstat: true
+          memory_tracker: true
+        devstack_localrc:
+          # Multinode specific settings
+          HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
+          SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
+          # Subnode specific settings
+          DATABASE_TYPE: mysql
+          RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+
+
+- job:
+    name: devstack
+    parent: devstack-minimal
+    description: |
+      Base devstack job for integration gate.
+
+      This base job can be used for single node and multinode devstack jobs.
+
+      With a single node nodeset, this job sets up an "all-in-one" (aio)
+      devstack with the seven OpenStack services included in the devstack tree:
+      keystone, glance, cinder, neutron, nova, placement, and swift.
+
+      With a two node nodeset, this job sets up an aio + compute node.
+      The controller can be customised using host-vars.controller; the
+      sub-nodes can be customised using group-vars.subnode.
+
+      Descendant jobs can enable or disable services, add devstack
+      configuration options, enable devstack plugins, and configure log files
+      or directories to be transferred to the log server.
+
+      The job assumes that there is only one controller node. The number of
+      subnodes can be scaled up seamlessly by setting a custom nodeset in
+      job.nodeset.
+
+      The run playbook consists of a single role, so it can be easily rewritten
+      and extended.
+    required-projects:
+      - opendev.org/openstack/cinder
+      - opendev.org/openstack/glance
+      - opendev.org/openstack/keystone
+      - opendev.org/openstack/neutron
+      - opendev.org/openstack/nova
+      - opendev.org/openstack/placement
+      - opendev.org/openstack/swift
+    timeout: 7200
+    vars:
+      devstack_localrc:
+        # Common OpenStack services settings
+        SWIFT_REPLICAS: 1
+        SWIFT_START_ALL_SERVICES: false
+        SWIFT_HASH: 1234123412341234
+        DEBUG_LIBVIRT_COREDUMPS: true
+        NOVA_VNC_ENABLED: true
+        OVN_DBS_LOG_LEVEL: dbg
+      devstack_local_conf:
+        post-config:
+          $NEUTRON_CONF:
+            DEFAULT:
+              global_physnet_mtu: '{{ external_bridge_mtu }}'
+      devstack_services:
+        # Core services enabled for this branch.
+        # This list replaces the test-matrix.
+        # Shared services
+        dstat: true
+        etcd3: true
+        memory_tracker: true
+        mysql: true
+        rabbit: true
+        tls-proxy: true
+        # Keystone services
+        key: true
+        # Glance services
+        g-api: true
+        # Nova services
+        n-api: true
+        n-api-meta: true
+        n-cond: true
+        n-cpu: true
+        n-novnc: true
+        n-sch: true
+        # Placement service
+        placement-api: true
+        # OVN services
+        ovn-controller: true
+        ovn-northd: true
+        ovs-vswitchd: true
+        ovsdb-server: true
+        # Neutron services
+        q-svc: true
+        q-ovn-metadata-agent: true
+        # Swift services
+        s-account: true
+        s-container: true
+        s-object: true
+        s-proxy: true
+        # Cinder services
+        c-api: true
+        c-bak: true
+        c-sch: true
+        c-vol: true
+        # Services we don't need.
+        # This section is not really needed; it's for readability.
+        horizon: false
+        tempest: false
+        # Test matrix emits ceilometer but ceilometer is not installed in the
+        # integrated gate, so specifying the services has no effect.
+        # ceilometer-*: false
+    group-vars:
+      subnode:
+        devstack_services:
+          # Core services enabled for this branch.
+          # This list replaces the test-matrix.
+          # Shared services
+          dstat: true
+          memory_tracker: true
+          tls-proxy: true
+          # Nova services
+          n-cpu: true
+          # Placement services
+          placement-client: true
+          # OVN services
+          ovn-controller: true
+          ovs-vswitchd: true
+          ovsdb-server: true
+          # Neutron services
+          q-ovn-metadata-agent: true
+          # Cinder services
+          c-bak: true
+          c-vol: true
+          # Services we don't run at all on subnode.
+          # This section is not really needed; it's for readability.
+          # keystone: false
+          # s-*: false
+          horizon: false
+          tempest: false
+          # Test matrix emits ceilometer but ceilometer is not installed in the
+          # integrated gate, so specifying the services has no effect.
+          # ceilometer-*: false
+        devstack_localrc:
+          # Subnode specific settings
+          GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
+          Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+          NOVA_VNC_ENABLED: true
+          ENABLE_CHASSIS_AS_GW: false
+
+- job:
+    name: devstack-ipv6
+    parent: devstack
+    description: |
+      Devstack single node job for integration gate with IPv6.
+    vars:
+      devstack_localrc:
+        SERVICE_IP_VERSION: 6
+        SERVICE_HOST: ""
+
+- job:
+    name: devstack-enforce-scope
+    parent: devstack
+    description: |
+      This job runs devstack with scope checks enabled.
+    vars:
+      devstack_localrc:
+        # Keep enabling the services here to run with system scope
+        CINDER_ENFORCE_SCOPE: true
+        GLANCE_ENFORCE_SCOPE: true
+
+- job:
+    name: devstack-multinode
+    parent: devstack
+    nodeset: openstack-two-node-focal
+    description: |
+      Simple multinode test to verify multinode functionality on devstack side.
+      This is not meant to be used as a parent job.
+
+# NOTE(ianw) Platform tests have traditionally been non-voting because
+# we often have to rush things through devstack to stabilise the gate,
+# and these platforms don't have the round-the-clock support to avoid
+# becoming blockers in that situation.
+- job:
+    name: devstack-platform-centos-8-stream
+    parent: tempest-full-py3
+    description: CentOS 8 Stream platform test
+    nodeset: devstack-single-node-centos-8-stream
+    voting: false
+    timeout: 9000
+    vars:
+      configure_swap_size: 4096
+
+- job:
+    name: devstack-platform-debian-bullseye
+    parent: tempest-full-py3
+    description: Debian Bullseye platform test
+    nodeset: devstack-single-node-debian-bullseye
+    voting: false
+    timeout: 9000
+    vars:
+      configure_swap_size: 4096
+      # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS
+      # for the time being.
+      devstack_localrc:
+        Q_AGENT: openvswitch
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+      devstack_services:
+        # Disable OVN services
+        ovn-northd: false
+        ovn-controller: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        # Disable Neutron ML2/OVN services
+        q-ovn-metadata-agent: false
+        # Enable Neutron ML2/OVS services
+        q-agt: true
+        q-dhcp: true
+        q-l3: true
+        q-meta: true
+        q-metering: true
+    group-vars:
+      subnode:
+        devstack_services:
+          # Disable OVN services
+          ovn-controller: false
+          ovs-vswitchd: false
+          ovsdb-server: false
+          # Disable Neutron ML2/OVN services
+          q-ovn-metadata-agent: false
+          # Enable Neutron ML2/OVS services
+          q-agt: true
+
+- job:
+    name: devstack-no-tls-proxy
+    parent: tempest-full-py3
+    description: |
+      Tempest job with tls-proxy off.
+
+      Some gates run devstack like this and it follows different code paths.
+    vars:
+      devstack_services:
+        tls-proxy: false
+
+- job:
+    name: devstack-platform-fedora-latest
+    parent: tempest-full-py3
+    description: Fedora latest platform test
+    nodeset: devstack-single-node-fedora-latest
+    voting: false
+
+- job:
+    name: devstack-platform-fedora-latest-virt-preview
+    parent: tempest-full-py3
+    description: Fedora latest platform test using the virt-preview repo.
+    nodeset: devstack-single-node-fedora-latest
+    voting: false
+    vars:
+      devstack_localrc:
+        ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
+
+- job:
+    name: devstack-tox-base
+    parent: devstack
+    description: |
+      Base job for devstack-based functional tests that use tox.
+
+      This job is not intended to be run directly. It's just here
+      for organizational purposes for devstack-tox-functional and
+      devstack-tox-functional-consumer.
+    post-run: playbooks/tox/post.yaml
+    vars:
+      tox_envlist: functional
+      tox_install_siblings: false
+
+- job:
+    name: devstack-tox-functional
+    parent: devstack-tox-base
+    description: |
+      Base job for devstack-based functional tests that use tox.
+
+      Runs devstack, then runs the tox ``functional`` environment,
+      then collects tox/testr build output like normal tox jobs.
+
+      Turns off tox sibling installation. Projects may be involved
+      in the devstack deployment and so may be in the required-projects
+      list, but may not want to test against master of the other
+      projects in their tox env. Child jobs can set tox_install_siblings
+      to True to re-enable sibling processing.
+    run: playbooks/tox/run-both.yaml
+
+- job:
+    name: devstack-tox-functional-consumer
+    parent: devstack
+    description: |
+      Base job for devstack-based functional tests for projects that
+      consume the devstack cloud.
+
+      This base job should only be used by projects that are not involved
+      in the devstack deployment step, but are instead projects that are using
+      devstack to get a cloud against which they can test things.
+
+      Runs devstack in pre-run, then runs the tox ``functional`` environment,
+      then collects tox/testr build output like normal tox jobs.
+
+      Turns off tox sibling installation. Projects may be involved
+      in the devstack deployment and so may be in the required-projects
+      list, but may not want to test against master of the other
+      projects in their tox env. Child jobs can set tox_install_siblings
+      to True to re-enable sibling processing.
+    pre-run:
+      - playbooks/devstack.yaml
+      - playbooks/tox/pre.yaml
+    run: playbooks/tox/run.yaml
+
+- job:
+    name: devstack-unit-tests
+    nodeset: ubuntu-focal
+    description: |
+      Runs unit tests on the devstack project.
+
+      It runs ``run_tests.sh``.
+    pre-run: playbooks/unit-tests/pre.yaml
+    run: playbooks/unit-tests/run.yaml
 
 - project:
-    name: openstack-dev/devstack
+    templates:
+      - integrated-gate-py3
+      - publish-openstack-docs-pti
     check:
       jobs:
         - devstack
+        - devstack-ipv6
+        - devstack-enforce-scope
+        - devstack-platform-fedora-latest
+        - devstack-platform-centos-8-stream
+        - devstack-platform-debian-bullseye
+        - devstack-multinode
+        - devstack-unit-tests
+        - openstack-tox-bashate
+        - ironic-tempest-bios-ipmi-direct-tinyipa
+        - swift-dsvm-functional
+        - grenade:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-linuxbridge-tempest:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-ovn-tempest-ovs-release:
+            voting: false
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-multinode-full-py3:
+            voting: false
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - openstacksdk-functional-devstack:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-ipv6-only:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - nova-ceph-multistore:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+    gate:
+      jobs:
+        - devstack
+        - devstack-ipv6
+        - devstack-enforce-scope
+        - devstack-multinode
+        - devstack-unit-tests
+        - openstack-tox-bashate
+        - neutron-ovs-grenade-multinode:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-linuxbridge-tempest:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - ironic-tempest-bios-ipmi-direct-tinyipa
+        - swift-dsvm-functional
+        - grenade:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - openstacksdk-functional-devstack:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-ipv6-only:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - nova-ceph-multistore:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+    # Please add a note on each job and conditions for the job not
+    # being experimental any more, so we can keep this list somewhat
+    # pruned.
+    #
+    # * nova-next: maintained by nova for unreleased/undefaulted
+    #    things
+    # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack
+    #    testing when neutron-api is served by uwsgi; it's in experimental for
+    #    testing. Next cycle we can remove this job if things turn out to be
+    #    stable enough.
+    # * neutron-functional-with-uwsgi: maintained by neutron for functional
+    #    tests. Next cycle we can remove this one if things turn out to be
+    #    stable enough with uwsgi.
+    # * neutron-tempest-with-uwsgi: maintained by neutron for tempest tests.
+    #    Next cycle we can remove this if everything turns out stable enough.
+    # * nova-multi-cell: maintained by nova and currently non-voting in the
+    #    check queue for nova changes but relies on devstack configuration
+    # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood
+    #    for Nova to allow early testing of the latest versions of Libvirt and
+    #    QEMU. Should only graduate out of experimental if it ever moves into
+    #    the check queue for Nova.
+
+    experimental:
+      jobs:
+        - nova-multi-cell
+        - nova-next
+        - neutron-fullstack-with-uwsgi
+        - neutron-functional-with-uwsgi
+        - neutron-tempest-with-uwsgi
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-ovs-tempest-dvr:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-ovs-tempest-dvr-ha-multinode-full:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - cinder-tempest-lvm-multibackend:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-pg-full:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - devstack-platform-fedora-latest-virt-preview
+        - devstack-no-tls-proxy
+    periodic:
+      jobs:
+        - devstack-no-tls-proxy
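The ``devstack`` job defined above notes that descendant jobs can enable or
disable services, add configuration options, and enable plugins. As a rough
sketch of how a consumer repository might build on it (the job name, plugin
name, and plugin repository below are hypothetical, not part of this change)::

    - job:
        name: devstack-plugin-example-tempest   # hypothetical job name
        parent: devstack
        description: Illustrative descendant job only.
        required-projects:
          # hypothetical plugin repository
          - opendev.org/openstack/devstack-plugin-example
        vars:
          devstack_plugins:
            # enable a devstack plugin: plugin name mapped to its repo URL
            devstack-plugin-example: https://opendev.org/openstack/devstack-plugin-example
          devstack_services:
            # flip individual services relative to the parent job
            horizon: false
          devstack_localrc:
            # extra settings merged into the generated local.conf
            VOLUME_BACKING_FILE_SIZE: 24G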
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..bb51165
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,19 @@
+The source repository for this project can be found at:
+
+   https://opendev.org/openstack/devstack
+
+Pull requests submitted through GitHub are not monitored.
+
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
+
+   https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
+
+Bugs should be filed on Launchpad:
+
+   https://bugs.launchpad.net/devstack
+
+For more specific information about contributing to this repository, see the
+Devstack contributor guide:
+
+   https://docs.openstack.org/devstack/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index d5d6fbc..6a91e0a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -10,8 +10,8 @@
 Shell script was chosen because it best illustrates the steps used to
 set up and interact with OpenStack components.
 
-DevStack's official repository is located on git.openstack.org at
-https://git.openstack.org/openstack-dev/devstack.  Besides the master branch that
+DevStack's official repository is located on opendev.org at
+https://opendev.org/openstack/devstack.  Besides the master branch that
 tracks the OpenStack trunk branches a separate branch is maintained for all
 OpenStack releases starting with Diablo (stable/diablo).
 
@@ -23,10 +23,10 @@
 .. _contribute: https://docs.openstack.org/infra/manual/developers.html
 
 __ lp_
-.. _lp: https://launchpad.net/~devstack
+.. _lp: https://launchpad.net/devstack
 
 The `Gerrit review
-queue <https://review.openstack.org/#/q/project:openstack-dev/devstack,n,z>`__
+queue <https://review.opendev.org/#/q/project:openstack/devstack>`__
 is used for all commits.
 
 The primary script in DevStack is ``stack.sh``, which performs the bulk of the
@@ -47,12 +47,7 @@
 level.
 
 ``doc`` - Contains the Sphinx source for the documentation.
-``tools/build_docs.sh`` is used to generate the HTML versions of the
-DevStack scripts.  A complete doc build can be run with ``tox -edocs``.
-
-``exercises`` - Contains the test scripts used to sanity-check and
-demonstrate some OpenStack functions. These scripts know how to exit
-early or skip services that are not enabled.
+A complete doc build can be run with ``tox -edocs``.
 
 ``extras.d`` - Contains the dispatch scripts called by the hooks in
 ``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins
@@ -79,8 +74,7 @@
 
 ``tools`` - Contains a collection of stand-alone scripts. While these
 may reference the top-level DevStack configuration they can generally be
-run alone. There are also some sub-directories to support specific
-environments such as XenServer.
+run alone.
 
 
 Scripts
@@ -150,8 +144,8 @@
 * Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR``
 * Global service configuration like ``ENABLED_SERVICES``
 * Variables used by multiple services that do not have a clear owner, i.e.
-  ``VOLUME_BACKING_FILE_SIZE`` (nova-compute, nova-volumes and cinder) or
-  ``PUBLIC_NETWORK_NAME`` (nova-network and neutron)
+  ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or
+  ``PUBLIC_NETWORK_NAME`` (now only neutron, but formerly nova-network too)
 * Variables that can not be cleanly declared in a project file due to
   dependency ordering, i.e. the order of sourcing the project files can
   not be changed for other reasons but the earlier file needs to dereference a
@@ -168,7 +162,7 @@
 
 The DevStack repo now contains all of the static pages of devstack.org in
 the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every
-commit and updates devstack.org (now a redirect to docs.openstack.org/developer/devstack).
+commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/).
 
 All of the scripts are processed with shocco_ to render them with the comments
 as text describing the script below.  For this reason we tend to be a little
@@ -183,88 +177,6 @@
 OpenStack project standard.
 
 
-Exercises
----------
-
-The scripts in the exercises directory are meant to 1) perform basic operational
-checks on certain aspects of OpenStack; and b) document the use of the
-OpenStack command-line clients.
-
-In addition to the guidelines above, exercise scripts MUST follow the structure
-outlined here.  ``swift.sh`` is perhaps the clearest example of these guidelines.
-These scripts are executed serially by ``exercise.sh`` in testing situations.
-
-* Begin and end with a banner that stands out in a sea of script logs to aid
-  in debugging failures, particularly in automated testing situations.  If the
-  end banner is not displayed, the script ended prematurely and can be assumed
-  to have failed.
-
-  ::
-
-    echo "**************************************************"
-    echo "Begin DevStack Exercise: $0"
-    echo "**************************************************"
-    ...
-    set +o xtrace
-    echo "**************************************************"
-    echo "End DevStack Exercise: $0"
-    echo "**************************************************"
-
-* The scripts will generally have the shell ``xtrace`` attribute set to display
-  the actual commands being executed, and the ``errexit`` attribute set to exit
-  the script on non-zero exit codes::
-
-    # This script exits on an error so that errors don't compound and you see
-    # only the first error that occurred.
-    set -o errexit
-
-    # Print the commands being run so that we can see the command that triggers
-    # an error.  It is also useful for following as the install occurs.
-    set -o xtrace
-
-* Settings and configuration are stored in ``exerciserc``, which must be
-  sourced after ``openrc`` or ``stackrc``::
-
-    # Import exercise configuration
-    source $TOP_DIR/exerciserc
-
-* There are a couple of helper functions in the common ``functions`` sub-script
-  that will check for non-zero exit codes and unset environment variables and
-  print a message and exit the script.  These should be called after most client
-  commands that are not otherwise checked to short-circuit long timeouts
-  (instance boot failure, for example)::
-
-    swift post $CONTAINER
-    die_if_error "Failure creating container $CONTAINER"
-
-    FLOATING_IP=`euca-allocate-address | cut -f2`
-    die_if_not_set FLOATING_IP "Failure allocating floating IP"
-
-* If you want an exercise to be skipped when for example a service wasn't
-  enabled for the exercise to be run, you can exit your exercise with the
-  special exitcode 55 and it will be detected as skipped.
-
-* The exercise scripts should only use the various OpenStack client binaries to
-  interact with OpenStack.  This specifically excludes any ``*-manage`` tools
-  as those assume direct access to configuration and databases, as well as direct
-  database access from the exercise itself.
-
-* If specific configuration needs to be present for the exercise to complete,
-  it should be staged in ``stack.sh``, or called from ``stack.sh``.
-
-* The ``OS_*`` environment variables should be the only ones used for all
-  authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
-
-.. _CLIAuth: https://wiki.openstack.org/CLIAuth
-
-* The exercise MUST clean up after itself if successful.  If it is not successful,
-  it is assumed that state will be left behind; this allows a chance for developers
-  to look around and attempt to debug the problem.  The exercise SHOULD clean up
-  or graciously handle possible artifacts left over from previous runs if executed
-  again.  It is acceptable to require a reboot or even a re-install of DevStack
-  to restore a clean test environment.
-
-
 Bash Style Guidelines
 ~~~~~~~~~~~~~~~~~~~~~
 DevStack defines a bash set of best practices for maintaining large
@@ -276,7 +188,7 @@
 list below is not complete for what bashate checks, nor is it all checked
 by bashate.  So many lines of code, so little time.
 
-.. _bashate: https://pypi.python.org/pypi/bashate
+.. _bashate: https://pypi.org/project/bashate/
 
 Whitespace Rules
 ----------------
@@ -362,9 +274,6 @@
   even years from now -- why we were motivated to make a change at the
   time.
 
-* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people
-  that should be added to reviews of various sub-systems.
-
 
 Making Changes, Testing, and CI
 -------------------------------
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
deleted file mode 100644
index d4968a6..0000000
--- a/MAINTAINERS.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-MAINTAINERS
-===========
-
-
-Overview
---------
-
-The following is a list of people known to have interests in
-particular areas or sub-systems of devstack.
-
-It is a rather general guide intended to help seed the initial
-reviewers list of a change.  A +1 on a review from someone identified
-as being a maintainer of its affected area is a very positive flag to
-the core team for the veracity of the change.
-
-The ``devstack-core`` group can still be added to all reviews.
-
-
-Format
-~~~~~~
-
-The format of the file is the name of the maintainer and their
-gerrit-registered email.
-
-
-Maintainers
------------
-
-.. contents:: :local:
-
-
-Ceph
-~~~~
-
-* Sebastien Han <sebastien.han@enovance.com>
-
-Cinder
-~~~~~~
-
-Fedora/CentOS/RHEL
-~~~~~~~~~~~~~~~~~~
-
-* Ian Wienand <iwienand@redhat.com>
-
-Neutron
-~~~~~~~
-
-MidoNet
-~~~~~~~
-
-* Jaume Devesa <devvesa@gmail.com>
-* Ryu Ishimoto <ryu@midokura.com>
-* YAMAMOTO Takashi <yamamoto@midokura.com>
-
-OpenDaylight
-~~~~~~~~~~~~
-
-* Kyle Mestery <mestery@mestery.com>
-
-OpenFlow Agent (ofagent)
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
-* Fumihiko Kakuma <kakuma@valinux.co.jp>
-
-Swift
-~~~~~
-
-* Chmouel Boudjnah <chmouel@enovance.com>
-
-SUSE
-~~~~
-
-* Ralf Haferkamp <rhafer@suse.de>
-* Vincent Untz <vuntz@suse.com>
-
-Tempest
-~~~~~~~
-
-Xen
-~~~
-* Bob Ball <bob.ball@citrix.com>
-
-Zaqar (Marconi)
-~~~~~~~~~~~~~~~
-
-* Flavio Percoco <flaper87@gmail.com>
-* Malini Kamalambal <malini.kamalambal@rackspace.com>
-
-Oracle Linux
-~~~~~~~~~~~~
-* Wiekus Beukes <wiekus.beukes@oracle.com>
diff --git a/README.rst b/README.rst
index 6885546..f3a585a 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@
 `stackrc` for the default set).  Usually just before a release there will be
 milestone-proposed branches that need to be tested::
 
-    GLANCE_REPO=git://git.openstack.org/openstack/glance.git
+    GLANCE_REPO=https://opendev.org/openstack/glance.git
     GLANCE_BRANCH=milestone-proposed
 
 Start A Dev Cloud
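The ``GLANCE_REPO``/``GLANCE_BRANCH`` override shown in this README hunk can
also be expressed per-job in the Zuul configuration above via
``devstack_localrc``. A minimal sketch, assuming a hypothetical child job::

    - job:
        name: devstack-glance-milestone-proposed   # hypothetical job name
        parent: devstack
        vars:
          devstack_localrc:
            # same effect as the stackrc override in the README example
            GLANCE_REPO: https://opendev.org/openstack/glance.git
            GLANCE_BRANCH: milestone-proposed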
diff --git a/clean.sh b/clean.sh
index 2333596..870dfd4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -113,7 +113,7 @@
 cleanup_database
 
 # Clean out data and status
-sudo rm -rf $DATA_DIR $DEST/status
+sudo rm -rf $DATA_DIR $DEST/status $DEST/async
 
 # Clean out the log file and log directories
 if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then
@@ -122,16 +122,11 @@
 if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then
     sudo rm -rf $LOGDIR
 fi
-if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then
-    sudo rm -rf $SCREEN_LOGDIR
-fi
 
-# Clean out the sytemd user unit files if systemd was used.
-if [[ "$USE_SYSTEMD" = "True" ]]; then
-    sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
-    # Make systemd aware of the deletion.
-    $SYSTEMCTL daemon-reload
-fi
+# Clean out the systemd unit files.
+sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
+# Make systemd aware of the deletion.
+$SYSTEMCTL daemon-reload
 
 # Clean up venvs
 DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack"
@@ -150,12 +145,5 @@
 
 rm -rf ~/.config/openstack
 
-# Clean up all *.pyc files
-if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
-    find_version=`find --version | awk '{ print $NF; exit}'`
-    if vercmp "$find_version" "<" "4.2.3" ; then
-        sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm
-    else
-        sudo find $DEST -name "*.pyc" -delete
-    fi
-fi
+# Clear any fstab entries made
+sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..ffce3ff
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,11 @@
+pbr>=2.0.0,!=2.1.0
+
+Pygments
+docutils
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
+nwdiag
+blockdiag
+sphinxcontrib-blockdiag
+sphinxcontrib-nwdiag
+zuul-sphinx>=0.2.0
diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf
new file mode 120000
index 0000000..cfc2a4e
--- /dev/null
+++ b/doc/source/assets/local.conf
@@ -0,0 +1 @@
+../../../samples/local.conf
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 780237f..2e17da1 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,9 +11,6 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -26,13 +23,22 @@
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ]
+extensions = [ 'sphinx.ext.autodoc',
+               'zuul_sphinx',
+               'openstackdocstheme',
+               'sphinxcontrib.blockdiag',
+               'sphinxcontrib.nwdiag' ]
 
 # openstackdocstheme options
-repository_name = 'openstack-dev/devstack'
-bug_project = 'devstack'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/devstack'
+openstackdocs_pdf_link = True
+openstackdocs_bug_project = 'devstack'
+openstackdocs_bug_tag = ''
+openstackdocs_auto_name = False
+# This repo is not tagged, so don't set versions
+openstackdocs_auto_version = False
+version = ''
+release = ''
 
 todo_include_todos = True
 
@@ -81,7 +87,7 @@
 show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
 
 # A list of ignored prefixes for module index sorting.
 modindex_common_prefix = ['DevStack-doc.']
@@ -119,11 +125,6 @@
 # pixels large.
 #html_favicon = None
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
-
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 #html_use_smartypants = True
@@ -167,21 +168,10 @@
 
 # -- Options for LaTeX output --------------------------------------------------
 
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'DevStack-doc.tex', u'DevStack Docs',
+  ('index', 'doc-devstack.tex', u'DevStack Docs',
    u'OpenStack DevStack Team', 'manual'),
 ]
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 23f680a..b4fff4f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -41,6 +41,7 @@
 -  **extra** - runs after services are started and before any files in
    ``extra.d`` are executed
 -  **post-extra** - runs after files in ``extra.d`` are executed
+-  **test-config** - runs after tempest (and plugins) are configured
 
 The file is processed strictly in sequence; meta-sections may be
 specified more than once but if any settings are duplicated the last to
@@ -204,7 +205,7 @@
 Historically DevStack obtained all local configuration and
 customizations from a ``localrc`` file.  In Oct 2013 the
 ``local.conf`` configuration method was introduced (in `review 46768
-<https://review.openstack.org/#/c/46768/>`__) to simplify this
+<https://review.opendev.org/#/c/46768/>`__) to simplify this
 process.
 
 Configuration Notes
@@ -223,25 +224,22 @@
 from a different repo for testing, such as a Gerrit branch
 proposal. ``GIT_BASE`` points to the primary repository server.
 
-    ::
+::
 
-        NOVA_REPO=$GIT_BASE/openstack/nova.git
-        NOVA_BRANCH=master
+    NOVA_REPO=$GIT_BASE/openstack/nova.git
+    NOVA_BRANCH=master
 
 To pull a branch directly from Gerrit, get the repo and branch from
-the Gerrit review page:
+the Gerrit review page::
 
-    ::
+    git fetch https://review.opendev.org/openstack/nova \
+        refs/changes/50/5050/1 && git checkout FETCH_HEAD
 
-        git fetch https://review.openstack.org/p/openstack/nova refs/changes/50/5050/1 && git checkout FETCH_HEAD
+The repo is the stanza following ``fetch`` and the branch is the
+stanza following that::
 
-    The repo is the stanza following ``fetch`` and the branch is the
-    stanza following that:
-
-    ::
-
-        NOVA_REPO=https://review.openstack.org/p/openstack/nova
-        NOVA_BRANCH=refs/changes/50/5050/1
+    NOVA_REPO=https://review.opendev.org/openstack/nova
+    NOVA_BRANCH=refs/changes/50/5050/1
 
 
 Installation Directory
@@ -254,13 +252,15 @@
 later variables.  It can be useful to set it even though it is not
 changed from the default value.
 
-    ::
+::
 
-        DEST=/opt/stack
+    DEST=/opt/stack
 
 Logging
 -------
 
+.. _enable_logging:
+
 Enable Logging
 ~~~~~~~~~~~~~~
 
@@ -270,21 +270,33 @@
 timestamp will be appended to the given filename for each run of
 ``stack.sh``.
 
-    ::
+::
 
-        LOGFILE=$DEST/logs/stack.sh.log
+    LOGFILE=$DEST/logs/stack.sh.log
 
 Old log files are cleaned automatically if ``LOGDAYS`` is set to the
 number of days of old log files to keep.
 
-    ::
+::
 
-        LOGDAYS=1
+    LOGDAYS=1
 
 Some coloring is used during the DevStack runs to make it easier to
 see what is going on. This can be disabled with::
 
-        LOG_COLOR=False
+    LOG_COLOR=False
+
+When using the logfile, by default logs are sent to the console and
+the file.  You can set ``VERBOSE`` to ``false`` if you only wish the
+logs to be sent to the file (this may avoid having double-logging in
+some cases where you are capturing the script output and the log
+files).  If ``VERBOSE`` is ``true`` you can additionally set
+``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each
+output line sent to the console.  This can be useful in some
+situations where the console output is being captured by a runner or
+framework (e.g. Ansible) that adds its own timestamps.  Note that the
+log lines sent to the ``LOGFILE`` will still be prefixed with a
+timestamp.
 
 Logging the Service Output
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -294,7 +306,7 @@
 
 To query the logs use the ``journalctl`` command, such as::
 
-  journalctl --unit devstack@*
+  sudo journalctl --unit devstack@*
 
 More examples can be found in :ref:`journalctl-examples`.
 
@@ -304,12 +316,12 @@
 For example, non-interactive installs probably wish to save output to
 a file, keep service logs and disable color in the stored files.
 
-   ::
+::
 
-       [[local|localrc]]
-       DEST=/opt/stack/
-       LOGFILE=$LOGDIR/stack.sh.log
-       LOG_COLOR=False
+   [[local|localrc]]
+   DEST=/opt/stack/
+   LOGFILE=$LOGDIR/stack.sh.log
+   LOG_COLOR=False
 
 Database Backend
 ----------------
@@ -317,12 +329,10 @@
 Multiple database backends are available. The available databases are defined
 in the lib/databases directory.
 ``mysql`` is the default database, choose a different one by putting the
-following in the ``localrc`` section:
+following in the ``localrc`` section::
 
-   ::
-
-      disable_service mysql
-      enable_service postgresql
+  disable_service mysql
+  enable_service postgresql
 
 ``mysql`` is the default database.
 
@@ -334,11 +344,9 @@
 RabbitMQ is handled via the usual service functions and
 ``ENABLED_SERVICES``.
 
-Example disabling RabbitMQ in ``local.conf``:
+Example disabling RabbitMQ in ``local.conf``::
 
-::
-
-    disable_service rabbit
+  disable_service rabbit
 
 
 Apache Frontend
@@ -357,34 +365,23 @@
 
 Keystone is run under Apache with ``mod_wsgi`` by default.
 
-Example (Keystone)
-
-::
+Example (Keystone)::
 
     KEYSTONE_USE_MOD_WSGI="True"
 
-Example (Nova):
-
-::
+Example (Nova)::
 
     NOVA_USE_MOD_WSGI="True"
 
-Example (Swift):
-
-::
+Example (Swift)::
 
     SWIFT_USE_MOD_WSGI="True"
 
-Example (Heat):
-
-::
+Example (Heat)::
 
     HEAT_USE_MOD_WSGI="True"
 
-
-Example (Cinder):
-
-::
+Example (Cinder)::
 
     CINDER_USE_MOD_WSGI="True"
 
@@ -400,9 +397,9 @@
 git trees by specifying it in ``LIBS_FROM_GIT``.  Multiple libraries
 can be specified as a comma separated list.
 
-   ::
+::
 
-      LIBS_FROM_GIT=python-keystoneclient,oslo.config
+  LIBS_FROM_GIT=python-keystoneclient,oslo.config
 
 Setting the variable to ``ALL`` will activate the download for all
 libraries.
@@ -418,9 +415,9 @@
 of a venv to be used for the project.  The array index is the project
 name.  Multiple projects can use the same venv if desired.
 
-  ::
+::
 
-    PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
+  PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
 
 ``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional
 packages to be installed into each venv.  Often projects will not have
@@ -429,10 +426,9 @@
 configurations.  By default, the enabled databases will have their
 Python bindings added when they are enabled.
 
-  ::
+::
 
-     ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
-
+  ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
 
 A clean install every time
 --------------------------
@@ -442,9 +438,9 @@
 ``RECLONE`` is set to ``yes``. This avoids having to manually remove
 repos in order to get the current branch from ``$GIT_BASE``.
 
-    ::
+::
 
-        RECLONE=yes
+  RECLONE=yes
 
 Upgrade packages installed by pip
 ---------------------------------
@@ -455,9 +451,9 @@
 required Python packages will be upgraded to the most recent version
 that matches requirements.
 
-    ::
+::
 
-        PIP_UPGRADE=True
+  PIP_UPGRADE=True
 
 Guest Images
 ------------
@@ -471,11 +467,11 @@
 these default images; in that case, you will want to populate
 ``IMAGE_URLS`` with sufficient images to satisfy testing-requirements.
 
-    ::
+::
 
-        DOWNLOAD_DEFAULT_IMAGES=False
-        IMAGE_URLS="http://foo.bar.com/image.qcow,"
-        IMAGE_URLS+="http://foo.bar.com/image2.qcow"
+  DOWNLOAD_DEFAULT_IMAGES=False
+  IMAGE_URLS="http://foo.bar.com/image.qcow,"
+  IMAGE_URLS+="http://foo.bar.com/image2.qcow"
 
 
 Instance Type
@@ -490,13 +486,13 @@
 default flavors instead.
 
 KVM on Power with QEMU 2.4 requires 512 MB to load the firmware -
-`QEMU 2.4 - PowerPC <http://wiki.qemu.org/ChangeLog/2.4>`__ so users
+`QEMU 2.4 - PowerPC <https://wiki.qemu.org/ChangeLog/2.4>`__ so users
 running instances on ppc64/ppc64le can choose one of the default
 created flavors as follows:
 
-    ::
+::
 
-        DEFAULT_INSTANCE_TYPE=m1.tiny
+  DEFAULT_INSTANCE_TYPE=m1.tiny
 
 
 IP Version
@@ -507,19 +503,19 @@
 either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
 respectively.
 
-    ::
+::
 
-        IP_VERSION=4+6
+  IP_VERSION=4+6
 
 The following optional variables can be used to alter the default IPv6
 behavior:
 
-    ::
+::
 
-        IPV6_RA_MODE=slaac
-        IPV6_ADDRESS_MODE=slaac
-        IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
-        IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
+  IPV6_RA_MODE=slaac
+  IPV6_ADDRESS_MODE=slaac
+  IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
+  IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
 
 *Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
 can be configured with any valid IPv6 prefix. The default values make
@@ -542,11 +538,9 @@
 
 The default value for this setting is ``4``.  Dual-mode support, for
 example ``4+6`` is not currently supported.  ``HOST_IPV6`` can
-optionally be used to alter the default IPv6 address
+optionally be used to alter the default IPv6 address::
 
-    ::
-
-        HOST_IPV6=${some_local_ipv6_address}
+  HOST_IPV6=${some_local_ipv6_address}
 
 Multi-node setup
 ~~~~~~~~~~~~~~~~
@@ -604,8 +598,8 @@
 Swift S3
 ++++++++
 
-If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will
-install the swift3 middleware emulation. Swift will be configured to
+If you are enabling ``s3api`` in ``ENABLED_SERVICES``, DevStack will
+install the s3api middleware emulation. Swift will be configured to
 act as a S3 endpoint for Keystone so effectively replacing the
 ``nova-objectstore``.
 
@@ -634,27 +628,6 @@
     INSTALL_TEMPEST=True
 
 
-Xenserver
-~~~~~~~~~
-
-If you would like to use Xenserver as the hypervisor, please refer to
-the instructions in ``./tools/xen/README.md``.
-
-Cells
-~~~~~
-
-`Cells <http://wiki.openstack.org/blueprint-nova-compute-cells>`__ is
-an alternative scaling option.  To setup a cells environment add the
-following to your ``localrc`` section:
-
-::
-
-    enable_service n-cell
-
-Be aware that there are some features currently missing in cells, one
-notable one being security groups.  The exercises have been patched to
-disable functionality not supported by cells.
-
 Cinder
 ~~~~~~
 
@@ -663,11 +636,17 @@
 ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
 with ``VOLUME_BACKING_FILE_SIZE``.
 
-    ::
+::
 
-        VOLUME_GROUP_NAME="stack-volumes"
-        VOLUME_NAME_PREFIX="volume-"
-        VOLUME_BACKING_FILE_SIZE=10250M
+  VOLUME_GROUP_NAME="stack-volumes"
+  VOLUME_NAME_PREFIX="volume-"
+  VOLUME_BACKING_FILE_SIZE=24G
+
+When running highly concurrent tests, the default per-project quotas
+for volumes, backups, or snapshots may be too small.  These can be
+adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``,
+or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value.  (The default for
+each is 10.)
 
 
 Keystone
@@ -693,7 +672,6 @@
 
     disable_service horizon
     KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
-    KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
     REGION_NAME=RegionTwo
     KEYSTONE_REGION_NAME=RegionOne
 
@@ -716,43 +694,23 @@
 
     ENABLE_IDENTITY_V2=False
 
-Exercises
-~~~~~~~~~
 
-``exerciserc`` is used to configure settings for the exercise scripts.
-The values shown below are the default values. These can all be
-overridden by setting them in the ``localrc`` section.
+Glance
+++++++
 
-* Max time to wait while vm goes from build to active state
+The default image size quota of 1GiB may be too small if larger images
+are to be used. Change the default at setup time with:
 
-    ::
+::
 
-        ACTIVE_TIMEOUT==30
+    GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000
 
-* Max time to wait for proper IP association and dis-association.
+or at runtime via:
 
-    ::
+::
 
-        ASSOCIATE_TIMEOUT=15
-
-* Max time till the vm is bootable
-
-    ::
-
-        BOOT_TIMEOUT=30
-
-* Max time from run instance command until it is running
-
-    ::
-
-        RUNNING_TIMEOUT=$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))
-
-* Max time to wait for a vm to terminate
-
-    ::
-
-        TERMINATE_TIMEOUT=30
-
+    openstack --os-cloud devstack-system-admin registered limit update \
+      --service glance --default-limit 5000 --region RegionOne image_size_total
 
 .. _arch-configuration:
 
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..4de238f
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,56 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the more project-specific information you need to
+get started with Devstack.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at OFTC.
+* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
+  http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Devstack Core Team
+<https://review.opendev.org/#/admin/groups/50,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Devstack features are tracked on `Launchpad BP <https://blueprints.launchpad.net/devstack>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/devstack>`_.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad <https://bugs.launchpad.net/devstack/+filebug>`__.
+More info about Launchpad usage can be found on the `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_.
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Devstack require two ``Code-Review +2`` votes from
+Devstack core reviewers before one of the core reviewers can approve the patch
+by giving a ``Workflow +1`` vote. One exception is patches to unblock the gate,
+which can be approved by a single core reviewer.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
new file mode 100644
index 0000000..fd0d9cd
--- /dev/null
+++ b/doc/source/debugging.rst
@@ -0,0 +1,46 @@
+=====================
+System-wide debugging
+=====================
+
+A lot can go wrong during a devstack run, and there are a few built-in
+tools to help you.
+
+dstat
+-----
+
+Enable the ``dstat`` service to produce performance logs during the
+devstack run.  These will be logged to the journal and also as a CSV
+file.
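+
+For example, in your ``local.conf`` (a minimal sketch)::
+
+   enable_service dstat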
+
+memory_tracker
+--------------
+
+The ``memory_tracker`` service periodically monitors RAM usage and
+provides consumption output when available memory is seen to be
+falling (i.e. when processes are consuming memory).  It also provides
+output showing locked (unswappable) memory.
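+
+Like ``dstat``, it is enabled as a service (a minimal sketch)::
+
+   enable_service memory_tracker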
+
+tcpdump
+-------
+
+Enable the ``tcpdump`` service to run a background tcpdump.  You must
+set the ``TCPDUMP_ARGS`` variable to something suitable (there is no
+default).  For example, to trace iSCSI communication during a job in
+the OpenStack gate and copy the result into the log output, you might
+use:
+
+.. code-block:: yaml
+
+   job:
+     name: devstack-job
+     parent: devstack
+     vars:
+       devstack_services:
+         tcpdump: true
+       devstack_localrc:
+         TCPDUMP_ARGS: "-i any tcp port 3260"
+       zuul_copy_output:
+         '{{ devstack_log_dir }}/tcpdump.pcap': logs
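+
+Outside of Zuul, the equivalent ``local.conf`` settings might look like
+this (a sketch, reusing the same arguments)::
+
+   enable_service tcpdump
+   TCPDUMP_ARGS="-i any tcp port 3260"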
+
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index ed9b4da..8214de0 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -18,6 +18,57 @@
 Your best choice is probably to choose a `distribution of OpenStack
 <https://www.openstack.org/marketplace/distros/>`__.
 
+Can I use DevStack as a development environment?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sure, you can. That said, there are a couple of things you should note before
+doing so:
+
+- DevStack makes a lot of configuration changes to your system and should not
+  be run in your main development environment.
+
+- All the repositories that DevStack clones when deploying are considered
+  volatile by default and thus are subject to hard resets. This is necessary to
+  keep you in sync with the latest upstream, which is what you want in a CI
+  situation, but it can result in branches being overwritten and files being
+  removed.
+
+  The corollary of this is that if you are working on a specific project, using
+  the DevStack project repository (which defaults to ``/opt/stack/<project>``)
+  as the single master repository for storing all your work is not recommended.
+  This behavior can be overridden by setting the ``RECLONE`` config option to
+  ``no``.  Alternatively, you can avoid running ``stack.sh`` to redeploy by
+  restarting services manually. In any case, you should generally ensure work
+  in progress is pushed to Gerrit or otherwise backed up before running
+  ``stack.sh``.
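+
+  For example, pinning the repositories is as simple as (a minimal sketch):
+
+  .. code-block:: shell
+
+     RECLONE=no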
+
+- If you use DevStack within a VM, you may wish to mount a local OpenStack
+  directory, such as ``~/src/openstack``, inside the VM and configure DevStack
+  to use this as the clone location using the ``{PROJECT}_REPO`` config
+  variables. For example, assuming you're using Vagrant and sharing your home
+  directory, you should place the following in ``local.conf``:
+
+  .. code-block:: shell
+
+     NEUTRON_REPO=/home/vagrant/src/neutron
+     NOVA_REPO=/home/vagrant/src/nova
+     KEYSTONE_REPO=/home/vagrant/src/keystone
+     GLANCE_REPO=/home/vagrant/src/glance
+     SWIFT_REPO=/home/vagrant/src/swift
+     HORIZON_REPO=/home/vagrant/src/horizon
+     CINDER_REPO=/home/vagrant/src/cinder
+     HEAT_REPO=/home/vagrant/src/heat
+     TEMPEST_REPO=/home/vagrant/src/tempest
+     HEATCLIENT_REPO=/home/vagrant/src/python-heatclient
+     GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient
+     NOVACLIENT_REPO=/home/vagrant/src/python-novaclient
+     NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient
+     OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient
+     HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools
+     HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates
+     NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas
+     # ...
+
 Why a shell script, why not chef/puppet/...
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -29,8 +80,7 @@
 ~~~~~~~~~~~~~~~~~
 
 That isn't a question, but please do! The source for DevStack is at
-`git.openstack.org
-<https://git.openstack.org/cgit/openstack-dev/devstack>`__ and bug
+`opendev.org <https://opendev.org/openstack/devstack>`__ and bug
 reports go to `LaunchPad
 <https://bugs.launchpad.net/devstack/>`__. Contributions follow the
 usual process as described in the `developer guide
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index c2c7b91..e7ec629 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -10,6 +10,7 @@
 
 .. toctree::
    :glob:
+   :hidden:
    :maxdepth: 1
 
    guides/single-vm
@@ -20,6 +21,7 @@
    guides/devstack-with-nested-kvm
    guides/nova
    guides/devstack-with-lbaas-v2
+   guides/devstack-with-ldap
 
 All-In-One Single VM
 --------------------
@@ -66,3 +68,13 @@
 --------------------------------
 
 Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
+
+Configure Load-Balancer Version 2
+---------------------------------
+
+Guide to :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+
+Deploying DevStack with LDAP
+----------------------------
+
+Guide to setting up :doc:`DevStack with LDAP <guides/devstack-with-ldap>`.
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 3592844..5d96ca7 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -1,39 +1,51 @@
-Configure Load-Balancer Version 2
-=================================
+Devstack with Octavia Load Balancing
+====================================
 
-Starting in the OpenStack Liberty release, the
-`neutron LBaaS v2 API <http://developer.openstack.org/api-ref-networking-v2-ext.html>`_
-is now stable while the LBaaS v1 API has been deprecated.  The LBaaS v2 reference
-driver is based on Octavia.
+Starting with the OpenStack Pike release, Octavia is a standalone service
+providing load balancing for OpenStack.
 
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
 
 Phase 1: Create DevStack + 2 nova instances
 --------------------------------------------
 
 First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find useful.
+and make sure it is updated. Install git and any other developer tools you find
+useful.
 
 Install devstack
 
-  ::
+::
 
-    git clone https://git.openstack.org/openstack-dev/devstack
-    cd devstack
+    git clone https://opendev.org/openstack/devstack
+    cd devstack/tools
+    sudo ./create-stack-user.sh
+    cd ../..
+    sudo mv devstack /opt/stack
+    sudo chown -R stack:stack /opt/stack/devstack
 
+This will clone the current devstack code locally, then set up the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location in ``/opt/stack/devstack``.
 
-Edit your ``local.conf`` to look like
+Edit your ``/opt/stack/devstack/local.conf`` to look like
 
-  ::
+::
 
     [[local|localrc]]
-    # Load the external LBaaS plugin.
-    enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
-    enable_plugin octavia https://git.openstack.org/openstack/octavia
+    enable_plugin octavia https://opendev.org/openstack/octavia
+    # If you are enabling horizon, include the octavia dashboard
+    # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git
+    # If you are enabling barbican for TLS offload in Octavia, include it here.
+    # enable_plugin barbican https://opendev.org/openstack/barbican
 
     # ===== BEGIN localrc =====
     DATABASE_PASSWORD=password
     ADMIN_PASSWORD=password
     SERVICE_PASSWORD=password
+    SERVICE_TOKEN=password
     RABBIT_PASSWORD=password
     # Enable Logging
     LOGFILE=$DEST/logs/stack.sh.log
@@ -41,27 +53,30 @@
     LOG_COLOR=True
     # Pre-requisite
     ENABLED_SERVICES=rabbit,mysql,key
-    # Horizon
-    ENABLED_SERVICES+=,horizon
+    # Horizon - enable for the OpenStack web GUI
+    # ENABLED_SERVICES+=,horizon
     # Nova
-    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch
+    ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
+    ENABLED_SERVICES+=,placement-api,placement-client
     # Glance
-    ENABLED_SERVICES+=,g-api,g-reg
+    ENABLED_SERVICES+=,g-api
     # Neutron
-    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
-    # Enable LBaaS v2
-    ENABLED_SERVICES+=,q-lbaasv2
+    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
     ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
     # Cinder
     ENABLED_SERVICES+=,c-api,c-vol,c-sch
     # Tempest
     ENABLED_SERVICES+=,tempest
+    # Barbican - Optionally used for TLS offload in Octavia
+    # ENABLED_SERVICES+=,barbican
     # ===== END localrc =====
 
 Run stack.sh and do some sanity checks
 
-  ::
+::
 
+    sudo su - stack
+    cd /opt/stack/devstack
     ./stack.sh
     . ./openrc
 
@@ -69,41 +84,62 @@
 
 Create two nova instances that we can use as test http servers:
 
-  ::
+::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
-    nova list # should show the nova instances just created
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+    openstack server list # should show the nova instances just created
 
     #add secgroup rules to allow ssh etc..
     openstack security group rule create default --protocol icmp
     openstack security group rule create default --protocol tcp --dst-port 22:22
     openstack security group rule create default --protocol tcp --dst-port 80:80
 
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
+Set up a simple web server on each of these instances. SSH into each instance
+(username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
 
- ::
+::
 
     MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
     while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
 
-Phase 2: Create your load balancers
-------------------------------------
+Phase 2: Create your load balancer
+----------------------------------
 
- ::
+Make sure you have the 'openstack loadbalancer' commands:
 
-    neutron lbaas-loadbalancer-create --name lb1 private-subnet
-    neutron lbaas-loadbalancer-show lb1  # Wait for the provisioning_status to be ACTIVE.
-    neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
-    sleep 10  # Sleep since LBaaS actions can take a few seconds depending on the environment.
-    neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-    sleep 10
-    neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1
-    sleep 10
-    neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1
+::
 
-Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes
-(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be
-reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is
-"curl that-lb-ip", which should alternate between showing the IPs of the two nodes.
+    pip install python-octaviaclient
+
+Create your load balancer:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: The <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note that, using the API directly, you can perform all of the above
+operations in a single API call.
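+
+For illustration, such a single-call create might look roughly like this
+(a sketch only: token and endpoint discovery are elided, and the exact
+schema should be checked against the `Octavia API`_ reference)::
+
+    curl -X POST "$OCTAVIA_ENDPOINT/v2/lbaas/loadbalancers" \
+        -H "X-Auth-Token: $TOKEN" -H "Content-Type: application/json" \
+        -d '{"loadbalancer": {"name": "lb1", "vip_subnet_id": "<subnet id>",
+              "listeners": [{"protocol": "HTTP", "protocol_port": 80,
+                "default_pool": {"protocol": "HTTP", "lb_algorithm": "ROUND_ROBIN",
+                  "members": [{"address": "<web server 1 address>", "protocol_port": 80},
+                              {"address": "<web server 2 address>", "protocol_port": 80}]}}]}}'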
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+    openstack loadbalancer show lb1 # Note the vip_address
+    curl http://<vip_address>
+    curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst
new file mode 100644
index 0000000..4c54723
--- /dev/null
+++ b/doc/source/guides/devstack-with-ldap.rst
@@ -0,0 +1,174 @@
+============================
+Deploying DevStack with LDAP
+============================
+
+The OpenStack Identity service has the ability to integrate with LDAP. The goal
+of this guide is to walk you through setting up an LDAP-backed OpenStack
+development environment.
+
+Introduction
+============
+
+LDAP support in keystone is read-only. You can use a single LDAP server to
+back an entire OpenStack deployment, or you can use separate LDAP servers to
+back specific keystone domains. Users within those domains can authenticate
+against keystone, assume role assignments, and interact with other OpenStack
+services.
+
+Configuration
+=============
+
+To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of
+``ENABLED_SERVICES`` in the ``local.conf`` file::
+
+    enable_service ldap
+
+Devstack will require a password to set up an LDAP administrator. This
+administrative user is also the bind user specified in keystone's configuration
+files, similar to a ``keystone`` user for MySQL databases.
+
+Devstack will prompt you for a password when running ``stack.sh`` if
+``LDAP_PASSWORD`` is not set. You can add the following to your
+``local.conf``::
+
+    LDAP_PASSWORD=super_secret_password
+
+At this point, devstack should have everything it needs to deploy OpenLDAP,
+bootstrap it with a minimal set of users, and configure it to back a domain
+in keystone. You can do this by running the ``stack.sh`` script::
+
+    $ ./stack.sh
+
+Once ``stack.sh`` completes, you should have a running keystone deployment with
+a basic set of users. It is important to note that not all users will live
+within LDAP. Instead, keystone will back different domains to different
+identity sources. For example, the ``default`` domain will be backed by MySQL.
+This is usually where you'll find your administrative and service users. If
+you query keystone for a list of domains, you should see a domain called
+``Users``. This domain is set up by devstack and points to OpenLDAP.
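+
+You can verify this by listing the domains keystone knows about (the IDs
+will differ)::
+
+    $ openstack domain list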
+
+User Management
+===============
+
+Initially, there will only be two users in the LDAP server. The ``Manager``
+user is used by keystone to talk to OpenLDAP. The ``demo`` user is a generic
+user that you should be able to see if you query keystone for users within the
+``Users`` domain. Both of these users were added to LDAP using basic LDAP
+utilities installed by devstack (e.g. ``ldap-utils``) and LDIFs. The LDIFs used
+to create these users can be found in ``devstack/files/ldap/``.
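+
+A quick way to confirm keystone sees them (IDs will differ)::
+
+    $ openstack user list --domain Users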
+
+Listing Users
+-------------
+
+To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP
+user bootstrapped by devstack::
+
+    $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -b dc=openstack,dc=org
+
+As you can see, devstack creates an LDAP organization called ``openstack.org``
+as a container for the ``Manager`` and ``demo`` users.
+
+Creating Users
+--------------
+
+Since keystone's LDAP integration is read-only, users must be added directly to
+LDAP. Users added directly to OpenLDAP will automatically be placed into the
+``Users`` domain.
+
+LDIFs can be used to add users via the command line. The following is an
+example LDIF that can be used to create a new LDAP user; let's call it
+``peter.ldif.in``::
+
+    dn: cn=peter,ou=Users,dc=openstack,dc=org
+    cn: peter
+    displayName: Peter Quill
+    givenName: Peter Quill
+    mail: starlord@openstack.org
+    objectClass: inetOrgPerson
+    objectClass: top
+    sn: peter
+    uid: peter
+    userPassword: im-a-better-pilot-than-rocket
+
+Now, we use the ``Manager`` user to create a user for Peter in LDAP::
+
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -c -f peter.ldif.in
+
+We should be able to assign Peter roles on projects. After Peter has some level
+of authorization, he should be able to log in to Horizon by specifying the
+``Users`` domain and using his ``peter`` username and password. Authorization
+can be given to Peter by creating a project within the ``Users`` domain and
+giving him a role assignment on that project::
+
+    $ openstack project create --domain Users awesome-mix-vol-1
+    +-------------+----------------------------------+
+    | Field       | Value                            |
+    +-------------+----------------------------------+
+    | description |                                  |
+    | domain_id   | 61a2de23107c46bea2d758167af707b9 |
+    | enabled     | True                             |
+    | id          | 7d422396d54945cdac8fe1e8e32baec4 |
+    | is_domain   | False                            |
+    | name        | awesome-mix-vol-1                |
+    | parent_id   | 61a2de23107c46bea2d758167af707b9 |
+    | tags        | []                               |
+    +-------------+----------------------------------+
+    $ openstack role add --user peter --user-domain Users \
+          --project awesome-mix-vol-1 --project-domain Users admin
+
+
+Deleting Users
+--------------
+
+We can use the same basic steps to remove users from LDAP, but instead of using
+LDIFs, we can just pass the ``dn`` of the user we want to delete::
+
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org
+
+Group Management
+================
+
+Like users, groups are considered specific identities. This means that groups
+also fall under the same read-only constraints as users, and they can be
+managed directly in LDAP, in the same way users are, with LDIFs.
+
+Adding Groups
+-------------
+
+Let's define a specific group with the following LDIF::
+
+    dn: cn=guardians,ou=UserGroups,dc=openstack,dc=org
+    objectClass: groupOfNames
+    cn: guardians
+    description: Guardians of the Galaxy
+    member: cn=peter,dc=openstack,dc=org
+    member: cn=gamora,dc=openstack,dc=org
+    member: cn=drax,dc=openstack,dc=org
+    member: cn=rocket,dc=openstack,dc=org
+    member: cn=groot,dc=openstack,dc=org
+
+We can create the group using the same ``ldapadd`` command as we did with
+users::
+
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost -c -f guardian-group.ldif.in
+
+If we check the group membership in Horizon, we'll see that only Peter is a
+member of the ``guardians`` group, despite the whole crew being specified in
+the LDIF. This is because the other members do not yet exist as user entries
+in LDAP. Once those accounts are created in LDAP, they will automatically be
+added to the ``guardians`` group. They will also assume any role assignments
+given to the ``guardians`` group.
+
+Deleting Groups
+---------------
+
+Just like users, groups can be deleted using the ``dn``::
+
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+        -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org
+
+Note that this operation will not remove users within that group. It will only
+remove the group itself and the memberships any users had with that group.
diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst
index 9549ed2..dcaa416 100644
--- a/doc/source/guides/lxc.rst
+++ b/doc/source/guides/lxc.rst
@@ -105,7 +105,7 @@
 
    ::
 
-       git clone https://git.openstack.org/openstack-dev/devstack
+       git clone https://opendev.org/openstack/devstack
 
 #. Configure
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index b4e2891..c0b3f58 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -103,7 +103,7 @@
 
 ::
 
-    git clone https://git.openstack.org/openstack-dev/devstack
+    git clone https://opendev.org/openstack/devstack
     cd devstack
 
 Up to this point all of the steps apply to each node in the cluster.
@@ -120,11 +120,8 @@
 
     [[local|localrc]]
     HOST_IP=192.168.42.11
-    FLAT_INTERFACE=eth0
     FIXED_RANGE=10.4.128.0/20
-    FIXED_NETWORK_SIZE=4096
     FLOATING_RANGE=192.168.42.128/25
-    MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
     DATABASE_PASSWORD=supersecret
@@ -160,11 +157,8 @@
 
     [[local|localrc]]
     HOST_IP=192.168.42.12 # change this per compute node
-    FLAT_INTERFACE=eth0
     FIXED_RANGE=10.4.128.0/20
-    FIXED_NETWORK_SIZE=4096
     FLOATING_RANGE=192.168.42.128/25
-    MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
     DATABASE_PASSWORD=supersecret
@@ -175,17 +169,12 @@
     MYSQL_HOST=$SERVICE_HOST
     RABBIT_HOST=$SERVICE_HOST
     GLANCE_HOSTPORT=$SERVICE_HOST:9292
-    ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client
+    ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client
     NOVA_VNC_ENABLED=True
-    NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
+    NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
     VNCSERVER_LISTEN=$HOST_IP
     VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
 
-**Note:** the ``n-api-meta`` service is a version of the api server
-that only serves the metadata service. It's needed because the
-computes created won't have a routing path to the metadata service on
-the controller.
-
 Fire up OpenStack:
 
 ::
@@ -240,8 +229,8 @@
     sudo rm -rf /etc/libvirt/qemu/inst*
     sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy
 
-Options pimp your stack
-=======================
+Going further
+=============
 
 Additional Users
 ----------------
@@ -302,10 +291,10 @@
 
 DevStack will automatically use an existing LVM volume group named
 ``stack-volumes`` to store cloud-created volumes. If ``stack-volumes``
-doesn't exist, DevStack will set up a 10Gb loop-mounted file to contain
-it. This obviously limits the number and size of volumes that can be
-created inside OpenStack. The size can be overridden by setting
-``VOLUME_BACKING_FILE_SIZE`` in ``local.conf``.
+doesn't exist, DevStack will set up a loop-mounted file to contain
+it.  If the default size is insufficient for the number and size of volumes
+required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in
+``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``).
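+
+For example (a minimal ``local.conf`` sketch)::
+
+    VOLUME_BACKING_FILE_SIZE=24G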
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
@@ -369,17 +358,6 @@
 Notes: stuff you might need to know
 ===================================
 
-Reset the Bridge
-----------------
-
-How to reset the bridge configuration:
-
-::
-
-    sudo brctl delif br100 eth0.926
-    sudo ip link set dev br100 down
-    sudo brctl delbr br100
-
 Set MySQL Password
 ------------------
 
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 092809a..2c25a1c 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -244,7 +244,7 @@
 
     ## Neutron options
     PUBLIC_INTERFACE=eth0
-    ENABLED_SERVICES=n-cpu,rabbit,q-agt
+    ENABLED_SERVICES=n-cpu,rabbit,q-agt,placement-client
 
 Network traffic from `eth0` on the compute nodes is then NAT'd by the
 controller node that runs Neutron's `neutron-l3-agent` and provides L3
@@ -376,8 +376,8 @@
 
         ## Neutron options
         Q_USE_SECGROUP=True
-        ENABLE_PROJECT_VLANS=True
-        PROJECT_VLAN_RANGE=3001:4000
+        ENABLE_TENANT_VLANS=True
+        TENANT_VLAN_RANGE=3001:4000
         PHYSICAL_NETWORK=default
         OVS_PHYSICAL_BRIDGE=br-ex
 
@@ -396,7 +396,7 @@
 
 In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
 publicly routed IPv4 subnet. In this specific instance we are using
-the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
+the special TEST-NET-3 subnet defined in `RFC 5737 <https://tools.ietf.org/html/rfc5737>`_,
 which is used for documentation.  In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
 would be a public IP address range that you or your organization has
 allocated to you, so that you could access your instances from the
@@ -567,7 +567,7 @@
     Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
     Q_USE_PROVIDER_NETWORKING=True
 
-    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    enable_plugin neutron https://opendev.org/openstack/neutron
 
     ## MacVTap agent options
     Q_AGENT=macvtap
@@ -622,7 +622,7 @@
 
     # Services that a compute node runs
     disable_all_services
-    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    enable_plugin neutron https://opendev.org/openstack/neutron
     ENABLED_SERVICES+=n-cpu,q-agt
 
     ## MacVTap agent options
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 0f105d7..5b42797 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -10,7 +10,7 @@
 ================
 
 In Juno, nova implemented a `spec
-<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
+<https://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
 to allow read/write access to the serial console of an instance via
 `nova-serialproxy
 <https://docs.openstack.org/nova/latest/cli/nova-serialproxy.html>`_.
@@ -63,8 +63,74 @@
 Enabling the service is enough to be functional for a single-machine DevStack.
 
 These config options are defined in `nova.conf.serial_console
-<https://github.com/openstack/nova/blob/master/nova/conf/serial_console.py>`_.
+<https://opendev.org/openstack/nova/src/master/nova/conf/serial_console.py>`_.
 
 For more information on OpenStack configuration see the `OpenStack
 Compute Service Configuration Reference
 <https://docs.openstack.org/nova/latest/admin/configuration/index.html>`_
+
+
+Fake virt driver
+================
+
+Nova has a `fake virt driver`_ which can be used for scale testing the control
+plane services or testing "move" operations between fake compute nodes, for
+example cold/live migration, evacuate and unshelve.
+
+The fake virt driver does not communicate with any hypervisor, it just reports
+some fake resource inventory values and keeps track of the state of the
+"guests" created, moved and deleted. It is not feature-complete with the
+compute API but is good enough for most API testing, and is also used within
+the nova functional tests themselves so is fairly robust.
+
+.. _fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py
+
+Configuration
+-------------
+
+Set the following in your devstack ``local.conf``:
+
+.. code-block:: ini
+
+  [[local|localrc]]
+  VIRT_DRIVER=fake
+  NUMBER_FAKE_NOVA_COMPUTE=<number>
+
+The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake
+``nova-compute`` services to run and defaults to 1.
+
+When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in
+nova and neutron automatically. However, other services, like cinder, will
+still enforce quota limits by default.
+
+Scaling
+-------
+
+The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors
+such as:
+
+* The size of the host (physical or virtualized) on which devstack is running.
+* The number of API workers. By default, devstack will run ``max($nproc/2, 2)``
+  workers per API service. If you are running several fake compute services on
+  a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``.
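+
+For example, a scale-test ``local.conf`` might combine these settings
+(the values here are illustrative, not recommendations):
+
+.. code-block:: ini
+
+  [[local|localrc]]
+  VIRT_DRIVER=fake
+  NUMBER_FAKE_NOVA_COMPUTE=20
+  API_WORKERS=1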
+
+In addition, while quota will be disabled in neutron, there is no fake ML2
+backend for neutron, so creating fake VMs will still result in real ports
+being created. To create servers without networking, you can specify
+``--nic=none`` when creating the server, for example:
+
+.. code-block:: shell
+
+  $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
+      --image cirros-0.3.5-x86_64-disk --nic none --wait test-server
+
+.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
+          required to use ``--nic=none``.
+
+To avoid overhead from other services that you may not need, disable them in
+your ``local.conf``, for example:
+
+.. code-block:: ini
+
+  disable_service horizon
+  disable_service tempest
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 48a4fa8..a0e97ed 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -45,31 +45,37 @@
 install you can skip this step and just give the user sudo privileges
 below)
 
-::
+.. code-block:: console
 
-    useradd -s /bin/bash -d /opt/stack -m stack
+    $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
 Since this user will be making many changes to your system, it will need
 to have sudo privileges:
 
-::
+.. code-block:: console
 
-    apt-get install sudo -y || yum install -y sudo
-    echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    $ apt-get install sudo -y || yum install -y sudo
+    $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+.. note:: On some systems you may need to use ``sudo visudo``.
 
 From here on you should use the user you created. **Logout** and
-**login** as that user.
+**login** as that user:
+
+.. code-block:: console
+
+    $ sudo su - stack
 
 Download DevStack
 -----------------
 
 We'll grab the latest version of DevStack via https:
 
-::
+.. code-block:: console
 
-    sudo apt-get install git -y || sudo yum install -y git
-    git clone https://git.openstack.org/openstack-dev/devstack
-    cd devstack
+    $ sudo apt-get install git -y || sudo yum install -y git
+    $ git clone https://opendev.org/openstack/devstack
+    $ cd devstack
 
 Run DevStack
 ------------
@@ -81,11 +87,8 @@
 -  Set ``FLOATING_RANGE`` to a range not used on the local network, e.g.
    192.168.1.224/27. This configures IP addresses ending in 225-254 to
    be used as floating IPs.
--  Set ``FIXED_RANGE`` and ``FIXED_NETWORK_SIZE`` to configure the
-   internal address space used by the instances.
--  Set ``FLAT_INTERFACE`` to the Ethernet interface that connects the
-   host to your local network. This is the interface that should be
-   configured with the static IP address mentioned above.
+-  Set ``FIXED_RANGE`` to configure the internal address space used by the
+   instances.
 -  Set the administrative password. This password is used for the
    **admin** and **demo** accounts set up as OpenStack users.
 -  Set the MySQL administrative password. The default here is a random
@@ -97,23 +100,24 @@
 
 ``local.conf`` should look something like this:
 
-::
+.. code-block:: ini
 
     [[local|localrc]]
     FLOATING_RANGE=192.168.1.224/27
     FIXED_RANGE=10.11.12.0/24
-    FIXED_NETWORK_SIZE=256
-    FLAT_INTERFACE=eth0
     ADMIN_PASSWORD=supersecret
     DATABASE_PASSWORD=iheartdatabases
     RABBIT_PASSWORD=flopsymopsy
     SERVICE_PASSWORD=iheartksl
 
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+    under the *samples* directory in the devstack repository.
+
 Run DevStack:
 
-::
+.. code-block:: console
 
-    ./stack.sh
+    $ ./stack.sh
 
 A seemingly endless stream of activity ensues. When complete you will
 see a summary of ``stack.sh``'s work, including the relevant URLs,
@@ -127,7 +131,3 @@
 http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and if
 you give them floating IPs and security group access those VMs will be
 accessible from other machines on your network.
-
-Some examples of using the OpenStack command-line clients ``nova`` and
-``glance`` are in the shakedown scripts in ``devstack/exercises``.
-``exercise.sh`` will run all of those scripts and report on the results.
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 45b8f2d..7dac18b 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -60,7 +60,7 @@
             DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git
             sudo chown stack:stack /home/stack
             cd /home/stack
-            git clone https://git.openstack.org/openstack-dev/devstack
+            git clone https://opendev.org/openstack/devstack
             cd devstack
             echo '[[local|localrc]]' > local.conf
             echo ADMIN_PASSWORD=password >> local.conf
@@ -78,7 +78,7 @@
 to create a non-root user and run the ``start.sh`` script as that user.
 
 If you are using cloud-init and you have not
-`enabled custom logging <../configuration.html#enable-logging>`_ of the stack
+:ref:`enabled custom logging <enable_logging>` of the stack
 output, then the stack output can be found in
 ``/var/log/cloud-init-output.log`` by default.
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 47087c5..08ce4cb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,9 +11,8 @@
    and how to go beyond this setup. Both should be a set of quick
    links to other documents to let people explore from there.
 
-==========
- DevStack
-==========
+DevStack
+========
 
 .. image:: assets/images/logo-blue.png
 
@@ -23,8 +22,7 @@
 environment and as the basis for much of the OpenStack project's
 functional testing.
 
-The source is available at
-`<https://git.openstack.org/cgit/openstack-dev/devstack>`__.
+The source is available at `<https://opendev.org/openstack/devstack>`__.
 
 .. warning::
 
@@ -33,56 +31,58 @@
    are dedicated to this purpose.
 
 Quick Start
-===========
++++++++++++
 
 Install Linux
 -------------
 
-Start with a clean and minimal install of a Linux system. Devstack
-attempts to support Ubuntu 16.04/17.04, Fedora 24/25, CentOS/RHEL 7,
-as well as Debian and OpenSUSE.
+Start with a clean and minimal install of a Linux system. DevStack
+attempts to support the two latest LTS releases of Ubuntu, the
+latest/current Fedora version, CentOS/RHEL 8, and OpenSUSE.
 
-If you do not have a preference, Ubuntu 16.04 is the most tested, and
-will probably go the smoothest.
+If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
+most tested, and will probably go the smoothest.
 
-Add Stack User
---------------
+Add Stack User (optional)
+-------------------------
 
-Devstack should be run as a non-root user with sudo enabled
+DevStack should be run as a non-root user with sudo enabled
 (standard logins to cloud images such as "ubuntu" or "cloud-user"
 are usually fine).
 
-You can quickly create a separate `stack` user to run DevStack with
+If you are not using a cloud image, you can create a separate `stack` user
+to run DevStack with
 
-::
+.. code-block:: console
 
    $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
 Since this user will be making many changes to your system, it should
 have sudo privileges:
 
-::
+.. code-block:: console
 
     $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
-    $ sudo su - stack
+    $ sudo -u stack -i
 
 Download DevStack
 -----------------
 
-::
+.. code-block:: console
 
-   $ git clone https://git.openstack.org/openstack-dev/devstack
+   $ git clone https://opendev.org/openstack/devstack
    $ cd devstack
 
 The ``devstack`` repo contains a script that installs OpenStack and
-templates for configuration files
+templates for configuration files.
 
 Create a local.conf
 -------------------
 
-Create a ``local.conf`` file with 4 passwords preset at the root of the
+Create a ``local.conf`` file with four preset passwords at the root of the
 devstack git repo.
-::
+
+.. code-block:: ini
 
    [[local|localrc]]
    ADMIN_PASSWORD=secret
@@ -92,12 +92,15 @@
 
 This is the minimum required config to get started with DevStack.
 
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+    under the *samples* directory in the devstack repository.
+
 Start the install
 -----------------
 
-::
+.. code-block:: console
 
-   ./stack.sh
+   $ ./stack.sh
 
 This will take 15 - 20 minutes, largely depending on the speed of
 your internet connection. Many git trees and packages will be
@@ -109,8 +112,8 @@
 You now have a working DevStack! Congrats!
 
 Your devstack will have installed ``keystone``, ``glance``, ``nova``,
-``cinder``, ``neutron``, and ``horizon``. Floating IPs will be
-available, guests have access to the external world.
+``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs
+will be available, and guests will have access to the external world.
 
 You can access horizon to experience the web interface to
 OpenStack, and manage vms, networks, volumes, and images from
@@ -139,12 +142,23 @@
 Enable :doc:`devstack plugins <plugins>` to support additional
 services, features, and configuration not present in base devstack.
 
+Use devstack in your CI with :doc:`Ansible roles <zuul_roles>` and
+:doc:`Jobs <zuul_jobs>` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul
+V3 with this full migration :doc:`how-to <zuul_ci_jobs_migration>`.
+
 Get :doc:`the big picture <overview>` of what we are trying to do
 with devstack, and help us by :doc:`contributing to the project
 <hacking>`.
 
+If you are a new contributor to devstack, please refer to :doc:`contributor/contributing`.
+
+.. toctree::
+   :hidden:
+
+   contributor/contributing
+
 Contents
---------
+++++++++
 
 .. toctree::
    :glob:
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index 74010cd..e65c7ef 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -40,7 +40,7 @@
 Locally Accessible Guests
 =========================
 
-If you want to make you guests accessible from other machines on your
+If you want to make your guests accessible from other machines on your
 network, we have to connect ``br-ex`` to a physical interface.
 
 Dedicated Guest Interface
@@ -81,7 +81,7 @@
    [[local|localrc]]
    PUBLIC_INTERFACE=eth0
    HOST_IP=10.42.0.52
-   FLOATING_RANGE=10.42.0.52/24
+   FLOATING_RANGE=10.42.0.0/24
    PUBLIC_NETWORK_GATEWAY=10.42.0.1
    Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
 
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index c07a8e6..a609333 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -24,7 +24,7 @@
 
 -  Ubuntu: current LTS release plus current development release
 -  Fedora: current release plus previous release
--  RHEL/Centos: current major release
+-  RHEL/CentOS: current major release
 -  Other OS platforms may continue to be included but the maintenance of
    those platforms shall not be assumed simply due to their presence.
    Having a listed point-of-contact for each additional OS will greatly
@@ -64,7 +64,8 @@
 
 The default services configured by DevStack are Identity (keystone),
 Object Storage (swift), Image Service (glance), Block Storage
-(cinder), Compute (nova), Networking (neutron), Dashboard (horizon)
+(cinder), Compute (nova), Placement (placement),
+Networking (neutron), and Dashboard (horizon).
 
 Additional services not included directly in DevStack can be tied in to
 ``stack.sh`` using the :doc:`plugin mechanism <plugins>` to call
@@ -75,11 +76,3 @@
 
 -  single node
 -  multi-node configurations as are tested by the gate
-
-Exercises
----------
-
-The DevStack exercise scripts are no longer used as integration and gate
-testing as that job has transitioned to Tempest. They are still
-maintained as a demonstrations of using OpenStack from the command line
-and for quick operational testing.
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 6aa2e93..3edd708 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -21,162 +21,171 @@
 official OpenStack projects.
 
 
-====================================== ===
-Plugin Name                            URL
-====================================== ===
-almanach                               `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
-aodh                                   `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
-app-catalog-ui                         `git://git.openstack.org/openstack/app-catalog-ui <https://git.openstack.org/cgit/openstack/app-catalog-ui>`__
-astara                                 `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
-barbican                               `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
-bilean                                 `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
-blazar                                 `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
-broadview-collector                    `git://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
-ceilometer                             `git://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
-ceilometer-powervm                     `git://git.openstack.org/openstack/ceilometer-powervm <https://git.openstack.org/cgit/openstack/ceilometer-powervm>`__
-cerberus                               `git://git.openstack.org/openstack/cerberus <https://git.openstack.org/cgit/openstack/cerberus>`__
-cloudkitty                             `git://git.openstack.org/openstack/cloudkitty <https://git.openstack.org/cgit/openstack/cloudkitty>`__
-collectd-ceilometer-plugin             `git://git.openstack.org/openstack/collectd-ceilometer-plugin <https://git.openstack.org/cgit/openstack/collectd-ceilometer-plugin>`__
-congress                               `git://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
-cue                                    `git://git.openstack.org/openstack/cue <https://git.openstack.org/cgit/openstack/cue>`__
-cyborg                                 `git://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
-designate                              `git://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
-devstack-plugin-additional-pkg-repos   `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
-devstack-plugin-amqp1                  `git://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
-devstack-plugin-bdd                    `git://git.openstack.org/openstack/devstack-plugin-bdd <https://git.openstack.org/cgit/openstack/devstack-plugin-bdd>`__
-devstack-plugin-ceph                   `git://git.openstack.org/openstack/devstack-plugin-ceph <https://git.openstack.org/cgit/openstack/devstack-plugin-ceph>`__
-devstack-plugin-container              `git://git.openstack.org/openstack/devstack-plugin-container <https://git.openstack.org/cgit/openstack/devstack-plugin-container>`__
-devstack-plugin-glusterfs              `git://git.openstack.org/openstack/devstack-plugin-glusterfs <https://git.openstack.org/cgit/openstack/devstack-plugin-glusterfs>`__
-devstack-plugin-hdfs                   `git://git.openstack.org/openstack/devstack-plugin-hdfs <https://git.openstack.org/cgit/openstack/devstack-plugin-hdfs>`__
-devstack-plugin-kafka                  `git://git.openstack.org/openstack/devstack-plugin-kafka <https://git.openstack.org/cgit/openstack/devstack-plugin-kafka>`__
-devstack-plugin-libvirt-qemu           `git://git.openstack.org/openstack/devstack-plugin-libvirt-qemu <https://git.openstack.org/cgit/openstack/devstack-plugin-libvirt-qemu>`__
-devstack-plugin-mariadb                `git://git.openstack.org/openstack/devstack-plugin-mariadb <https://git.openstack.org/cgit/openstack/devstack-plugin-mariadb>`__
-devstack-plugin-nfs                    `git://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
-devstack-plugin-pika                   `git://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
-devstack-plugin-sheepdog               `git://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
-devstack-plugin-vmax                   `git://git.openstack.org/openstack/devstack-plugin-vmax <https://git.openstack.org/cgit/openstack/devstack-plugin-vmax>`__
-devstack-plugin-zmq                    `git://git.openstack.org/openstack/devstack-plugin-zmq <https://git.openstack.org/cgit/openstack/devstack-plugin-zmq>`__
-dragonflow                             `git://git.openstack.org/openstack/dragonflow <https://git.openstack.org/cgit/openstack/dragonflow>`__
-drbd-devstack                          `git://git.openstack.org/openstack/drbd-devstack <https://git.openstack.org/cgit/openstack/drbd-devstack>`__
-ec2-api                                `git://git.openstack.org/openstack/ec2-api <https://git.openstack.org/cgit/openstack/ec2-api>`__
-freezer                                `git://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
-freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
-freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
-fuxi                                   `git://git.openstack.org/openstack/fuxi <https://git.openstack.org/cgit/openstack/fuxi>`__
-gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
-glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
-group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
-heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
-horizon-mellanox                       `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
-ironic                                 `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
-ironic-inspector                       `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
-ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
-ironic-ui                              `git://git.openstack.org/openstack/ironic-ui <https://git.openstack.org/cgit/openstack/ironic-ui>`__
-k8s-cloud-provider                     `git://git.openstack.org/openstack/k8s-cloud-provider <https://git.openstack.org/cgit/openstack/k8s-cloud-provider>`__
-karbor                                 `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
-karbor-dashboard                       `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
-keystone                               `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
-kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
-kuryr-kubernetes                       `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
-kuryr-libnetwork                       `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
-magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
-magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
-manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
-manila-ui                              `git://git.openstack.org/openstack/manila-ui <https://git.openstack.org/cgit/openstack/manila-ui>`__
-masakari                               `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
-meteos                                 `git://git.openstack.org/openstack/meteos <https://git.openstack.org/cgit/openstack/meteos>`__
-meteos-ui                              `git://git.openstack.org/openstack/meteos-ui <https://git.openstack.org/cgit/openstack/meteos-ui>`__
-mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
-mixmatch                               `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
-mogan                                  `git://git.openstack.org/openstack/mogan <https://git.openstack.org/cgit/openstack/mogan>`__
-mogan-ui                               `git://git.openstack.org/openstack/mogan-ui <https://git.openstack.org/cgit/openstack/mogan-ui>`__
-monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
-monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
-monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
-monasca-events-api                     `git://git.openstack.org/openstack/monasca-events-api <https://git.openstack.org/cgit/openstack/monasca-events-api>`__
-monasca-log-api                        `git://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
-monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
-murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
-networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
-networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
-networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
-networking-baremetal                   `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
-networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
-networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
-networking-calico                      `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
-networking-cisco                       `git://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
-networking-cumulus                     `git://git.openstack.org/openstack/networking-cumulus <https://git.openstack.org/cgit/openstack/networking-cumulus>`__
-networking-dpm                         `git://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
-networking-fortinet                    `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
-networking-generic-switch              `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
-networking-hpe                         `git://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
-networking-huawei                      `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
-networking-hyperv                      `git://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
-networking-infoblox                    `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
-networking-l2gw                        `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
-networking-midonet                     `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
-networking-mlnx                        `git://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
-networking-nec                         `git://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
-networking-odl                         `git://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
-networking-onos                        `git://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
-networking-opencontrail                `git://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
-networking-ovn                         `git://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
-networking-ovs-dpdk                    `git://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
-networking-plumgrid                    `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
-networking-powervm                     `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
-networking-sfc                         `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
-networking-vpp                         `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
-networking-vsphere                     `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
-neutron                                `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
-neutron-dynamic-routing                `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
-neutron-fwaas                          `git://git.openstack.org/openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas>`__
-neutron-fwaas-dashboard                `git://git.openstack.org/openstack/neutron-fwaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-fwaas-dashboard>`__
-neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
-neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
-neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
-neutron-vpnaas-dashboard               `git://git.openstack.org/openstack/neutron-vpnaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-vpnaas-dashboard>`__
-nova-dpm                               `git://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
-nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
-nova-mksproxy                          `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
-nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
-oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
-octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
-octavia-dashboard                      `git://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
-omni                                   `git://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
-os-xenapi                              `git://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
-osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
-oswin-tempest-plugin                   `git://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
-panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-patrole                                `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
-picasso                                `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
-qinling                                `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
-rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
-sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
-sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
-scalpels                               `git://git.openstack.org/openstack/scalpels <https://git.openstack.org/cgit/openstack/scalpels>`__
-searchlight                            `git://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
-searchlight-ui                         `git://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
-senlin                                 `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
-solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
-stackube                               `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
-tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
-tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
-tap-as-a-service-dashboard             `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
-tricircle                              `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
-trio2o                                 `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
-trove                                  `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
-trove-dashboard                        `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
-valet                                  `git://git.openstack.org/openstack/valet <https://git.openstack.org/cgit/openstack/valet>`__
-vitrage                                `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
-vitrage-dashboard                      `git://git.openstack.org/openstack/vitrage-dashboard <https://git.openstack.org/cgit/openstack/vitrage-dashboard>`__
-vmware-nsx                             `git://git.openstack.org/openstack/vmware-nsx <https://git.openstack.org/cgit/openstack/vmware-nsx>`__
-vmware-vspc                            `git://git.openstack.org/openstack/vmware-vspc <https://git.openstack.org/cgit/openstack/vmware-vspc>`__
-watcher                                `git://git.openstack.org/openstack/watcher <https://git.openstack.org/cgit/openstack/watcher>`__
-watcher-dashboard                      `git://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
-zaqar                                  `git://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
-zaqar-ui                               `git://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
-zun                                    `git://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
-zun-ui                                 `git://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
-====================================== ===
+======================================== ===
+Plugin Name                              URL
+======================================== ===
+openstack/aodh                           `https://opendev.org/openstack/aodh <https://opendev.org/openstack/aodh>`__
+openstack/barbican                       `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
+openstack/blazar                         `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
+openstack/ceilometer                     `https://opendev.org/openstack/ceilometer <https://opendev.org/openstack/ceilometer>`__
+openstack/ceilometer-powervm             `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
+openstack/cinderlib                      `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
+openstack/cloudkitty                     `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
+openstack/cyborg                         `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
+openstack/designate                      `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
+openstack/devstack-plugin-amqp1          `https://opendev.org/openstack/devstack-plugin-amqp1 <https://opendev.org/openstack/devstack-plugin-amqp1>`__
+openstack/devstack-plugin-ceph           `https://opendev.org/openstack/devstack-plugin-ceph <https://opendev.org/openstack/devstack-plugin-ceph>`__
+openstack/devstack-plugin-container      `https://opendev.org/openstack/devstack-plugin-container <https://opendev.org/openstack/devstack-plugin-container>`__
+openstack/devstack-plugin-kafka          `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
+openstack/devstack-plugin-nfs            `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
+openstack/devstack-plugin-open-cas       `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
+openstack/ec2-api                        `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
+openstack/freezer                        `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
+openstack/freezer-api                    `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
+openstack/freezer-tempest-plugin         `https://opendev.org/openstack/freezer-tempest-plugin <https://opendev.org/openstack/freezer-tempest-plugin>`__
+openstack/freezer-web-ui                 `https://opendev.org/openstack/freezer-web-ui <https://opendev.org/openstack/freezer-web-ui>`__
+openstack/heat                           `https://opendev.org/openstack/heat <https://opendev.org/openstack/heat>`__
+openstack/heat-dashboard                 `https://opendev.org/openstack/heat-dashboard <https://opendev.org/openstack/heat-dashboard>`__
+openstack/ironic                         `https://opendev.org/openstack/ironic <https://opendev.org/openstack/ironic>`__
+openstack/ironic-inspector               `https://opendev.org/openstack/ironic-inspector <https://opendev.org/openstack/ironic-inspector>`__
+openstack/ironic-prometheus-exporter     `https://opendev.org/openstack/ironic-prometheus-exporter <https://opendev.org/openstack/ironic-prometheus-exporter>`__
+openstack/ironic-ui                      `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
+openstack/keystone                       `https://opendev.org/openstack/keystone <https://opendev.org/openstack/keystone>`__
+openstack/kuryr-kubernetes               `https://opendev.org/openstack/kuryr-kubernetes <https://opendev.org/openstack/kuryr-kubernetes>`__
+openstack/kuryr-libnetwork               `https://opendev.org/openstack/kuryr-libnetwork <https://opendev.org/openstack/kuryr-libnetwork>`__
+openstack/kuryr-tempest-plugin           `https://opendev.org/openstack/kuryr-tempest-plugin <https://opendev.org/openstack/kuryr-tempest-plugin>`__
+openstack/magnum                         `https://opendev.org/openstack/magnum <https://opendev.org/openstack/magnum>`__
+openstack/magnum-ui                      `https://opendev.org/openstack/magnum-ui <https://opendev.org/openstack/magnum-ui>`__
+openstack/manila                         `https://opendev.org/openstack/manila <https://opendev.org/openstack/manila>`__
+openstack/manila-tempest-plugin          `https://opendev.org/openstack/manila-tempest-plugin <https://opendev.org/openstack/manila-tempest-plugin>`__
+openstack/manila-ui                      `https://opendev.org/openstack/manila-ui <https://opendev.org/openstack/manila-ui>`__
+openstack/masakari                       `https://opendev.org/openstack/masakari <https://opendev.org/openstack/masakari>`__
+openstack/mistral                        `https://opendev.org/openstack/mistral <https://opendev.org/openstack/mistral>`__
+openstack/monasca-api                    `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
+openstack/monasca-events-api             `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
+openstack/monasca-tempest-plugin         `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
+openstack/murano                         `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
+openstack/networking-bagpipe             `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
+openstack/networking-baremetal           `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
+openstack/networking-bgpvpn              `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
+openstack/networking-generic-switch      `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
+openstack/networking-hyperv              `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
+openstack/networking-odl                 `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
+openstack/networking-powervm             `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
+openstack/networking-sfc                 `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
+openstack/neutron                        `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
+openstack/neutron-dynamic-routing        `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
+openstack/neutron-tempest-plugin         `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
+openstack/neutron-vpnaas                 `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
+openstack/neutron-vpnaas-dashboard       `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
+openstack/nova-powervm                   `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
+openstack/octavia                        `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
+openstack/octavia-dashboard              `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
+openstack/octavia-tempest-plugin         `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
+openstack/openstacksdk                   `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
+openstack/osprofiler                     `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
+openstack/oswin-tempest-plugin           `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/ovn-octavia-provider           `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
+openstack/patrole                        `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
+openstack/rally-openstack                `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
+openstack/sahara                         `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
+openstack/sahara-dashboard               `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
+openstack/senlin                         `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
+openstack/shade                          `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
+openstack/solum                          `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
+openstack/storlets                       `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
+openstack/tacker                         `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
+openstack/tap-as-a-service               `https://opendev.org/openstack/tap-as-a-service <https://opendev.org/openstack/tap-as-a-service>`__
+openstack/telemetry-tempest-plugin       `https://opendev.org/openstack/telemetry-tempest-plugin <https://opendev.org/openstack/telemetry-tempest-plugin>`__
+openstack/trove                          `https://opendev.org/openstack/trove <https://opendev.org/openstack/trove>`__
+openstack/trove-dashboard                `https://opendev.org/openstack/trove-dashboard <https://opendev.org/openstack/trove-dashboard>`__
+openstack/venus                          `https://opendev.org/openstack/venus <https://opendev.org/openstack/venus>`__
+openstack/venus-dashboard                `https://opendev.org/openstack/venus-dashboard <https://opendev.org/openstack/venus-dashboard>`__
+openstack/vitrage                        `https://opendev.org/openstack/vitrage <https://opendev.org/openstack/vitrage>`__
+openstack/vitrage-dashboard              `https://opendev.org/openstack/vitrage-dashboard <https://opendev.org/openstack/vitrage-dashboard>`__
+openstack/vitrage-tempest-plugin         `https://opendev.org/openstack/vitrage-tempest-plugin <https://opendev.org/openstack/vitrage-tempest-plugin>`__
+openstack/watcher                        `https://opendev.org/openstack/watcher <https://opendev.org/openstack/watcher>`__
+openstack/watcher-dashboard              `https://opendev.org/openstack/watcher-dashboard <https://opendev.org/openstack/watcher-dashboard>`__
+openstack/whitebox-tempest-plugin        `https://opendev.org/openstack/whitebox-tempest-plugin <https://opendev.org/openstack/whitebox-tempest-plugin>`__
+openstack/zaqar                          `https://opendev.org/openstack/zaqar <https://opendev.org/openstack/zaqar>`__
+openstack/zaqar-ui                       `https://opendev.org/openstack/zaqar-ui <https://opendev.org/openstack/zaqar-ui>`__
+openstack/zun                            `https://opendev.org/openstack/zun <https://opendev.org/openstack/zun>`__
+openstack/zun-ui                         `https://opendev.org/openstack/zun-ui <https://opendev.org/openstack/zun-ui>`__
+performa/os-faults                       `https://opendev.org/performa/os-faults <https://opendev.org/performa/os-faults>`__
+skyline/skyline-apiserver                `https://opendev.org/skyline/skyline-apiserver <https://opendev.org/skyline/skyline-apiserver>`__
+starlingx/config                         `https://opendev.org/starlingx/config <https://opendev.org/starlingx/config>`__
+starlingx/fault                          `https://opendev.org/starlingx/fault <https://opendev.org/starlingx/fault>`__
+starlingx/ha                             `https://opendev.org/starlingx/ha <https://opendev.org/starlingx/ha>`__
+starlingx/integ                          `https://opendev.org/starlingx/integ <https://opendev.org/starlingx/integ>`__
+starlingx/metal                          `https://opendev.org/starlingx/metal <https://opendev.org/starlingx/metal>`__
+starlingx/nfv                            `https://opendev.org/starlingx/nfv <https://opendev.org/starlingx/nfv>`__
+starlingx/update                         `https://opendev.org/starlingx/update <https://opendev.org/starlingx/update>`__
+vexxhost/openstack-operator              `https://opendev.org/vexxhost/openstack-operator <https://opendev.org/vexxhost/openstack-operator>`__
+x/almanach                               `https://opendev.org/x/almanach <https://opendev.org/x/almanach>`__
+x/apmec                                  `https://opendev.org/x/apmec <https://opendev.org/x/apmec>`__
+x/bilean                                 `https://opendev.org/x/bilean <https://opendev.org/x/bilean>`__
+x/broadview-collector                    `https://opendev.org/x/broadview-collector <https://opendev.org/x/broadview-collector>`__
+x/collectd-openstack-plugins             `https://opendev.org/x/collectd-openstack-plugins <https://opendev.org/x/collectd-openstack-plugins>`__
+x/devstack-plugin-additional-pkg-repos   `https://opendev.org/x/devstack-plugin-additional-pkg-repos <https://opendev.org/x/devstack-plugin-additional-pkg-repos>`__
+x/devstack-plugin-glusterfs              `https://opendev.org/x/devstack-plugin-glusterfs <https://opendev.org/x/devstack-plugin-glusterfs>`__
+x/devstack-plugin-hdfs                   `https://opendev.org/x/devstack-plugin-hdfs <https://opendev.org/x/devstack-plugin-hdfs>`__
+x/devstack-plugin-libvirt-qemu           `https://opendev.org/x/devstack-plugin-libvirt-qemu <https://opendev.org/x/devstack-plugin-libvirt-qemu>`__
+x/devstack-plugin-mariadb                `https://opendev.org/x/devstack-plugin-mariadb <https://opendev.org/x/devstack-plugin-mariadb>`__
+x/devstack-plugin-tobiko                 `https://opendev.org/x/devstack-plugin-tobiko <https://opendev.org/x/devstack-plugin-tobiko>`__
+x/devstack-plugin-vmax                   `https://opendev.org/x/devstack-plugin-vmax <https://opendev.org/x/devstack-plugin-vmax>`__
+x/drbd-devstack                          `https://opendev.org/x/drbd-devstack <https://opendev.org/x/drbd-devstack>`__
+x/fenix                                  `https://opendev.org/x/fenix <https://opendev.org/x/fenix>`__
+x/gce-api                                `https://opendev.org/x/gce-api <https://opendev.org/x/gce-api>`__
+x/glare                                  `https://opendev.org/x/glare <https://opendev.org/x/glare>`__
+x/group-based-policy                     `https://opendev.org/x/group-based-policy <https://opendev.org/x/group-based-policy>`__
+x/gyan                                   `https://opendev.org/x/gyan <https://opendev.org/x/gyan>`__
+x/horizon-mellanox                       `https://opendev.org/x/horizon-mellanox <https://opendev.org/x/horizon-mellanox>`__
+x/ironic-staging-drivers                 `https://opendev.org/x/ironic-staging-drivers <https://opendev.org/x/ironic-staging-drivers>`__
+x/kingbird                               `https://opendev.org/x/kingbird <https://opendev.org/x/kingbird>`__
+x/meteos                                 `https://opendev.org/x/meteos <https://opendev.org/x/meteos>`__
+x/meteos-ui                              `https://opendev.org/x/meteos-ui <https://opendev.org/x/meteos-ui>`__
+x/mixmatch                               `https://opendev.org/x/mixmatch <https://opendev.org/x/mixmatch>`__
+x/mogan                                  `https://opendev.org/x/mogan <https://opendev.org/x/mogan>`__
+x/mogan-ui                               `https://opendev.org/x/mogan-ui <https://opendev.org/x/mogan-ui>`__
+x/networking-6wind                       `https://opendev.org/x/networking-6wind <https://opendev.org/x/networking-6wind>`__
+x/networking-ansible                     `https://opendev.org/x/networking-ansible <https://opendev.org/x/networking-ansible>`__
+x/networking-arista                      `https://opendev.org/x/networking-arista <https://opendev.org/x/networking-arista>`__
+x/networking-brocade                     `https://opendev.org/x/networking-brocade <https://opendev.org/x/networking-brocade>`__
+x/networking-cisco                       `https://opendev.org/x/networking-cisco <https://opendev.org/x/networking-cisco>`__
+x/networking-cumulus                     `https://opendev.org/x/networking-cumulus <https://opendev.org/x/networking-cumulus>`__
+x/networking-dpm                         `https://opendev.org/x/networking-dpm <https://opendev.org/x/networking-dpm>`__
+x/networking-fortinet                    `https://opendev.org/x/networking-fortinet <https://opendev.org/x/networking-fortinet>`__
+x/networking-hpe                         `https://opendev.org/x/networking-hpe <https://opendev.org/x/networking-hpe>`__
+x/networking-huawei                      `https://opendev.org/x/networking-huawei <https://opendev.org/x/networking-huawei>`__
+x/networking-infoblox                    `https://opendev.org/x/networking-infoblox <https://opendev.org/x/networking-infoblox>`__
+x/networking-l2gw                        `https://opendev.org/x/networking-l2gw <https://opendev.org/x/networking-l2gw>`__
+x/networking-lagopus                     `https://opendev.org/x/networking-lagopus <https://opendev.org/x/networking-lagopus>`__
+x/networking-mlnx                        `https://opendev.org/x/networking-mlnx <https://opendev.org/x/networking-mlnx>`__
+x/networking-nec                         `https://opendev.org/x/networking-nec <https://opendev.org/x/networking-nec>`__
+x/networking-omnipath                    `https://opendev.org/x/networking-omnipath <https://opendev.org/x/networking-omnipath>`__
+x/networking-opencontrail                `https://opendev.org/x/networking-opencontrail <https://opendev.org/x/networking-opencontrail>`__
+x/networking-ovs-dpdk                    `https://opendev.org/x/networking-ovs-dpdk <https://opendev.org/x/networking-ovs-dpdk>`__
+x/networking-plumgrid                    `https://opendev.org/x/networking-plumgrid <https://opendev.org/x/networking-plumgrid>`__
+x/networking-spp                         `https://opendev.org/x/networking-spp <https://opendev.org/x/networking-spp>`__
+x/networking-vpp                         `https://opendev.org/x/networking-vpp <https://opendev.org/x/networking-vpp>`__
+x/networking-vsphere                     `https://opendev.org/x/networking-vsphere <https://opendev.org/x/networking-vsphere>`__
+x/neutron-classifier                     `https://opendev.org/x/neutron-classifier <https://opendev.org/x/neutron-classifier>`__
+x/nova-dpm                               `https://opendev.org/x/nova-dpm <https://opendev.org/x/nova-dpm>`__
+x/nova-mksproxy                          `https://opendev.org/x/nova-mksproxy <https://opendev.org/x/nova-mksproxy>`__
+x/oaktree                                `https://opendev.org/x/oaktree <https://opendev.org/x/oaktree>`__
+x/omni                                   `https://opendev.org/x/omni <https://opendev.org/x/omni>`__
+x/os-xenapi                              `https://opendev.org/x/os-xenapi <https://opendev.org/x/os-xenapi>`__
+x/picasso                                `https://opendev.org/x/picasso <https://opendev.org/x/picasso>`__
+x/rsd-virt-for-nova                      `https://opendev.org/x/rsd-virt-for-nova <https://opendev.org/x/rsd-virt-for-nova>`__
+x/scalpels                               `https://opendev.org/x/scalpels <https://opendev.org/x/scalpels>`__
+x/slogging                               `https://opendev.org/x/slogging <https://opendev.org/x/slogging>`__
+x/stackube                               `https://opendev.org/x/stackube <https://opendev.org/x/stackube>`__
+x/tap-as-a-service-dashboard             `https://opendev.org/x/tap-as-a-service-dashboard <https://opendev.org/x/tap-as-a-service-dashboard>`__
+x/tatu                                   `https://opendev.org/x/tatu <https://opendev.org/x/tatu>`__
+x/trio2o                                 `https://opendev.org/x/trio2o <https://opendev.org/x/trio2o>`__
+x/valet                                  `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
+x/vmware-nsx                             `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
+x/vmware-vspc                            `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+======================================== ===
 
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index fae1a1d..7d70d74 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -54,6 +54,31 @@
   default value only if the variable is unset or empty; e.g. in bash
   syntax ``FOO=${FOO:-default}``.
 
+  The file should include a ``define_plugin`` line to indicate the
+  plugin's name, which is the name users should reference in
+  ``enable_plugin`` lines.  It should generally be the last component of
+  the git repo path (e.g., if the plugin's repo is
+  openstack/foo, then the name here should be "foo") ::
+
+    define_plugin <YOUR PLUGIN>
+
+  If your plugin depends on another plugin, indicate it in this file
+  with one or more lines like the following::
+
+    plugin_requires <YOUR PLUGIN> <OTHER PLUGIN>
+
+  For a complete example, if the plugin "foo" depends on "bar", the
+  ``settings`` file should include::
+
+    define_plugin foo
+    plugin_requires foo bar
+
+  Devstack does not currently use this dependency information, so it
+  is important that users continue to add ``enable_plugin`` lines in
+  the correct order in ``local.conf``.  However, recording the
+  dependencies allows other tools to take them into account when
+  automatically generating ``local.conf`` files.
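+
+  For the ``foo``/``bar`` example above, ``local.conf`` would
+  therefore need to enable the dependency first (the repository URLs
+  here are illustrative)::
+
+    enable_plugin bar https://opendev.org/openstack/bar
+    enable_plugin foo https://opendev.org/openstack/foo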
+
 - ``plugin.sh`` - the actual plugin. It is executed by devstack at
   well defined points during a ``stack.sh`` run. The plugin.sh
   internal structure is discussed below.
@@ -74,7 +99,7 @@
 
 An example would be as follows::
 
-  enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
+  enable_plugin ec2-api https://opendev.org/openstack/ec2-api
 
 plugin.sh contract
 ==================
@@ -123,7 +148,7 @@
 
 ``devstack/settings``::
 
-    # settings file for template
+  # settings file for template
   enable_service template
 
 
@@ -197,24 +222,66 @@
 System Packages
 ===============
 
-Devstack provides a framework for getting packages installed at an early
-phase of its execution. These packages may be defined in a plugin as files
-that contain new-line separated lists of packages required by the plugin
 
-Supported packaging systems include apt and yum across multiple distributions.
-To enable a plugin to hook into this and install package dependencies, packages
-may be listed at the following locations in the top-level of the plugin
-repository:
+
+Devstack-based
+--------------
+
+Devstack provides a custom framework for getting packages installed at
+an early phase of its execution.  These packages may be defined in a
+plugin as files that contain new-line separated lists of packages
+required by the plugin.
+
+Supported packaging systems include apt and yum across multiple
+distributions.  To enable a plugin to hook into this and install
+package dependencies, packages may be listed at the following
+locations in the top-level of the plugin repository:
 
 - ``./devstack/files/debs/$plugin_name`` - Packages to install when running
   on Ubuntu, Debian or Linux Mint.
 
 - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
-  on Red Hat, Fedora, CentOS or XenServer.
+  on Red Hat, Fedora, or CentOS.
 
 - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
   running on SUSE Linux or openSUSE.
 
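+For example, a hypothetical plugin ``foo`` that needs extra packages
+on Ubuntu would list them, one per line, in
+``./devstack/files/debs/foo``::
+
+  jq
+  libxml2-dev
+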
+Although there are no plans to remove this method of installing
+packages, plugins should consider it deprecated in favor of the
+``bindep`` support described below.
+
+bindep
+------
+
+The `bindep <https://docs.openstack.org/infra/bindep>`__ project has
+become the de facto standard for OpenStack projects to specify binary
+dependencies.
+
+A plugin may provide a ``./devstack/files/bindep.txt`` file, which
+will be processed with the *default* profile to install packages.  For
+details on the syntax, see the bindep documentation.
+
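+A minimal sketch of such a file (the packages are illustrative)::
+
+  libffi-dev [platform:dpkg]
+  libffi-devel [platform:rpm]
+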
+It is also possible to use the ``bindep.txt`` of projects that are
+being installed from source with the ``-bindep`` flag available in
+install functions.  For example:
+
+.. code-block:: bash
+
+  if use_library_from_git "diskimage-builder"; then
+      GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
+      GITDIR["diskimage-builder"]=$DEST/diskimage-builder
+      GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF
+      git_clone_by_name "diskimage-builder"
+      setup_dev_lib -bindep "diskimage-builder"
+  fi
+
+will result in any packages required by the ``bindep.txt`` of the
+``diskimage-builder`` project being installed.  Note, however, that
+jobs that switch a project between source and released/pypi installs
+(e.g. with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both
+released dependencies and master versions) will have to handle
+``bindep.txt`` being unavailable when the source directory is absent.
+
 
 Using Plugins in the OpenStack Gate
 ===================================
@@ -239,10 +306,12 @@
 the best practice is to build a dedicated
 ``openstack/devstack-plugin-FOO`` project.
 
+Legacy project-config jobs
+--------------------------
+
 To enable a plugin to be used in a gate job, the following lines will
 be needed in your ``jenkins/jobs/<project>.yaml`` definition in
-`project-config
-<http://git.openstack.org/cgit/openstack-infra/project-config/>`_::
+`project-config <https://opendev.org/openstack/project-config/>`_::
 
   # Because we are testing a non standard project, add
   # our project repository. This makes zuul do the right
@@ -252,12 +321,17 @@
   # note the actual url here is somewhat irrelevant because it
   # caches in nodepool, however make it a valid url for
   # documentation purposes.
-  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
+  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api"
+
+Zuul v3 jobs
+------------
+
+See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`.
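+
+A minimal sketch of a v3 job enabling a plugin (the plugin name, job
+name and URL are illustrative)::
+
+  - job:
+      name: foo-devstack
+      parent: devstack
+      required-projects:
+        - openstack/foo
+      vars:
+        devstack_plugins:
+          foo: https://opendev.org/openstack/foo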
 
 See Also
 ========
 
 For additional inspiration on devstack plugins you can check out the
-`Plugin Registry <plugin-registry.html>`_.
+:doc:`Plugin Registry <plugin-registry>`.
 
 .. _service types authority: https://specs.openstack.org/openstack/service-types-authority/
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 9cc4017..7853520 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -152,6 +152,19 @@
 
   /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
 
+Some executables, such as :program:`nova-compute`, will need to be executed
+with a particular group. This will be shown in the systemd unit file::
+
+  sudo systemctl cat devstack@n-cpu.service | grep Group
+
+::
+
+  Group = libvirt
+
+Use the :program:`sg` tool to execute the command as this group::
+
+  sg libvirt -c '/usr/local/bin/nova-compute --config-file /etc/nova/nova-cpu.conf'
+
 Using remote-pdb
 ----------------
 
@@ -181,31 +194,7 @@
 
 See the `remote-pdb`_ home page for more options.
 
-.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb
-
-Known Issues
-============
-
-Be careful about systemd python libraries. There are 3 of them on
-pypi, and they are all very different. They unfortunately all install
-into the ``systemd`` namespace, which can cause some issues.
-
-- ``systemd-python`` - this is the upstream maintained library, it has
-  a version number like systemd itself (currently ``234``). This is
-  the one you want.
-- ``systemd`` - a python 3 only library, not what you want.
-- ``python-systemd`` - another library you don't want. Installing it
-  on a system will break ansible's ability to run.
-
-
-If we were using user units, the ``[Service]`` - ``Group=`` parameter
-doesn't seem to work with user units, even though the documentation
-says that it should. This means that we will need to do an explicit
-``/usr/bin/sg``. This has the downside of making the SYSLOG_IDENTIFIER
-be ``sg``. We can explicitly set that with ``SyslogIdentifier=``, but
-it's really unfortunate that we're going to need this work
-around. This is currently not a problem because we're only using
-system units.
+.. _`remote-pdb`: https://pypi.org/project/remote-pdb/
 
 Future Work
 ===========
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
new file mode 100644
index 0000000..c43603e
--- /dev/null
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -0,0 +1,320 @@
+===============================
+Migrating Zuul V2 CI jobs to V3
+===============================
+
+The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved to
+the new CI system. All jobs have been migrated automatically to a format
+compatible with Zuul v3; the jobs produced in this way however are suboptimal
+and do not use the capabilities introduced by Zuul v3, which allow for re-use of
+job parts, in the form of Ansible roles, as well as inheritance between jobs.
+
+DevStack hosts a set of roles, plays and jobs that can be used by other
+repositories to define their DevStack based jobs. To benefit from them, jobs
+must be migrated from the legacy v2 ones into v3 native format.
+
+This document provides guidance and examples to make the migration process as
+painless and smooth as possible.
+
+Where to host the job definitions
+==================================
+
+In Zuul V3, jobs can be defined in the repository that contains the code they
+exercise. If you are writing CI jobs for an OpenStack service you can define
+your DevStack based CI jobs in one of the repositories that host the code for
+your service. If you have a branchless repo, like a Tempest plugin, that is
+a convenient choice to host the job definitions since job changes do not have
+to be backported. For example, see the beginning of the ``.zuul.yaml`` from the
+sahara Tempest plugin repo:
+
+.. code:: yaml
+
+  # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml:
+  - job:
+      name: sahara-tests-tempest
+      description: |
+        Run Tempest tests from the Sahara plugin.
+      parent: devstack-tempest
+
+Which base job to start from
+============================
+
+If your job needs an OpenStack cloud deployed via DevStack, but you don't plan
+on running Tempest tests, you can start from one of the base
+:doc:`jobs <zuul_jobs>` defined in the DevStack repo.
+
+The ``devstack`` job can be used for both single-node jobs and multi-node jobs,
+and it includes the list of services used in the integrated gate (keystone,
+glance, nova, cinder, neutron and swift). Different topologies can be achieved
+by switching the nodeset used in the child job.
+
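+For example, a multi-node variant of the ``devstack`` job can be
+obtained simply by switching the nodeset (the child job name here is
+hypothetical):
+
+.. code:: yaml
+
+  - job:
+      name: my-devstack-multinode
+      parent: devstack
+      nodeset: openstack-two-node
+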
+The ``devstack-base`` job is similar to ``devstack`` but it does not
+specify any required repo or service to be run in DevStack. It can be
+useful to set up child jobs that use a very narrow DevStack setup.
+
+If your job needs an OpenStack cloud deployed via DevStack, and you do plan
+on running Tempest tests, you can start from one of the base jobs defined in the
+Tempest repo.
+
+The ``devstack-tempest`` job can be used for both single-node jobs and
+multi-node jobs. Different topologies can be achieved by switching the nodeset
+used in the child job.
+
+Jobs can be customized as follows without writing any Ansible code:
+
+- add and/or remove DevStack services
+- add or modify DevStack and services configuration
+- install DevStack plugins
+- extend the number of sub-nodes (multinode only)
+- define extra log files and/or directories to be uploaded on logs.o.o
+- define extra log file extensions to be rewritten to .txt for ease of access
+
+Tempest jobs can be further customized as follows:
+
+- define the Tempest tox environment to be used
+- define the test concurrency
+- define the test regular expression
+
+By writing Ansible code, or importing existing custom roles, jobs can be
+further extended by:
+
+- adding pre and/or post playbooks
+- overriding the run playbook and adding custom roles
+
+The (partial) example below extends the single-node Tempest base job
+"devstack-tempest" in the Kuryr repository. The parent job name is defined in
+job.parent.
+
+.. code:: yaml
+
+  # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml:
+  - job:
+      name: kuryr-kubernetes-tempest-base
+      parent: devstack-tempest
+      description: Base kuryr-kubernetes-job
+      required-projects:
+        - openstack/devstack-plugin-container
+        - openstack/kuryr
+        - openstack/kuryr-kubernetes
+        - openstack/kuryr-tempest-plugin
+        - openstack/neutron-lbaas
+      vars:
+        tempest_test_regex: '^(kuryr_tempest_plugin.tests.)'
+        tox_envlist: 'all'
+        devstack_localrc:
+          KURYR_K8S_API_PORT: 8080
+        devstack_services:
+          kubernetes-api: true
+          kubernetes-controller-manager: true
+          kubernetes-scheduler: true
+          kubelet: true
+          kuryr-kubernetes: true
+          (...)
+        devstack_plugins:
+          kuryr-kubernetes: https://opendev.org/openstack/kuryr
+          devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container
+          neutron-lbaas: https://opendev.org/openstack/neutron-lbaas
+        tempest_plugins:
+          - kuryr-tempest-plugin
+        (...)
+
+Job variables
+=============
+
+Variables can be added to the job in three different places:
+
+- job.vars: these are global variables available to all nodes in the nodeset
+- job.host-vars.[HOST]: these are variables available only to the specified HOST
+- job.group-vars.[GROUP]: these are variables available only to the specified
+  GROUP
+
+Zuul merges dict variables through job inheritance. Host and group variables
+override variables with the same name defined as global variables.
+
+In the example below, for the sundaes job, hosts that are not part of the
+subnode group will run vanilla and chocolate. Hosts in the subnode group will
+run stracciatella and strawberry.
+
+.. code:: yaml
+
+  - job:
+      name: ice-creams
+      vars:
+        devstack_services:
+          vanilla: true
+          chocolate: false
+      group-vars:
+        subnode:
+          devstack_services:
+            pistacchio: true
+            stracciatella: true
+
+  - job:
+      name: sundaes
+      parent: ice-creams
+      vars:
+        devstack_services:
+          chocolate: true
+      group-vars:
+        subnode:
+          devstack_services:
+            strawberry: true
+            pistacchio: false
+
+
+DevStack Gate Flags
+===================
+
+The old CI system worked using a combination of DevStack, Tempest and
+devstack-gate to set up a test environment and run tests against it. With Zuul
+V3, the logic that used to live in devstack-gate is moved into different repos,
+including DevStack, Tempest and grenade.
+
+DevStack-gate exposes an interface for job definition based on a number of
+DEVSTACK_GATE_* environment variables, or flags. This guide shows how to map
+DEVSTACK_GATE flags into the new system.
+
+The repo column indicates which repository hosts the code that replaces
+the devstack-gate flag. The new implementation column explains how to reproduce
+the same or a similar behaviour in Zuul v3 jobs. For localrc settings,
+devstack-gate defined a default value. In Ansible jobs the default is either the
+value defined in the parent job, or the default from DevStack, if any.
+
+.. list-table:: **DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Repo
+     - New implementation
+   * - OVERRIDE_ZUUL_BRANCH
+     - zuul
+     - override-checkout: [branch] in the job definition.
+   * - DEVSTACK_GATE_NET_OVERLAY
+     - zuul-jobs
+     - A bridge called br-infra is set up for all jobs that inherit
+       from multinode with a dedicated `bridge role
+       <https://zuul-ci.org/docs/zuul-jobs/general-roles.html#role-multi-node-bridge>`_.
+   * - DEVSTACK_CINDER_VOLUME_CLEAR
+     - devstack
+     - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the
+       job vars.
+   * - DEVSTACK_GATE_NEUTRON
+     - devstack
+     - True by default. To disable, disable all neutron services in
+       devstack_services in the job definition.
+   * - DEVSTACK_GATE_CONFIGDRIVE
+     - devstack
+     - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_INSTALL_TESTONLY
+     - devstack
+     - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in
+       the job vars.
+   * - DEVSTACK_GATE_VIRT_DRIVER
+     - devstack
+     - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_LIBVIRT_TYPE
+     - devstack
+     - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_TEMPEST
+     - devstack and tempest
+     - Defined by the job that is used. The ``devstack`` job only runs
+       devstack. The ``devstack-tempest`` one triggers a Tempest run
+       as well.
+   * - DEVSTACK_GATE_TEMPEST_FULL
+     - tempest
+     - *tox_envlist: full* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL
+     - tempest
+     - *tox_envlist: all* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS
+     - tempest
+     - *tox_envlist: all-plugin* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_SCENARIOS
+     - tempest
+     - *tox_envlist: scenario* in the job vars.
+   * - TEMPEST_CONCURRENCY
+     - tempest
+     - *tempest_concurrency: [value]* in the job vars. This is
+       available only on jobs that inherit from ``devstack-tempest``
+       down.
+   * - DEVSTACK_GATE_TEMPEST_NOTESTS
+     - tempest
+     - *tox_envlist: venv-tempest* in the job vars. This will create
+       the Tempest virtual environment but run no tests.
+   * - DEVSTACK_GATE_SMOKE_SERIAL
+     - tempest
+     - *tox_envlist: smoke-serial* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION
+     - tempest
+     - *tox_envlist: full-serial* in the job vars.
+       *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in
+       the job vars.
+
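+As an example, a legacy job that set *DEVSTACK_GATE_TEMPEST_FULL=1*
+and *DEVSTACK_GATE_CONFIGDRIVE=1* would map to something like the
+following (the job name is hypothetical):
+
+.. code:: yaml
+
+  - job:
+      name: my-tempest-full
+      parent: devstack-tempest
+      vars:
+        tox_envlist: full
+        devstack_localrc:
+          FORCE_CONFIG_DRIVE: true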
+
+The following flags have not been migrated yet or are legacy and won't be
+migrated at all.
+
+.. list-table:: **Not Migrated DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Status
+     - Details
+   * - DEVSTACK_GATE_TOPOLOGY
+     - WIP
+     - The topology depends on the base job that is used and more
+       specifically on the nodeset attached to it. The new job format
+       allows projects to define the variables to be passed to every
+       node/node-group that exists in the topology. Named topologies
+       that include the nodeset and the matching variables can be
+       defined in the form of base jobs.
+   * - DEVSTACK_GATE_GRENADE
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - GRENADE_BASE_BRANCH
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - DEVSTACK_GATE_NEUTRON_DVR
+     - TBD
+     - Depends on multinode support.
+   * - DEVSTACK_GATE_EXERCISES
+     - TBD
+     - Can be done on request.
+   * - DEVSTACK_GATE_IRONIC
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_IRONIC_DRIVER
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_POSTGRES
+     - Legacy
+     - This flag exists in d-g but the only thing that it does is
+       capture postgres logs. This is already supported by the roles
+       in post, so the flag is useless in the new jobs. Postgres
+       itself can be enabled via the devstack_services job variable.
+   * - DEVSTACK_GATE_ZEROMQ
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_MQ_DRIVER
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS
+     - Legacy
+     - Stress is not in Tempest anymore.
+   * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW
+     - Legacy
+     - This is not used anywhere.
+   * - DEVSTACK_GATE_CELLS
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT
+     - Legacy
+     - This has no effect in d-g.
diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst
new file mode 100644
index 0000000..cf203a8
--- /dev/null
+++ b/doc/source/zuul_jobs.rst
@@ -0,0 +1,4 @@
+Zuul CI Jobs
+============
+
+.. zuul:autojobs::
diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst
new file mode 100644
index 0000000..4939281
--- /dev/null
+++ b/doc/source/zuul_roles.rst
@@ -0,0 +1,4 @@
+Zuul CI Roles
+=============
+
+.. zuul:autoroles::
diff --git a/exercise.sh b/exercise.sh
deleted file mode 100755
index 9067033..0000000
--- a/exercise.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# **exercise.sh**
-
-# Keep track of the current DevStack directory.
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-# Run everything in the exercises/ directory that isn't explicitly disabled
-
-# comma separated list of script basenames to skip
-# to refrain from exercising foo.sh use ``SKIP_EXERCISES=foo``
-SKIP_EXERCISES=${SKIP_EXERCISES:-""}
-
-# comma separated list of script basenames to run
-# to run only foo.sh use ``RUN_EXERCISES=foo``
-basenames=${RUN_EXERCISES:-""}
-
-EXERCISE_DIR=$TOP_DIR/exercises
-
-if [[ -z "${basenames}" ]]; then
-    # Locate the scripts we should run
-    basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
-else
-    # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``.
-    SKIP_EXERCISES=
-fi
-
-# Track the state of each script
-passes=""
-failures=""
-skips=""
-
-# Loop over each possible script (by basename)
-for script in $basenames; do
-    if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then
-        skips="$skips $script"
-    else
-        echo "====================================================================="
-        echo Running $script
-        echo "====================================================================="
-        $EXERCISE_DIR/$script.sh
-        exitcode=$?
-        if [[ $exitcode == 55 ]]; then
-            skips="$skips $script"
-        elif [[ $exitcode -ne 0 ]]; then
-            failures="$failures $script"
-        else
-            passes="$passes $script"
-        fi
-    fi
-done
-
-# Output status of exercise run
-echo "====================================================================="
-for script in $skips; do
-    echo SKIP $script
-done
-for script in $passes; do
-    echo PASS $script
-done
-for script in $failures; do
-    echo FAILED $script
-done
-echo "====================================================================="
-
-if [[ -n "$failures" ]]; then
-    exit 1
-fi
diff --git a/exerciserc b/exerciserc
deleted file mode 100644
index 978e0b3..0000000
--- a/exerciserc
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-# source exerciserc
-#
-# Configure the DevStack exercise scripts
-# For best results, source this _after_ stackrc/localrc as it will set
-# values only if they are not already set.
-
-# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
-
-# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
-
-# Max time from run instance command until it is running
-export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
-
-# Max time to wait for a vm to terminate
-export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
-
-# The size of the volume we want to boot from; some storage back-ends
-# do not allow a disk resize, so it's important that this can be tuned
-export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
deleted file mode 100755
index 8cbca54..0000000
--- a/exercises/aggregates.sh
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env bash
-
-# **aggregates.sh**
-
-# This script demonstrates how to use host aggregates:
-#
-# *  Create an Aggregate
-# *  Updating Aggregate details
-# *  Testing Aggregate metadata
-# *  Testing Aggregate delete
-# *  Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
-# *  Testing add/remove hosts (with one host)
-
-echo "**************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "**************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Test as the admin user
-# Note: this imports stackrc/functions, etc.
-. $TOP_DIR/openrc admin admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Cells does not support aggregates.
-is_service_enabled n-cell && exit 55
-
-# Create an aggregate
-# ===================
-
-AGGREGATE_NAME=test_aggregate_$RANDOM
-AGGREGATE2_NAME=test_aggregate_$RANDOM
-AGGREGATE_A_ZONE=nova
-
-function exit_if_aggregate_present {
-    aggregate_name=$1
-
-    if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
-        echo "SUCCESS $aggregate_name not present"
-    else
-        die $LINENO "found aggregate: $aggregate_name"
-    fi
-}
-
-exit_if_aggregate_present $AGGREGATE_NAME
-
-AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
-
-AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE2_ID "Failure creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
-
-# check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
-
-
-# Ensure creating a duplicate fails
-# =================================
-
-if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
-    die $LINENO "could create duplicate aggregate"
-fi
-
-
-# Test aggregate-update (and aggregate-details)
-# =============================================
-AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-
-# Test aggregate-set-metadata
-# ===========================
-META_DATA_1_KEY=asdf
-META_DATA_2_KEY=foo
-META_DATA_3_KEY=bar
-
-# Ensure no additional metadata is set
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep 123
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
-
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared"
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-
-# Test aggregate-add/remove-host
-# ==============================
-if [ "$VIRT_DRIVER" == "xenserver" ]; then
-    echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
-fi
-FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
-# Make sure the same host can be added to two aggregates
-nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
-nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
-if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
-    die $LINENO "could add duplicate host to single aggregate"
-fi
-nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
-nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
-
-# Test aggregate-delete
-# =====================
-nova aggregate-delete $AGGREGATE_ID
-nova aggregate-delete $AGGREGATE2_ID
-exit_if_aggregate_present $AGGREGATE_NAME
-
-set +o xtrace
-echo "**************************************************"
-echo "End DevStack Exercise: $0"
-echo "**************************************************"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
deleted file mode 100755
index 7478bdf..0000000
--- a/exercises/boot_from_volume.sh
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env bash
-
-# **boot_from_volume.sh**
-
-# This script demonstrates how to boot from a volume.  It does the following:
-#
-# *  Create a bootable volume
-# *  Boot a volume-backed instance
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled cinder || exit 55
-
-# Ironic does not support boot from volume.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-boot_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-bfv-inst}
-VOL_NAME=${VOL_NAME:-ex-vol-bfv}
-
-
-# Launching a server
-# ==================
-
-# List servers for project:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
-    # Cells does not support security groups, so force the use of "default"
-    SECGROUP="default"
-    echo "Using the default security group because of Cells."
-else
-    # Create a secgroup
-    if ! nova secgroup-list | grep -q $SECGROUP; then
-        nova secgroup-create $SECGROUP "$SECGROUP description"
-        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-            echo "Security group not created"
-            exit 1
-        fi
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
-fi
-
-# Setup Keypair
-KEY_NAME=test_key
-KEY_FILE=key.pem
-nova keypair-delete $KEY_NAME || true
-nova keypair-add $KEY_NAME > $KEY_FILE
-chmod 600 $KEY_FILE
-
-# Set up volume
-# -------------
-
-# Delete any old volume
-cinder delete $VOL_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
-fi
-
-# Create the bootable volume
-start_time=$(date +%s)
-cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
-    exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME  | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Boot instance
-# -------------
-
-# Boot using the --block-device-mapping param. The format of mapping is:
-# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
-# Leaving the middle two fields blank appears to do the right thing
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Clean up
-# --------
-
-# Delete volume backed instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
-fi
-
-# Wait for volume to be released
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not released"
-    exit 1
-fi
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-if [[ $SECGROUP = "default" ]] ; then
-    echo "Skipping deleting default security group"
-else
-    # Delete secgroup
-    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
deleted file mode 100755
index b380968..0000000
--- a/exercises/client-args.sh
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-args.sh**
-
-# Test OpenStack client authentication arguments handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-# Save the known variables for later
-export x_PROJECT_NAME=$OS_PROJECT_NAME
-export x_USERNAME=$OS_USERNAME
-export x_PASSWORD=$OS_PASSWORD
-export x_AUTH_URL=$OS_AUTH_URL
-
-# Unset the usual variables to force argument processing
-unset OS_PROJECT_NAME
-unset OS_USERNAME
-unset OS_PASSWORD
-unset OS_AUTH_URL
-
-# Common authentication args
-PROJECT_ARG="--os-project-name=$x_PROJECT_NAME"
-ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
-        STATUS_KEYSTONE="Skipped"
-    else
-        echo -e "\nTest Keystone"
-        if openstack $PROJECT_ARG $ARGS catalog show identity; then
-            STATUS_KEYSTONE="Succeeded"
-        else
-            STATUS_KEYSTONE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
-        STATUS_NOVA="Skipped"
-    else
-        # Test OSAPI
-        echo -e "\nTest Nova"
-        if nova $PROJECT_ARG $ARGS flavor-list; then
-            STATUS_NOVA="Succeeded"
-        else
-            STATUS_NOVA="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
-        STATUS_CINDER="Skipped"
-    else
-        echo -e "\nTest Cinder"
-        if cinder $PROJECT_ARG $ARGS list; then
-            STATUS_CINDER="Succeeded"
-        else
-            STATUS_CINDER="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
-        STATUS_GLANCE="Skipped"
-    else
-        echo -e "\nTest Glance"
-        if openstack $PROJECT_ARG $ARGS image list; then
-            STATUS_GLANCE="Succeeded"
-        else
-            STATUS_GLANCE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Swift client
-# ------------
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
-        STATUS_SWIFT="Skipped"
-    else
-        echo -e "\nTest Swift"
-        if swift $PROJECT_ARG $ARGS stat; then
-            STATUS_SWIFT="Succeeded"
-        else
-            STATUS_SWIFT="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
-    if [[ -n "$2" ]]; then
-        echo "$1: $2"
-    fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
-    echo "*********************************************************************"
-    echo "SUCCESS: End DevStack Exercise: $0"
-    echo "*********************************************************************"
-fi
-
-exit $RETURN
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
deleted file mode 100755
index fff04df..0000000
--- a/exercises/client-env.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-env.sh**
-
-# Test OpenStack client environment variable handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
-    if ! is_set $i; then
-        echo "$i expected to be set"
-        ABORT=1
-    fi
-done
-if [[ -n "$ABORT" ]]; then
-    exit 1
-fi
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
-        STATUS_KEYSTONE="Skipped"
-    else
-        echo -e "\nTest Keystone"
-        if openstack endpoint show identity; then
-            STATUS_KEYSTONE="Succeeded"
-        else
-            STATUS_KEYSTONE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
-        STATUS_NOVA="Skipped"
-    else
-        # Test OSAPI
-        echo -e "\nTest Nova"
-        if nova flavor-list; then
-            STATUS_NOVA="Succeeded"
-        else
-            STATUS_NOVA="Failed"
-            RETURN=1
-        fi
-
-    fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
-        STATUS_CINDER="Skipped"
-    else
-        echo -e "\nTest Cinder"
-        if cinder list; then
-            STATUS_CINDER="Succeeded"
-        else
-            STATUS_CINDER="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
-        STATUS_GLANCE="Skipped"
-    else
-        echo -e "\nTest Glance"
-        if openstack image list; then
-            STATUS_GLANCE="Succeeded"
-        else
-            STATUS_GLANCE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Swift client
-# ------------
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
-        STATUS_SWIFT="Skipped"
-    else
-        echo -e "\nTest Swift"
-        if swift stat; then
-            STATUS_SWIFT="Succeeded"
-        else
-            STATUS_SWIFT="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
-    if [[ -n "$2" ]]; then
-        echo "$1: $2"
-    fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
-    echo "*********************************************************************"
-    echo "SUCCESS: End DevStack Exercise: $0"
-    echo "*********************************************************************"
-fi
-
-exit $RETURN
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
deleted file mode 100755
index 5abc713..0000000
--- a/exercises/floating_ips.sh
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env bash
-
-# **floating_ips.sh** - using the cloud can be fun
-
-# Test instance connectivity with the ``nova`` command from ``python-novaclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-test_secgroup}
-
-# Default floating IP pool name
-DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public}
-
-# Additional floating IP pool and range
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-
-# Instance name
-VM_NAME="ex-float"
-
-# Cells does not support floating IP API calls
-is_service_enabled n-cell && exit 55
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
-    nova secgroup-create $SECGROUP "$SECGROUP description"
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        die $LINENO "Security group not created"
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-    die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    die $LINENO "server didn't terminate!"
-    exit 1
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Floating IPs
-# ------------
-
-# Allocate a floating IP from the default pool
-FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
-
-# List floating addresses
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    die $LINENO "Floating IP not allocated"
-fi
-
-# Add floating IP to our server
-nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
-
-# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
-ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME"
-
-if ! is_service_enabled neutron; then
-    # Allocate an IP from second floating pool
-    TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
-    die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
-
-    # list floating addresses
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
-        die $LINENO "Floating IP not allocated"
-    fi
-fi
-
-# Dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
-    die $LINENO "Failure deleting security group rule from $SECGROUP"
-
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
-    die $LINENO "Security group rule not deleted from $SECGROUP"
-fi
-
-# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
-    # Test that we aren't able to ping our floating IP within ASSOCIATE_TIMEOUT seconds
-    ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail
-fi
-
-# Clean up
-# --------
-
-if ! is_service_enabled neutron; then
-    # Delete second floating IP
-    nova floating-ip-delete $TEST_FLOATING_IP || \
-        die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
-fi
-
-# Delete the floating ip
-nova floating-ip-delete $FLOATING_IP || \
-    die $LINENO "Failure deleting floating IP $FLOATING_IP"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-# Wait for termination
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    die $LINENO "Server $VM_NAME not deleted"
-fi
-
-# Delete secgroup
-nova secgroup-delete $SECGROUP || \
-    die $LINENO "Failure deleting security group $SECGROUP"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
deleted file mode 100755
index e8c8f62..0000000
--- a/exercises/neutron-adv-test.sh
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright 2012, Cisco Systems
-# Copyright 2012, VMware, Inc.
-# Copyright 2012, NTT MCL, Inc.
-#
-# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com
-#
-# **neutron-adv-test.sh**
-
-# Perform integration testing of Nova and other components with Neutron.
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script traps errors (via the ERR trap below) so that errors don't
-# compound and you see only the first error that occurred.
-
-set -o errtrace
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-# Environment
-# -----------
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import neutron functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# If neutron is not enabled we exit with exitcode 55, which means the exercise is skipped.
-neutron_plugin_check_adv_test_requirements || exit 55
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Neutron Settings
-# ----------------
-
-PROJECTS="DEMO1"
-# TODO(nati): test public network
-#PROJECTS="DEMO1,DEMO2"
-
-PUBLIC_NAME="admin"
-DEMO1_NAME="demo1"
-DEMO2_NAME="demo2"
-
-PUBLIC_NUM_NET=1
-DEMO1_NUM_NET=1
-DEMO2_NUM_NET=2
-
-PUBLIC_NET1_CIDR="200.0.0.0/24"
-DEMO1_NET1_CIDR="10.10.0.0/24"
-DEMO2_NET1_CIDR="10.20.0.0/24"
-DEMO2_NET2_CIDR="10.20.1.0/24"
-
-PUBLIC_NET1_GATEWAY="200.0.0.1"
-DEMO1_NET1_GATEWAY="10.10.0.1"
-DEMO2_NET1_GATEWAY="10.20.0.1"
-DEMO2_NET2_GATEWAY="10.20.1.1"
-
-PUBLIC_NUM_VM=1
-DEMO1_NUM_VM=1
-DEMO2_NUM_VM=2
-
-PUBLIC_VM1_NET='admin-net1'
-DEMO1_VM1_NET='demo1-net1'
-# Multi-NIC settings; note this fails without NIC configuration in the OS image
-DEMO2_VM1_NET='demo2-net1'
-DEMO2_VM2_NET='demo2-net2'
-
-PUBLIC_NUM_ROUTER=1
-DEMO1_NUM_ROUTER=1
-DEMO2_NUM_ROUTER=1
-
-PUBLIC_ROUTER1_NET="admin-net1"
-DEMO1_ROUTER1_NET="demo1-net1"
-DEMO2_ROUTER1_NET="demo2-net1"
-
-# Various functions
-# -----------------
-
-function foreach_project {
-    COMMAND=$1
-    for PROJECT in ${PROJECTS//,/ };do
-        eval ${COMMAND//%PROJECT%/$PROJECT}
-    done
-}
-
-function foreach_project_resource {
-    COMMAND=$1
-    RESOURCE=$2
-    for PROJECT in ${PROJECTS//,/ };do
-        eval 'NUM=$'"${PROJECT}_NUM_$RESOURCE"
-        for i in `seq $NUM`;do
-            local COMMAND_LOCAL=${COMMAND//%PROJECT%/$PROJECT}
-            COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i}
-            eval $COMMAND_LOCAL
-        done
-    done
-}
-
-function foreach_project_vm {
-    COMMAND=$1
-    foreach_project_resource "$COMMAND" 'VM'
-}
-
-function foreach_project_net {
-    COMMAND=$1
-    foreach_project_resource "$COMMAND" 'NET'
-}
-
-function get_image_id {
-    local IMAGE_ID
-    IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-    die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
-    echo "$IMAGE_ID"
-}
-
-function get_project_id {
-    local PROJECT_NAME=$1
-    local PROJECT_ID
-    PROJECT_ID=`openstack project list | grep " $PROJECT_NAME " | head -n 1 | get_field 1`
-    die_if_not_set $LINENO PROJECT_ID "Failure retrieving PROJECT_ID for $PROJECT_NAME"
-    echo "$PROJECT_ID"
-}
-
-function get_user_id {
-    local USER_NAME=$1
-    local USER_ID
-    USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
-    die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
-    echo "$USER_ID"
-}
-
-function get_role_id {
-    local ROLE_NAME=$1
-    local ROLE_ID
-    ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
-    die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
-    echo "$ROLE_ID"
-}
-
-function get_network_id {
-    local NETWORK_NAME="$1"
-    local NETWORK_ID
-    NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
-    echo $NETWORK_ID
-}
-
-function get_flavor_id {
-    local INSTANCE_TYPE=$1
-    local FLAVOR_ID
-    FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
-    die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
-    echo "$FLAVOR_ID"
-}
-
-function confirm_server_active {
-    local VM_UUID=$1
-    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-        echo "server '$VM_UUID' did not become active!"
-        false
-    fi
-}
-
-function neutron_debug_admin {
-    local os_username=$OS_USERNAME
-    local os_project_id=$OS_PROJECT_ID
-    source $TOP_DIR/openrc admin admin
-    neutron-debug $@
-    source $TOP_DIR/openrc $os_username $os_project_id
-}
-
-function add_project {
-    openstack project create $1
-    openstack user create $2 --password ${ADMIN_PASSWORD} --project $1
-    openstack role add Member --project $1 --user $2
-}
-
-function remove_project {
-    local PROJECT=$1
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    openstack project delete $PROJECT_ID
-}
-
-function remove_user {
-    local USER=$1
-    local USER_ID
-    USER_ID=$(get_user_id $USER)
-    openstack user delete $USER_ID
-}
-
-function create_projects {
-    source $TOP_DIR/openrc admin admin
-    add_project demo1 demo1 demo1
-    add_project demo2 demo2 demo2
-    source $TOP_DIR/openrc demo demo
-}
-
-function delete_projects_and_users {
-    source $TOP_DIR/openrc admin admin
-    remove_user demo1
-    remove_project demo1
-    remove_user demo2
-    remove_project demo2
-    echo "removed all projects"
-    source $TOP_DIR/openrc demo demo
-}
-
-function create_network {
-    local PROJECT=$1
-    local GATEWAY=$2
-    local CIDR=$3
-    local NUM=$4
-    local EXTRA=$5
-    local NET_NAME="${PROJECT}-net$NUM"
-    local ROUTER_NAME="${PROJECT}-router${NUM}"
-    source $TOP_DIR/openrc admin admin
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    local NET_ID
-    NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA | grep ' id ' | awk '{print $4}')
-    die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
-    openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
-    neutron_debug_admin probe-create --device-owner compute $NET_ID
-    source $TOP_DIR/openrc demo demo
-}
-
-function create_networks {
-    foreach_project_net 'create_network ${%PROJECT%_NAME} ${%PROJECT%_NET%NUM%_GATEWAY} ${%PROJECT%_NET%NUM%_CIDR} %NUM% ${%PROJECT%_NET%NUM%_EXTRA}'
-    #TODO(nati) test security group function
-    # allow ICMP for both project's security groups
-    #source $TOP_DIR/openrc demo1 demo1
-    #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-    #source $TOP_DIR/openrc demo2 demo2
-    #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_vm {
-    local PROJECT=$1
-    local NUM=$2
-    local NET_NAMES=$3
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    local NIC=""
-    for NET_NAME in ${NET_NAMES//,/ };do
-        NIC="$NIC --nic net-id="`get_network_id $NET_NAME`
-    done
-    #TODO (nati) Add multi-nic test
-    #TODO (nati) Add public-net test
-    local VM_UUID
-    VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
-        --image $(get_image_id) \
-        $NIC \
-        $PROJECT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
-    die_if_not_set $LINENO VM_UUID "Failure launching $PROJECT-server$NUM"
-    confirm_server_active $VM_UUID
-}
-
-function create_vms {
-    foreach_project_vm 'create_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function ping_ip {
-    # Test agent connection.  Assumes namespaces are disabled, and
-    # that DHCP is in use, but not L3
-    local VM_NAME=$1
-    local NET_NAME=$2
-    IP=$(get_instance_ip $VM_NAME $NET_NAME)
-    ping_check $IP $BOOT_TIMEOUT $NET_NAME
-}
-
-function check_vm {
-    local PROJECT=$1
-    local NUM=$2
-    local VM_NAME="$PROJECT-server$NUM"
-    local NET_NAME=$3
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    ping_ip $VM_NAME $NET_NAME
-    # TODO (nati) test ssh connection
-    # TODO (nati) test inter connection between vm
-    # TODO (nati) test dhcp host routes
-    # TODO (nati) test multi-nic
-}
-
-function check_vms {
-    foreach_project_vm 'check_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function shutdown_vm {
-    local PROJECT=$1
-    local NUM=$2
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    VM_NAME=${PROJECT}-server$NUM
-    nova delete $VM_NAME
-}
-
-function shutdown_vms {
-    foreach_project_vm 'shutdown_vm ${%PROJECT%_NAME} %NUM%'
-    if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
-        die $LINENO "Some VMs failed to shutdown"
-    fi
-}
-
-function delete_network {
-    local PROJECT=$1
-    local NUM=$2
-    local NET_NAME="${PROJECT}-net$NUM"
-    source $TOP_DIR/openrc admin admin
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    #TODO(nati) comment out until l3-agent merged
-    #for res in port subnet net router;do
-    for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
-        delete_probe $net_id
-        openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
-        openstack network delete $net_id
-    done
-    source $TOP_DIR/openrc demo demo
-}
-
-function delete_networks {
-    foreach_project_net 'delete_network ${%PROJECT%_NAME} %NUM%'
-    # TODO(nati) add security group check after it is implemented
-    # source $TOP_DIR/openrc demo1 demo1
-    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
-    # source $TOP_DIR/openrc demo2 demo2
-    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_all {
-    create_projects
-    create_networks
-    create_vms
-}
-
-function delete_all {
-    shutdown_vms
-    delete_networks
-    delete_projects_and_users
-}
-
-function all {
-    create_all
-    check_vms
-    delete_all
-}
-
-# Test functions
-# --------------
-
-function test_functions {
-    IMAGE=$(get_image_id)
-    echo $IMAGE
-
-    PROJECT_ID=$(get_project_id demo)
-    echo $PROJECT_ID
-
-    FLAVOR_ID=$(get_flavor_id m1.tiny)
-    echo $FLAVOR_ID
-
-    NETWORK_ID=$(get_network_id admin)
-    echo $NETWORK_ID
-}
-
-# Usage and main
-# --------------
-
-function usage {
-    echo "$0: [-h]"
-    echo "  -h, --help               Display help message"
-    echo "  -t, --project            Create projects"
-    echo "  -n, --net                Create networks"
-    echo "  -v, --vm                 Create VMs"
-    echo "  -c, --check              Check connection"
-    echo "  -x, --delete-projects    Delete projects"
-    echo "  -y, --delete-nets        Delete networks"
-    echo "  -z, --delete-vms         Delete VMs"
-    echo "  -a, --all                Run all of the above"
-    echo "  -T, --test               Test functions"
-}
-
-function main {
-
-    echo Description
-
-    if [ $# -eq 0 ] ; then
-        # if no args are provided, run all tests
-        all
-    else
-
-        while [ "$1" != "" ]; do
-            case $1 in
-                -h | --help )   usage
-                                exit
-                                ;;
-                -n | --net )    create_networks
-                                exit
-                                ;;
-                -v | --vm )     create_vms
-                                exit
-                                ;;
-                -t | --project ) create_projects
-                                exit
-                                ;;
-                -c | --check )   check_vms
-                                exit
-                                ;;
-                -T | --test )   test_functions
-                                exit
-                                ;;
-                -x | --delete-projects ) delete_projects_and_users
-                                exit
-                                ;;
-                -y | --delete-nets ) delete_networks
-                                exit
-                                ;;
-                -z | --delete-vms ) shutdown_vms
-                                exit
-                                ;;
-                -a | --all )    all
-                                exit
-                                ;;
-                * )             usage
-                                exit 1
-            esac
-            shift
-        done
-    fi
-}
-
-trap failed ERR
-function failed {
-    local r=$?
-    set +o errtrace
-    set +o xtrace
-    echo "Failed to execute"
-    echo "Starting cleanup..."
-    delete_all
-    echo "Finished cleanup"
-    exit $r
-}
-
-# Kick off script
-# ---------------
-
-echo "$@"
-main "$@"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
deleted file mode 100755
index 2f78e39..0000000
--- a/exercises/sec_groups.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-# **sec_groups.sh**
-
-# Test security groups via the command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-
-# Testing Security Groups
-# =======================
-
-# List security groups
-nova secgroup-list
-
-# Create a random name for a new security group, then create a group with that name
-SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
-nova secgroup-create $SEC_GROUP_NAME 'a test security group'
-
-# Add some rules to the secgroup
-RULES_TO_ADD=( 22 3389 5900 )
-
-for RULE in "${RULES_TO_ADD[@]}"; do
-    nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Check to make sure rules were added
-SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
-die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
-for i in "${RULES_TO_ADD[@]}"; do
-    skip=
-    for j in "${SEC_GROUP_RULES[@]}"; do
-        [[ $i == $j ]] && { skip=1; break; }
-    done
-    [[ -n $skip ]] || exit 1
-done
-
-# Delete rules and secgroup
-for RULE in "${RULES_TO_ADD[@]}"; do
-    nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Delete secgroup
-nova secgroup-delete $SEC_GROUP_NAME || \
-    die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
deleted file mode 100755
index 8aa376b..0000000
--- a/exercises/swift.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# **swift.sh**
-
-# Test swift via the ``python-openstackclient`` command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If swift is not enabled we exit with exitcode 55, which means the
-# exercise is skipped.
-is_service_enabled s-proxy || exit 55
-
-# Container name
-CONTAINER=ex-swift
-OBJECT=/etc/issue
-
-
-# Testing Swift
-# =============
-
-# Check that we can talk to swift via keystone
-openstack object store account show || die $LINENO "Failure getting account status"
-
-# We start by creating a test container
-openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
-
-# Add a file to it.
-openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER"
-
-# list the objects
-openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
-
-# delete the object first
-openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER"
-
-# delete the container
-openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
deleted file mode 100755
index e7c3560..0000000
--- a/exercises/volumes.sh
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env bash
-
-# **volumes.sh**
-
-# Test cinder volumes with the ``cinder`` command from ``python-cinderclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55, which means the
-# exercise is skipped.
-is_service_enabled cinder || exit 55
-
-# Ironic does not currently support volume attachment.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-vol_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-vol-inst}
-VOL_NAME="ex-vol-$(openssl rand -hex 4)"
-
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
-    # Cells does not support security groups, so force the use of "default"
-    SECGROUP="default"
-    echo "Using the default security group because of Cells."
-else
-    # Create a secgroup
-    if ! nova secgroup-list | grep -q $SECGROUP; then
-        nova secgroup-create $SECGROUP "$SECGROUP description"
-        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-            echo "Security group not created"
-            exit 1
-        fi
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-    die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    die $LINENO "server didn't terminate!"
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Volumes
-# -------
-
-# Verify it doesn't exist
-if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
-    die $LINENO "Volume $VOL_NAME already exists"
-fi
-
-# Create a new volume
-start_time=$(date +%s)
-cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not created"
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Attach to server
-DEVICE=/dev/vdb
-start_time=$(date +%s)
-nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-attach in $((end_time - start_time)) seconds"
-
-VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
-if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
-    die $LINENO "Volume not attached to correct instance"
-fi
-
-# Clean up
-# --------
-
-# Detach volume
-start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-detach in $((end_time - start_time)) seconds"
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not deleted"
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    die $LINENO "Server $VM_NAME not deleted"
-fi
-
-if [[ $SECGROUP = "default" ]] ; then
-    echo "Skipping deleting default security group"
-else
-    # Delete secgroup
-    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 15ecfe3..06c73ec 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -6,7 +6,7 @@
         source $TOP_DIR/lib/tempest
     elif [[ "$1" == "stack" && "$2" == "install" ]]; then
         echo_summary "Installing Tempest"
-        install_tempest
+        async_runfunc install_tempest
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         # Tempest config must come after layer 2 services are running
         :
@@ -17,6 +17,7 @@
         # local.conf Tempest option overrides
         :
     elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
+        async_wait install_tempest
         echo_summary "Initializing Tempest"
         configure_tempest
         echo_summary "Installing Tempest Plugins"
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index bfd7567..efcfc03 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,5 +1,5 @@
 <VirtualHost *:80>
-    WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
+    WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py
     WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP}
     WSGIApplicationGroup %{GLOBAL}
 
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1284360..1a353e5 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,5 +1,4 @@
 Listen %PUBLICPORT%
-Listen %ADMINPORT%
 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
 
 <Directory %KEYSTONE_BIN%>
@@ -20,20 +19,6 @@
     %SSLKEYFILE%
 </VirtualHost>
 
-<VirtualHost *:%ADMINPORT%>
-    WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup keystone-admin
-    WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    ErrorLogFormat "%M"
-    ErrorLog /var/log/%APACHE_NAME%/keystone.log
-    CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-</VirtualHost>
-
 %SSLLISTEN%<VirtualHost *:443>
 %SSLLISTEN%    %SSLENGINE%
 %SSLLISTEN%    %SSLCERTFILE%
@@ -49,13 +34,3 @@
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
 </Location>
-
-Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_admin>
-    SetHandler wsgi-script
-    Options +ExecCGI
-
-    WSGIProcessGroup keystone-admin
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-</Location>
diff --git a/files/apache-neutron.template b/files/apache-neutron.template
new file mode 100644
index 0000000..c7796b9
--- /dev/null
+++ b/files/apache-neutron.template
@@ -0,0 +1,36 @@
+Listen %PUBLICPORT%
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined
+
+<Directory %NEUTRON_BIN%>
+    Require all granted
+</Directory>
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup neutron-server
+    WSGIScriptAlias / %NEUTRON_BIN%/neutron-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%M"
+    ErrorLog /var/log/%APACHE_NAME%/neutron.log
+    CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+
+%SSLLISTEN%<VirtualHost *:443>
+%SSLLISTEN%    %SSLENGINE%
+%SSLLISTEN%    %SSLCERTFILE%
+%SSLLISTEN%    %SSLKEYFILE%
+%SSLLISTEN%</VirtualHost>
+
+Alias /networking %NEUTRON_BIN%/neutron-api
+<Location /networking>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup neutron-server
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
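The %TOKEN% strings above are devstack template placeholders, substituted when the site config is written out. A hypothetical rendering step (the real one lives in lib/neutron; all values here are examples only):

    sudo sed -e "s|%PUBLICPORT%|9696|g" \
        -e "s|%NEUTRON_BIN%|/usr/local/bin|g" \
        -e "s|%APIWORKERS%|2|g" \
        -e "s|%USER%|stack|g" \
        -e "s|%APACHE_NAME%|apache2|g" \
        files/apache-neutron.template | sudo tee /etc/apache2/sites-available/neutron-api.conf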
diff --git a/files/apts b/files/apts
deleted file mode 120000
index ef926de..0000000
--- a/files/apts
+++ /dev/null
@@ -1 +0,0 @@
-debs/
\ No newline at end of file
diff --git a/files/debs/cinder b/files/debs/cinder
index c1b79fd..5d390e2 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -1,6 +1,4 @@
 lvm2
-open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:precise
 qemu-utils
 tgt # NOPRIME
 thin-provisioning-tools
diff --git a/files/debs/dstat b/files/debs/dstat
index 0d9da44..40d00f4 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1,2 +1,2 @@
-dstat
-python-psutil
+dstat # dist:bionic
+pcp
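A note on the trailing comments in these package lists (here and in the hunks below): they are directives parsed by devstack's package machinery, not free-form comments. Roughly:

    dstat # dist:bionic    <- install only on the listed distro(s)
    xinetd # not:f34       <- install everywhere except the listed distro(s)
    tgt # NOPRIME          <- skipped by the base installer; the owning service installs it itself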
diff --git a/files/debs/general b/files/debs/general
index 8e0018d..364f3cc 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,11 +1,11 @@
 apache2
 apache2-dev
 bc
-bridge-utils
 bsdmainutils
 curl
 default-jre-headless  # NOPRIME
 g++
+gawk
 gcc
 gettext  # used for compiling message catalogs
 git
@@ -14,7 +14,7 @@
 libapache2-mod-proxy-uwsgi
 libffi-dev # for pyOpenSSL
 libjpeg-dev # Pillow 3.0.0
-libmysqlclient-dev  # MySQL-python
+libpcre3-dev # for python-pcre
 libpq-dev  # psycopg2
 libssl-dev # for pyOpenSSL
 libsystemd-dev # for systemd-python
@@ -26,9 +26,10 @@
 openssl
 pkg-config
 psmisc
-python2.7
-python-dev
-python-gdbm # needed for testr
+python3-dev
+python3-pip
+python3-systemd
+python3-venv
 tar
 tcpdump
 unzip
diff --git a/files/debs/horizon b/files/debs/horizon
index 1f45b54..4833289 100644
--- a/files/debs/horizon
+++ b/files/debs/horizon
@@ -1,3 +1,2 @@
 apache2  # NOPRIME
 libapache2-mod-wsgi  # NOPRIME
-libpcre3-dev  # pyScss
diff --git a/files/debs/keystone b/files/debs/keystone
index fd0317b..1cfa6ff 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -2,5 +2,5 @@
 libldap2-dev
 libsasl2-dev
 memcached
-python-mysqldb
+python3-mysqldb
 sqlite3
diff --git a/files/debs/ldap b/files/debs/ldap
index aa3a934..54896bb 100644
--- a/files/debs/ldap
+++ b/files/debs/ldap
@@ -1,3 +1,3 @@
 ldap-utils
-python-ldap
+python3-ldap
 slapd
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index d8bbf59..54d6fa3 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,10 +1,11 @@
 cryptsetup
+dosfstools
 genisoimage
 gir1.2-libosinfo-1.0
 lvm2 # NOPRIME
 netcat-openbsd
 open-iscsi
-python-guestfs # NOPRIME
+python3-guestfs # NOPRIME
 qemu-utils
 sg3-utils
 sysfsutils
diff --git a/files/debs/neutron-common b/files/debs/neutron-common
index e30f678..f6afc5b 100644
--- a/files/debs/neutron-common
+++ b/files/debs/neutron-common
@@ -1,15 +1,14 @@
 acl
 dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:precise
+dnsmasq-utils # for dhcp_release
 ebtables
 haproxy # to serve as metadata proxy inside router/dhcp namespaces
 iptables
 iputils-arping
 iputils-ping
-libmysqlclient-dev
 mysql-server #NOPRIME
 postgresql-server-dev-all
-python-mysqldb
+python3-mysqldb
 rabbitmq-server # NOPRIME
 radvd # NOPRIME
 sqlite3
diff --git a/files/debs/nova b/files/debs/nova
index 5e14aec..0194f00 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -3,22 +3,18 @@
 dnsmasq-base
 dnsmasq-utils # for dhcp_release
 ebtables
-gawk
 genisoimage # required for config_drive
 iptables
 iputils-arping
 kpartx
 libjs-jquery-tablesorter # Needed for coverage html reports
-libmysqlclient-dev
-libvirt-bin # dist:xenial NOPRIME
-libvirt-clients # not:xenial NOPRIME
-libvirt-daemon-system # not:xenial NOPRIME
+libvirt-clients # NOPRIME
+libvirt-daemon-system # NOPRIME
 libvirt-dev # NOPRIME
 mysql-server # NOPRIME
 parted
 pm-utils
-python-mysqldb
-qemu # dist:wheezy,jessie NOPRIME
+python3-mysqldb
 qemu-kvm # NOPRIME
 rabbitmq-server # NOPRIME
 socat # used by ajaxterm
diff --git a/files/debs/os-brick b/files/debs/os-brick
new file mode 100644
index 0000000..4148b0c
--- /dev/null
+++ b/files/debs/os-brick
@@ -0,0 +1,3 @@
+lsscsi
+open-iscsi
+open-iscsi-utils # Deprecated since quantal dist:precise
diff --git a/files/debs/ovn b/files/debs/ovn
new file mode 100644
index 0000000..81eea5e
--- /dev/null
+++ b/files/debs/ovn
@@ -0,0 +1,3 @@
+ovn-central
+ovn-controller-vtep
+ovn-host
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index 189a232..b39cc79 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,4 +1,3 @@
 lvm2
-open-iscsi
 qemu-tools
 tgt # NOPRIME
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
index 0d9da44..2b643b8 100644
--- a/files/rpms-suse/dstat
+++ b/files/rpms-suse/dstat
@@ -1,2 +1 @@
 dstat
-python-psutil
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 0c1a281..f636110 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,9 +1,9 @@
 apache2
 apache2-devel
 bc
-bridge-utils
 ca-certificates-mozilla
 curl
+gawk
 gcc
 gcc-c++
 git-core
@@ -11,7 +11,6 @@
 iputils
 libffi-devel  # pyOpenSSL
 libjpeg8-devel # Pillow 3.0.0
-libmysqlclient-devel # MySQL-python
 libopenssl-devel # to rebuild pyOpenSSL if needed
 libxslt-devel  # lxml
 lsof # useful when debugging
@@ -19,12 +18,13 @@
 net-tools
 openssh
 openssl
+pcre-devel # python-pcre
 postgresql-devel  # psycopg2
 psmisc
+python3-systemd
 python-cmd2 # dist:opensuse-12.3
 python-devel  # pyOpenSSL
 python-xml
-systemd-devel # for systemd-python
 tar
 tcpdump
 unzip
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index 9ece115..9c724cb 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,7 +1,9 @@
+cdrkit-cdrtools-compat # dist:sle12
 cryptsetup
-genisoimage
+dosfstools
 libosinfo
 lvm2
+mkisofs # not:sle12
 open-iscsi
 sg3_utils
 # Stuff for diablo volumes
diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common
index d1cc73f..e3799a9 100644
--- a/files/rpms-suse/neutron-common
+++ b/files/rpms-suse/neutron-common
@@ -5,7 +5,6 @@
 haproxy # to serve as metadata proxy inside router/dhcp namespaces
 iptables
 iputils
-mariadb # NOPRIME
 rabbitmq-server # NOPRIME
 radvd # NOPRIME
 sqlite3
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index ae115d2..1cc2f62 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,17 +1,17 @@
+cdrkit-cdrtools-compat # dist:sle12
 conntrack-tools
 curl
 dnsmasq
 dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
 ebtables
-gawk
-genisoimage # required for config_drive
 iptables
 iputils
 kpartx
 kvm # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
-mariadb # NOPRIME
+# mkisofs is required for config_drive
+mkisofs # not:sle12
 parted
 polkit
 # qemu as fallback if kvm cannot be used
diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick
new file mode 100644
index 0000000..67b33a9
--- /dev/null
+++ b/files/rpms-suse/os-brick
@@ -0,0 +1,2 @@
+lsscsi
+open-iscsi
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 3bc4e7a..375f93e 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,3 @@
-iscsi-initiator-utils
 lvm2
 qemu-img
-scsi-target-utils # not:rhel7,f24,f25,f26 NOPRIME
-targetcli # dist:rhel7,f24,f25,f26 NOPRIME
+targetcli
diff --git a/files/rpms/dstat b/files/rpms/dstat
index 0d9da44..6524bed 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1,2 +1 @@
-dstat
-python-psutil
+pcp-system-tools
diff --git a/files/rpms/general b/files/rpms/general
index f3f8708..33da0a5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,7 +1,7 @@
 bc
-bridge-utils
 curl
 dbus
+gawk
 gcc
 gcc-c++
 gettext  # used for compiling message catalogs
@@ -9,26 +9,25 @@
 graphviz # needed only for docs
 httpd
 httpd-devel
-iptables-services  # NOPRIME f23,f24,f25,f26
-java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f23,f24,f25,f26
+iptables-services
+java-1.8.0-openjdk-headless
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
 libxslt-devel # lxml
 libyaml-devel
-mariadb-devel  # MySQL-python
 net-tools
 openssh-server
 openssl
 openssl-devel # to rebuild pyOpenSSL if needed
+pcre-devel # for python-pcre
 pkgconfig
 postgresql-devel  # psycopg2
 psmisc
-pyOpenSSL # version in pip uses too much memory
-python-devel
+python3-devel
+python3-pip
+python3-systemd
 redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # for systemd-python
 tar
 tcpdump
 unzip
diff --git a/files/rpms/horizon b/files/rpms/horizon
index aeb2cb5..a88552b 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,5 +1,2 @@
-Django
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
-pcre-devel  # pyScss
-pyxattr
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 26c5ced..68e5472 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,4 +1,5 @@
 cryptsetup
+dosfstools
 genisoimage
 iscsi-initiator-utils
 libosinfo
diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/n-spice b/files/rpms/n-spice
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common
index 0cc8d11..fe25f57 100644
--- a/files/rpms/neutron-common
+++ b/files/rpms/neutron-common
@@ -5,8 +5,6 @@
 haproxy # to serve as metadata proxy inside router/dhcp namespaces
 iptables
 iputils
-mysql-devel
-mysql-server # NOPRIME
 openvswitch # NOPRIME
 rabbitmq-server # NOPRIME
 radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 64ed480..8ea8ccc 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,19 +1,13 @@
 conntrack-tools
 curl
-dnsmasq # for nova-network
+dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
 ebtables
-gawk
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f23,f24,f25,f26
+kernel-modules
 kpartx
-libxml2-python
-m2crypto
-mysql-devel
-mysql-server # NOPRIME
-numpy # needed by websockify for spice console
 parted
 polkit
 rabbitmq-server # NOPRIME
diff --git a/files/rpms/os-brick b/files/rpms/os-brick
new file mode 100644
index 0000000..14ff870
--- /dev/null
+++ b/files/rpms/os-brick
@@ -0,0 +1,2 @@
+iscsi-initiator-utils
+lsscsi
diff --git a/files/rpms/ovn b/files/rpms/ovn
new file mode 100644
index 0000000..698e57b
--- /dev/null
+++ b/files/rpms/ovn
@@ -0,0 +1,3 @@
+ovn-central
+ovn-host
+ovn-vtep
diff --git a/files/rpms/swift b/files/rpms/swift
index 2e09cec..18c957c 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,8 +1,7 @@
 curl
 liberasurecode-devel
 memcached
-pyxattr
-rsync-daemon # dist:f23,f24,f25,f26
+rsync-daemon
 sqlite
 xfsprogs
-xinetd
+xinetd # not:f34
diff --git a/functions b/functions
index 8b69c73..ccca5cd 100644
--- a/functions
+++ b/functions
@@ -18,8 +18,10 @@
 FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 source ${FUNC_DIR}/functions-common
 source ${FUNC_DIR}/inc/ini-config
+source ${FUNC_DIR}/inc/meta-config
 source ${FUNC_DIR}/inc/python
 source ${FUNC_DIR}/inc/rootwrap
+source ${FUNC_DIR}/inc/async
 
 # Save trace setting
 _XTRACE_FUNCTIONS=$(set +o | grep xtrace)
@@ -76,6 +78,48 @@
     fi
 }
 
+# Generate image property arguments for OSC
+#
+# Arguments: properties, one per argument, of the form propname=value
+#
+# Result is --property propname1=value1 --property propname2=value2
+function _image_properties_to_arg {
+    local result=""
+    for property in $*; do
+        result+=" --property $property"
+    done
+    echo $result
+}
+
+# Upload an image to glance using the configured mechanism
+#
+# Arguments:
+#  image name
+#  container format
+#  disk format
+#  path to image file
+#  optional properties (format of propname=value)
+#
+function _upload_image {
+    local image_name="$1"
+    shift
+    local container="$1"
+    shift
+    local disk="$1"
+    shift
+    local image="$1"
+    shift
+    local properties
+    local useimport
+
+    properties=$(_image_properties_to_arg $*)
+
+    if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+        useimport="--import"
+    fi
+
+    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
+}
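A minimal usage sketch of the new helper (image name, path, and properties are hypothetical):

    _upload_image "cirros-test" bare qcow2 /tmp/cirros-test.qcow2 \
        hw_rng_model=virtio os_type=linux
    # With GLANCE_USE_IMPORT_WORKFLOW=True the same call runs through
    # glance's interoperable image import (--import) instead of a direct upload.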
 
 # Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
@@ -117,7 +161,7 @@
     # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
     if [[ "$image_url" =~ 'openvz' ]]; then
         image_name="${image_fname%.tar.gz}"
-        openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
+        _upload_image "$image_name" ami ami "$image"
         return
     fi
 
@@ -231,42 +275,8 @@
         vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
         vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
 
-        openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
-        return
-    fi
+        _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter"
 
-    # XenServer-vhd-ovf-format images are provided as .vhd.tgz
-    # and should not be decompressed prior to loading
-    if [[ "$image_url" =~ '.vhd.tgz' ]]; then
-        image_name="${image_fname%.vhd.tgz}"
-        local force_vm_mode=""
-        if [[ "$image_name" =~ 'cirros' ]]; then
-            # Cirros VHD image currently only boots in PV mode.
-            # Nova defaults to PV for all VHD images, but
-            # the glance setting is needed for booting
-            # directly from volume.
-            force_vm_mode="--property vm_mode=xen"
-        fi
-        openstack \
-            --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
-            image create \
-            "$image_name" --public \
-            --container-format=ovf --disk-format=vhd \
-            $force_vm_mode < "${image}"
-        return
-    fi
-
-    # .xen-raw.tgz suggests a Xen capable raw image inside a tgz.
-    # and should not be decompressed prior to loading.
-    # Setting metadata, so PV mode is used.
-    if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
-        image_name="${image_fname%.xen-raw.tgz}"
-        openstack \
-            --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
-            image create \
-            "$image_name" --public \
-            --container-format=tgz --disk-format=raw \
-            --property vm_mode=xen < "${image}"
         return
     fi
 
@@ -277,13 +287,7 @@
             die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image"
         fi
 
-        openstack \
-            --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
-            image create \
-            "$image_name" --public \
-            --container-format=bare --disk-format=ploop \
-            --property hypervisor_type=vz \
-            --property vm_mode=$vm_mode < "${image}"
+        _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode
         return
     fi
 
@@ -293,6 +297,15 @@
     local container_format=""
     local unpack=""
     local img_property=""
+
+    # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model
+    # to libvirt in the image properties.
+    if [[ "$VIRT_DRIVER" == "libvirt" ]]; then
+        if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then
+            img_property="hw_rng_model=virtio"
+        fi
+    fi
+
     case "$image_fname" in
         *.tar.gz|*.tgz)
             # Extract ami and aki files
@@ -341,6 +354,12 @@
             disk_format=qcow2
             container_format=bare
             ;;
+        *.qcow2.xz)
+            image_name=$(basename "$image" ".qcow2.xz")
+            disk_format=qcow2
+            container_format=bare
+            unpack=unxz
+            ;;
         *.raw)
             image_name=$(basename "$image" ".raw")
             disk_format=raw
@@ -364,20 +383,30 @@
     esac
 
     if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then
-        img_property="--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
+        img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0"
     fi
 
     if is_arch "aarch64"; then
-        img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'"
+        img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'"
     fi
 
     if [ "$container_format" = "bare" ]; then
         if [ "$unpack" = "zcat" ]; then
-            openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+            _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property
         elif [ "$unpack" = "bunzip2" ]; then
-            openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}")
+            _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property
+        elif [ "$unpack" = "unxz" ]; then
+            # NOTE(brtknr): unxz the file first and cleanup afterwards to
+            # prevent timeout while Glance tries to upload image (e.g. to Swift).
+            local tmp_dir
+            local image_path
+            tmp_dir=$(mktemp -d)
+            image_path="$tmp_dir/$image_name"
+            unxz -cv "${image}" > "$image_path"
+            _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property
+            rm -rf $tmp_dir
         else
-            openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
+            _upload_image "$image_name" $container_format $disk_format "$image" $img_property
         fi
     else
         # Use glance client to add the kernel the root filesystem.
@@ -385,12 +414,12 @@
         # kernel for use when uploading the root filesystem.
         local kernel_id="" ramdisk_id="";
         if [ -n "$kernel" ]; then
-            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
         fi
         if [ -n "$ramdisk" ]; then
-            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
         fi
-        openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+        _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
     fi
 }
 
@@ -441,12 +470,22 @@
 function wait_for_compute {
     local timeout=$1
     local rval=0
+    local compute_hostname
     time_start "wait_for_service"
+    compute_hostname=$(iniget $NOVA_CONF DEFAULT host)
+    if [[ -z $compute_hostname ]]; then
+        compute_hostname=$(hostname)
+    fi
     timeout $timeout bash -x <<EOF || rval=$?
         ID=""
         while [[ "\$ID" == "" ]]; do
             sleep 1
-            ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname` --service nova-compute -c ID -f value)
+            if [[ "$VIRT_DRIVER" = 'fake' ]]; then
+                # When using the fake driver, the compute hostnames have a suffix of 1 to NUMBER_FAKE_NOVA_COMPUTE
+                ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname`1 --service nova-compute -c ID -f value)
+            else
+                ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host "$compute_hostname" --service nova-compute -c ID -f value)
+            fi
         done
 EOF
     time_stop "wait_for_service"
@@ -460,7 +499,7 @@
 
 
 # ping check
-# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK``
+# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``PRIVATE_NETWORK``
 # ping_check <ip> [boot-timeout] [from_net] [expected]
 function ping_check {
     local ip=$1
@@ -474,12 +513,9 @@
     # if we don't specify a from_net we're expecting things to work
     # fine from our local box.
     if [[ -n "$from_net" ]]; then
+        # TODO(stephenfin): Is there any way neutron could be disabled now?
         if is_service_enabled neutron; then
             ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
-        elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
-            # there is no way to address the multihost / private case, bail here for compatibility.
-            # TODO: remove this cruft and redo code to handle this at the caller level.
-            return
         fi
     fi
 
@@ -503,13 +539,13 @@
 function get_instance_ip {
     local vm_id=$1
     local network_name=$2
-    local nova_result
+    local addresses
     local ip
 
-    nova_result="$(nova show $vm_id)"
-    ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    addresses=$(openstack server show -c addresses -f value "$vm_id")
+    ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p")
     if [[ $ip = "" ]];then
-        echo "$nova_result"
+        echo "addresses of server $vm_id : $addresses"
         die $LINENO "[Fail] Couldn't get ipaddress of VM"
     fi
     echo $ip
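The sed expression extracts the first IPv4 address for the named network from OSC's flattened addresses field; for example:

    addresses="private=10.0.0.4, fdb3:1fcc:5b76:0:f816:3eff:fe0c:4d6a"
    echo $addresses | sed -n "s/^.*private=\([0-9\.]*\).*$/\1/p"
    # -> 10.0.0.4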
@@ -628,40 +664,29 @@
 # This sets up defaults we like in devstack for logging for tracking
 # down issues, and makes sure everything is done the same between
 # projects.
+# NOTE(jh): Historically this function switched between three different
+# functions: setup_systemd_logging, setup_colorized_logging and
+# setup_standard_logging_identity. Since we always run with systemd now,
+# this could be cleaned up, but the other functions may still be in use
+# by plugins. Since deprecations haven't worked in the past, we'll just
+# leave them in place.
 function setup_logging {
-    local conf_file=$1
-    local other_cond=${2:-"False"}
-    if [[ "$USE_SYSTEMD" == "True" ]]; then
-        setup_systemd_logging $conf_file
-    elif [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then
-        setup_colorized_logging $conf_file
-    else
-        setup_standard_logging_identity $conf_file
-    fi
+    setup_systemd_logging $1
 }
 
 # This function sets log formatting options for colorizing log
 # output to stdout. It is meant to be called by lib modules.
-# The last two parameters are optional and can be used to specify
-# non-default value for project and user format variables.
-# Defaults are respectively 'project_name' and 'user_name'
-#
-# setup_colorized_logging something.conf SOMESECTION
 function setup_colorized_logging {
     local conf_file=$1
-    local conf_section="DEFAULT"
-    local project_var="project_name"
-    local user_var="user_name"
     # Add color to logging output
-    iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %("$project_var")s %("$user_var")s%(color)s] %(instance)s%(color)s%(message)s"
-    iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
-    iniset $conf_file $conf_section logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
-    iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
+    iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
+    iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
+    iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
+    iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
 }
 
 function setup_systemd_logging {
     local conf_file=$1
-    local conf_section="DEFAULT"
     # NOTE(sdague): this is a nice to have, and means we're using the
     # native systemd path, which provides for things like search on
     # request-id. However, there may be an eventlet interaction here,
@@ -669,16 +694,16 @@
     USE_JOURNAL=$(trueorfalse False USE_JOURNAL)
     local pidstr=""
     if [[ "$USE_JOURNAL" == "True" ]]; then
-        iniset $conf_file $conf_section use_journal "True"
+        iniset $conf_file DEFAULT use_journal "True"
         # if we are using the journal directly, our process id is already correct
     else
         pidstr="(pid=%(process)d) "
     fi
-    iniset $conf_file $conf_section logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}"
+    iniset $conf_file DEFAULT logging_debug_format_suffix "{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}"
 
-    iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
-    iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
-    iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s %(instance)s"
+    iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
+    iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
+    iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s"
 }
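For reference, with USE_JOURNAL=True the iniset calls above leave the service's DEFAULT section looking roughly like:

    [DEFAULT]
    use_journal = True
    logging_debug_format_suffix = {{%(funcName)s %(pathname)s:%(lineno)d}}
    logging_exception_prefix = ERROR %(name)s %(instance)s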
 
 function setup_standard_logging_identity {
@@ -702,23 +727,22 @@
 fi
 
 
-# create_disk - Create backing disk
+# create_disk - Create, configure, and mount a backing disk
 function create_disk {
     local node_number
     local disk_image=${1}
     local storage_data_dir=${2}
     local loopback_disk_size=${3}
+    local key
 
-    # Create a loopback disk and format it to XFS.
-    if [[ -e ${disk_image} ]]; then
-        if egrep -q ${storage_data_dir} /proc/mounts; then
-            sudo umount ${storage_data_dir}/drives/sdb1
-            sudo rm -f ${disk_image}
-        fi
-    fi
+    key=$(echo $disk_image | sed 's#/.##')
+    key="devstack-$key"
 
-    sudo mkdir -p ${storage_data_dir}/drives/images
+    destroy_disk $disk_image $storage_data_dir
 
+    # Create an empty file of the correct size (and ensure the
+    # directory structure up to that path exists)
+    sudo mkdir -p $(dirname ${disk_image})
     sudo truncate -s ${loopback_disk_size} ${disk_image}
 
     # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
@@ -728,11 +752,31 @@
     # Swift and Ceph.
     sudo mkfs.xfs -f -i size=1024 ${disk_image}
 
-    # Mount the disk with mount options to make it as efficient as possible
-    if ! egrep -q ${storage_data_dir} /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
-            ${disk_image} ${storage_data_dir}
+    # Install a new loopback fstab entry for this disk image, and mount it
+    echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab
+    sudo mkdir -p $storage_data_dir
+    sudo mount -v $storage_data_dir
+}
+
+# Unmount, de-configure, and destroy a backing disk
+function destroy_disk {
+    local disk_image=$1
+    local storage_data_dir=$2
+    local key
+
+    key=$(echo $disk_image | sed 's#/.##')
+    key="devstack-$key"
+
+    # Unmount the target, if mounted
+    if egrep -q $storage_data_dir /proc/mounts; then
+        sudo umount $storage_data_dir
     fi
+
+    # Clear any fstab rules; use an alternate sed address delimiter since
+    # $key may contain slashes, and double quotes so that $key expands
+    sudo sed -i "\#comment=${key}#d" /etc/fstab
+
+    # Delete the file
+    sudo rm -f $disk_image
 }
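The net effect of create_disk is a self-describing loopback mount; for a hypothetical Swift-style invocation it appends an fstab entry along these lines (the comment= tag is what destroy_disk later matches to remove exactly this entry):

    # create_disk /opt/stack/data/swift.img /opt/stack/data/drives/sdb1 10G
    /opt/stack/data/swift.img /opt/stack/data/drives/sdb1 xfs loop,noatime,nodiratime,logbufs=8,comment=devstack-<key> 0 0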
 
 
@@ -805,13 +849,11 @@
 #
 # Write out various useful state information to /etc/devstack-version
 function write_devstack_version {
-    cat - > /tmp/devstack-version <<EOF
+    cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
 DevStack Version: ${DEVSTACK_SERIES}
 Change: $(git log --format="%H %s %ci" -1)
 OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
 EOF
-    sudo install -m 644 /tmp/devstack-version /etc/devstack-version
-    rm /tmp/devstack-version
 }
 
 # Restore xtrace
diff --git a/functions-common b/functions-common
index 030ff8c..11679e4 100644
--- a/functions-common
+++ b/functions-common
@@ -27,7 +27,6 @@
 # - ``RECLONE``
 # - ``REQUIREMENTS_DIR``
 # - ``STACK_USER``
-# - ``TRACK_DEPENDS``
 # - ``http_proxy``, ``https_proxy``, ``no_proxy``
 #
 
@@ -44,12 +43,11 @@
 declare -A -g GITBRANCH
 declare -A -g GITDIR
 
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 KILL_PATH="$(which kill)"
 
 # Save these variables to .stackenv
 STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
-    KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \
+    KEYSTONE_SERVICE_URI \
     LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
     HOST_IPV6 SERVICE_IP_VERSION"
 
@@ -92,7 +90,6 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username demo \
@@ -104,7 +101,6 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack-alt \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username alt_demo \
@@ -116,13 +112,28 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack-admin \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username admin \
         --os-password $ADMIN_PASSWORD \
         --os-project-name admin
 
+    # admin with a system-scoped token -> devstack-system
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-system-admin \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username admin \
+        --os-password $ADMIN_PASSWORD \
+        --os-system-scope all
+
+    cat >> $CLOUDS_YAML <<EOF
+functional:
+  image_name: $DEFAULT_IMAGE_NAME
+EOF
+
     # Clean up any old clouds.yaml files we had lying around
     rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml
 }
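The resulting clouds.yaml entry for the new system-scoped cloud looks roughly like this (URL and password are placeholders; the exact layout is whatever tools/update_clouds_yaml.py emits):

    clouds:
      devstack-system-admin:
        region_name: RegionOne
        auth:
          auth_url: http://203.0.113.10/identity
          username: admin
          password: secret
          system_scope: all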
@@ -228,9 +239,9 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
+    echo "$msg" 1>&2;
     if [[ -n ${LOGDIR} ]]; then
-        echo $msg >> "${LOGDIR}/error.log"
+        echo "$msg" >> "${LOGDIR}/error.log"
     fi
     $xtrace
     return $exitcode
@@ -283,7 +294,7 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg
+    echo "$msg"
     $xtrace
     return $exitcode
 }
@@ -323,9 +334,6 @@
         sudo zypper -n install lsb-release
     elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
         sudo dnf install -y redhat-lsb-core
-    elif [[ -x $(command -v yum 2>/dev/null) ]]; then
-        # all rh patforms (fedora, centos, rhel) have this pkg
-        sudo yum install -y redhat-lsb-core
     else
         die $LINENO "Unable to find or auto-install lsb_release"
     fi
@@ -371,9 +379,14 @@
     elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
         # For Fedora, just use 'f' and the release
         DISTRO="f$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
+    elif is_opensuse; then
         DISTRO="opensuse-$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+        # Tumbleweed uses "n/a" as a codename, and the release is a date
+        # string like 20180218, so not very useful. Leap, however, uses a
+        # release with a dot, for example 15.0.
+        [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \
+            DISTRO="opensuse-tumbleweed"
+    elif is_suse_linux_enterprise; then
         # just use major release
         DISTRO="sle${os_RELEASE%.*}"
     elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
@@ -384,10 +397,6 @@
         # Drop the . release as we assume it's compatible
         # XXX re-evaluate when we get RHEL10
         DISTRO="rhel${os_RELEASE::1}"
-    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
-        DISTRO="xs${os_RELEASE%.*}"
-    elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then
-        DISTRO="${os_VENDOR}${os_RELEASE::1}"
     else
         # We can't make a good choice here.  Setting a sensible DISTRO
         # is part of the problem, but not the major issue -- we really
@@ -440,8 +449,9 @@
 
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
         [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
-        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \
-        [ "$os_VENDOR" = "Virtuozzo" ] || [ "$os_VENDOR" = "kvmibm" ]
+        [ "$os_VENDOR" = "RedHatEnterprise" ] || \
+        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
+        [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
 }
 
 
@@ -449,11 +459,30 @@
 # (openSUSE, SLE).
 # is_suse
 function is_suse {
+    is_opensuse || is_suse_linux_enterprise
+}
+
+
+# Determine if current distribution is an openSUSE distribution
+# is_opensuse
+function is_opensuse {
     if [[ -z "$os_VENDOR" ]]; then
         GetOSVersion
     fi
 
-    [[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]]
+    [[ "$os_VENDOR" =~ (openSUSE) ]]
+}
+
+
+# Determine if current distribution is a SUSE Linux Enterprise (SLE)
+# distribution
+# is_suse_linux_enterprise
+function is_suse_linux_enterprise {
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [[ "$os_VENDOR" =~ (^SUSE) ]]
 }
 
 
@@ -1008,6 +1037,11 @@
     return 1
 }
 
+function is_ironic_enforce_scope {
+    is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+    return 1
+}
+
 
 # Package Functions
 # =================
@@ -1189,10 +1223,16 @@
             if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/glance"
             fi
+            if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+                file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+            fi
         elif [[ $service == c-* ]]; then
             if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/cinder"
             fi
+            if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+                file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+            fi
         elif [[ $service == s-* ]]; then
             if [[ ! $file_to_parse =~ $package_dir/swift ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/swift"
@@ -1201,6 +1241,9 @@
             if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/nova"
             fi
+            if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+                file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+            fi
         elif [[ $service == g-* ]]; then
             if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
                 file_to_parse="${file_to_parse} ${package_dir}/glance"
@@ -1245,6 +1288,30 @@
     $xtrace
 }
 
+# Search plugins for a bindep.txt file
+#
+# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS``
+#
+# Note this is only valid after BINDEP_CMD is setup in stack.sh, and
+# is thus not really intended to be called externally.
+function _get_plugin_bindep_packages {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local bindep_file
+    local packages
+
+    for plugin in ${DEVSTACK_PLUGINS//,/ }; do
+        bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt
+        if [[ -f ${bindep_file} ]]; then
+            # the leading space keeps consecutive plugins' output from running together
+            packages+=" $($BINDEP_CMD -b --file ${bindep_file} || true)"
+        fi
+    done
+    echo "${packages}"
+    $xtrace
+}
+
 # Distro-agnostic package installer
 # Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
 # install_package package [package ...]
@@ -1309,7 +1376,7 @@
     if is_ubuntu; then
         apt_get purge "$@"
     elif is_fedora; then
-        sudo ${YUM:-yum} remove -y "$@" ||:
+        sudo dnf remove -y "$@" ||:
     elif is_suse; then
         sudo zypper remove -y "$@" ||:
     else
@@ -1317,8 +1384,11 @@
     fi
 }
 
-# Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
+# Wrapper for ``dnf`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# The name is kept for backwards compatibility with external
+# callers, despite none of our supported platforms using yum
+# any more.
 # yum_install package [package ...]
 function yum_install {
     local result parse_yum_result
@@ -1326,44 +1396,8 @@
     [[ "$OFFLINE" = "True" ]] && return
 
     time_start "yum_install"
-
-    # This is a bit tricky, because yum -y assumes missing or failed
-    # packages are OK (see [1]).  We want devstack to stop if we are
-    # installing missing packages.
-    #
-    # Thus we manually match on the output (stack.sh runs in a fixed
-    # locale, so lang shouldn't change).
-    #
-    # If yum returns !0, we echo the result as "YUM_FAILED" and return
-    # that from the awk (we're subverting -e with this trick).
-    # Otherwise we use awk to look for failure strings and return "2"
-    # to indicate a terminal failure.
-    #
-    # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
-    parse_yum_result='              \
-        BEGIN { result=0 }          \
-        /^YUM_FAILED/ { result=$2 } \
-        /^No package/ { result=2 }  \
-        /^Failed:/    { result=2 }  \
-        //{ print }                 \
-        END { exit result }'
-    (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
-        | awk "$parse_yum_result" && result=$? || result=$?
-
+    sudo_with_proxies dnf install -y "$@"
     time_stop "yum_install"
-
-    # if we return 1, then the wrapper functions will run an update
-    # and try installing the package again as a defense against bad
-    # mirrors.  This can hide failures, especially when we have
-    # packages that are in the "Failed:" section because their rpm
-    # install scripts failed to run correctly (in this case, the
-    # package looks installed, so when the retry happens we just think
-    # the package is OK, and incorrectly continue on).
-    if [ "$result" == 2 ]; then
-        die "Detected fatal package install failure"
-    fi
-
-    return "$result"
 }
 
 # zypper wrapper to set arguments correctly
@@ -1375,7 +1409,37 @@
     [[ "$(id -u)" = "0" ]] && sudo="env"
     $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
+        zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@"
+}
+
+# Run bindep and install packages it outputs
+#
+# Usage:
+#  install_bindep <path-to-bindep.txt> [profile,profile]
+#
+# Note unlike the bindep command itself, profile(s) specified should
+# be a single, comma-separated string, no spaces.
+function install_bindep {
+    local file=$1
+    local profiles=${2:-""}
+    local pkgs
+
+    if [[ ! -f $file ]]; then
+        warn $LINENO "Can not find bindep file: $file"
+        return
+    fi
+
+    # converting here makes it much easier to work with passing
+    # arguments
+    profiles=${profiles//,/ }
+
+    # Note bindep returns 1 when packages need to be installed, so we
+    # have to ignore its return code for "-e"
+    pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true)
+
+    if [[ -n "${pkgs}" ]]; then
+        install_package ${pkgs}
+    fi
 }
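A sketch of how a plugin might call this, assuming a hypothetical bindep file:

    # files/bindep.txt (hypothetical contents):
    #   libffi-dev [platform:dpkg]
    #   libffi-devel [platform:rpm]
    install_bindep $DEST/my-plugin/files/bindep.txt          # default profile
    install_bindep $DEST/my-plugin/files/bindep.txt test,doc # extra profiles, comma-separated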
 
 function write_user_unit_file {
@@ -1394,7 +1458,7 @@
     iniset -sudo $unitfile "Service" "User" "$user"
     iniset -sudo $unitfile "Service" "ExecStart" "$command"
     iniset -sudo $unitfile "Service" "KillMode" "process"
-    iniset -sudo $unitfile "Service" "TimeoutStopSec" "infinity"
+    iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
     iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
@@ -1438,24 +1502,24 @@
     # do some sanity checks on $cmd to see things we don't expect to work
 
     if [[ "$cmd" =~ "sudo" ]]; then
-        local msg=<<EOF
+        read -r -d '' msg << EOF || true  # read returns 1 for EOF, but it is ok here
 You are trying to use run_process with sudo, this is not going to work under systemd.
 
-If you need to run a service as a user other than $STACK_USER call it with:
+If you need to run a service as a user other than \$STACK_USER call it with:
 
    run_process \$name \$cmd \$group \$user
 EOF
-        die $LINENO $msg
+        die $LINENO "$msg"
     fi
 
     if [[ ! "$cmd" =~ ^/ ]]; then
-        local msg=<<EOF
+        read -r -d '' msg << EOF || true  # read returns 1 for EOF, but it is ok here
 The cmd="$cmd" does not start with an absolute path. It will fail to
 start under systemd.
 
 Please update your run_process stanza to have an absolute path.
 EOF
-        die $LINENO $msg
+        die $LINENO "$msg"
     fi
 
 }
@@ -1548,10 +1612,6 @@
 }
 
 
-function tail_log {
-    deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens"
-}
-
 # Plugin Functions
 # =================
 
@@ -1705,6 +1765,35 @@
     fi
 }
 
+# define_plugin <name>
+#
+# This function is a no-op.  It allows a plugin to define its name so
+# that other plugins may reference it by name.  It should generally be
+# the last component of the canonical git repo name.  E.g.,
+# openstack/devstack-foo should use "devstack-foo" as the name here.
+#
+# This function is currently a no-op, but the value may still be used
+# by external tools (as in plugin_requires) and may be used by
+# devstack in the future.
+#
+# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
+function define_plugin {
+    :
+}
+
+# plugin_requires <name> <other>
+#
+# This function is a no-op.  It is currently used by external tools
+# (such as the devstack module for Ansible) to automatically generate
+# local.conf files.  It is not currently used by devstack itself to
+# resolve dependencies.
+#
+# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
+# ``other`` is the name of another plugin
+function plugin_requires {
+    :
+}
+
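Taken together, a plugin's devstack/settings file might declare (plugin names hypothetical):

    define_plugin devstack-foo
    plugin_requires devstack-foo devstack-bar   # foo expects the bar plugin as well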
 
 # Service Functions
 # =================
@@ -1841,10 +1930,6 @@
 #   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
 #   **s-** services will be enabled. This will be deprecated in the future.
 #
-# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
-# We also need to make sure to treat **n-cell-region** and **n-cell-child**
-# as enabled in this case.
-#
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
 function is_service_enabled {
@@ -1867,7 +1952,6 @@
         # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
         #                are implemented
 
-        [[ ${service} == n-cell-* && ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && enabled=0
         [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0
         [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0
@@ -1968,11 +2052,7 @@
         return 0
     fi
 
-    if [[ $TRACK_DEPENDS = True ]]; then
-        sudo_cmd="env"
-    else
-        sudo_cmd="sudo"
-    fi
+    sudo_cmd="sudo"
 
     $xtrace
     $sudo_cmd $@
@@ -2022,6 +2102,11 @@
     fi
 }
 
+# Remove "[]" around urlquoted IPv6 addresses
+function ipv6_unquote {
+    echo $1 | tr -d []
+}
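For example:

    ipv6_unquote "[fd12:3456:789a::1]"   # -> fd12:3456:789a::1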
+
 # Gracefully cp only if source file/dir exists
 # cp_it source destination
 function cp_it {
@@ -2098,7 +2183,7 @@
 function python3_version {
     local python3_version
     python3_version=$(_get_python_version python3)
-    echo "python${python_version}"
+    echo "python${python3_version}"
 }
 
 
@@ -2242,6 +2327,10 @@
 }
 
 function oscwrap {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local out
     local rc
     local start
@@ -2256,6 +2345,7 @@
     echo $((end - start)) >> $OSCWRAP_TIMER_FILE
 
     echo "$out"
+    $xtrace
     return $rc
 }
 
@@ -2274,12 +2364,7 @@
 
 function cleanup_oscwrap {
     local total=0
-    if python3_enabled ; then
-        local python=python3
-    else
-        local python=python
-    fi
-    total=$(cat $OSCWRAP_TIMER_FILE | $python -c "import sys; print(sum(int(l) for l in sys.stdin))")
+    total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))")
     _TIME_TOTAL["osc"]=$total
     rm $OSCWRAP_TIMER_FILE
 }
@@ -2329,6 +2414,13 @@
     $xtrace
 }
 
+function clean_pyc_files {
+    # Clean up all *.pyc files
+    if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
+        sudo find $DEST -name "*.pyc" -delete
+    fi
+}
+
 # Restore xtrace
 $_XTRACE_FUNCTIONS_COMMON
 
diff --git a/inc/async b/inc/async
new file mode 100644
index 0000000..56338f5
--- /dev/null
+++ b/inc/async
@@ -0,0 +1,256 @@
+#!/bin/bash
+#
+# Symbolic asynchronous tasks for devstack
+#
+# Usage:
+#
+#  async_runfunc my_shell_func foo bar baz
+#
+#  ... do other stuff ...
+#
+#  async_wait my_shell_func
+#
+
+DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL)
+_ASYNC_BG_TIME=0
+
+# Keep track of how much total time was spent in background tasks
+# Takes a job runtime in ms.
+function _async_incr_bg_time {
+    local elapsed_ms="$1"
+    _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms))
+}
+
+# Get the PID of a named future to wait on
+function async_pidof {
+    local name="$1"
+    local inifile="${DEST}/async/${name}.ini"
+
+    if [ -f "$inifile" ]; then
+        iniget $inifile job pid
+    else
+        echo 'UNKNOWN'
+        return 1
+    fi
+}
+
+# Log a message about a job. If the message contains "%command" then the
+# full command line of the job will be substituted in the output
+function async_log {
+    local name="$1"
+    shift
+    local message="$*"
+    local inifile=${DEST}/async/${name}.ini
+    local pid
+    local command
+
+    pid=$(iniget $inifile job pid)
+    command=$(iniget $inifile job command | tr '#' '-')
+    message=$(echo "$message" | sed "s#%command#$command#g")
+
+    echo "[$BASHPID Async ${name}:${pid}]: $message"
+}
+
+# Inner function that actually runs the requested task. We wrap it like this
+# just so we can emit a finish message as soon as the work is done, making
+# the tracking message easier to find just before an error.
+function async_inner {
+    local name="$1"
+    local rc
+    local fifo="${DEST}/async/${name}.fifo"
+    shift
+    set -o xtrace
+    if $* >${DEST}/async/${name}.log 2>&1; then
+        rc=0
+        set +o xtrace
+        async_log "$name" "finished successfully"
+    else
+        rc=$?
+        set +o xtrace
+        async_log "$name" "FAILED with rc $rc"
+    fi
+    iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N")
+    # Block on the fifo until we are signaled to exit by the main process
+    cat $fifo
+    return $rc
+}
+
+# Run something async. Takes a symbolic name and a list of arguments of
+# what to run. Ideally this would be rarely used and async_runfunc() would
+# be used everywhere for readability.
+#
+# This spawns the work in a background worker and records a "future" to be
+# collected by a later call to async_wait()
+function async_run {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local name="$1"
+    shift
+    local inifile=${DEST}/async/${name}.ini
+    local fifo=${DEST}/async/${name}.fifo
+
+    touch $inifile
+    iniset $inifile job command "$*"
+    iniset $inifile job start_time $(date +%s%3N)
+
+    if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then
+        mkfifo $fifo
+        async_inner $name $* &
+        iniset $inifile job pid $!
+        async_log "$name" "running: %command"
+        $xtrace
+    else
+        iniset $inifile job pid "self"
+        async_log "$name" "Running synchronously: %command"
+        $xtrace
+        $*
+        return $?
+    fi
+}
+
+# Shortcut for running a shell function async. Uses the function name as the
+# async name.
+function async_runfunc {
+    async_run $1 $*
+}
+
+# Dump some information to help debug a failed wait
+function async_wait_dump {
+    local failpid=$1
+
+    echo "=== Wait failure dump from $BASHPID ==="
+    echo "Processes:"
+    ps -f
+    echo "Waiting jobs:"
+    for name in $(ls ${DEST}/async/*.ini); do
+        echo "Job $name :"
+        cat "$name"
+    done
+    echo "Failed PID status:"
+    sudo cat /proc/$failpid/status
+    sudo cat /proc/$failpid/cmdline
+    echo "=== End wait failure dump ==="
+}
+
+# Wait for an async future to complete. May return immediately if already
+# complete, or if the future has already been waited on (avoid this). May
+# block until the future completes.
+function async_wait {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local pid rc running inifile runtime fifo
+    rc=0
+    for name in $*; do
+        running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l)
+        inifile="${DEST}/async/${name}.ini"
+        fifo="${DEST}/async/${name}.fifo"
+
+        if pid=$(async_pidof "$name"); then
+            async_log "$name" "Waiting for completion of %command" \
+                      "running on PID $pid ($running other jobs running)"
+            time_start async_wait
+            if [[ "$pid" != "self" ]]; then
+                # Signal the child to go ahead and exit since we are about to
+                # wait for it to collect its status.
+                async_log "$name" "Signaling child to exit"
+                echo WAKEUP > $fifo
+                async_log "$name" "Signaled"
+                # Do not actually call wait if we ran synchronously
+                if wait $pid; then
+                    rc=0
+                else
+                    rc=$?
+                fi
+                cat ${DEST}/async/${name}.log
+                rm -f $fifo
+            fi
+            time_stop async_wait
+            local start_time
+            local end_time
+            start_time=$(iniget $inifile job start_time)
+            end_time=$(iniget $inifile job end_time)
+            _async_incr_bg_time $(($end_time - $start_time))
+            runtime=$((($end_time - $start_time) / 1000))
+            async_log "$name" "finished %command with result" \
+                      "$rc in $runtime seconds"
+            rm -f $inifile
+            if [ $rc -ne 0 ]; then
+                async_wait_dump $pid
+                echo Stopping async wait due to error: $*
+                break
+            fi
+        else
+            # This could probably be removed - it is really just here
+            # to help notice if you wait for something by the wrong
+            # name, but it also shows up for things we didn't start
+            # because they were not enabled.
+            echo Not waiting for async task $name that we never started or \
+                 has already been waited for
+        fi
+    done
+
+    $xtrace
+    return $rc
+}
+
+# Check for uncollected futures and wait on them
+function async_cleanup {
+    local name
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    for inifile in $(find ${DEST}/async -name '*.ini'); do
+        name=$(basename $inifile .ini)
+        echo "WARNING: uncollected async future $name"
+        async_wait $name || true
+    done
+}
+
+# Make sure our async dir is created and clean
+function async_init {
+    local async_dir=${DEST}/async
+
+    # Clean any residue if present from previous runs
+    rm -Rf $async_dir
+
+    # Make sure we have a state directory
+    mkdir -p $async_dir
+}
+
+function async_print_timing {
+    local bg_time_minus_wait
+    local elapsed_time
+    local serial_time
+    local speedup
+
+    if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+        return 0
+    fi
+
+    # The logic here is: All the background task time would be
+    # serialized if we did not do them in the background. So we can
+    # add that to the elapsed time for the whole run. However, time we
+    # spend waiting for async things to finish adds to the elapsed
+    # time, but is time where we're not doing anything useful. Thus,
+    # we subtract that from the would-be-serialized time.
+
+    bg_time_minus_wait=$((\
+            ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000))
+    elapsed_time=$(($(date "+%s") - $_TIME_BEGIN))
+    serial_time=$(($elapsed_time + $bg_time_minus_wait))
+
+    echo
+    echo "================="
+    echo " Async summary"
+    echo "================="
+    echo " Time spent in the background minus waits: $bg_time_minus_wait sec"
+    echo " Elapsed time: $elapsed_time sec"
+    echo " Time if we did everything serially: $serial_time sec"
+    echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}")
+}
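Putting the pieces together, the intended lifecycle of these helpers (all defined above; with DEVSTACK_PARALLEL=False every step degrades to synchronous execution) is roughly:

    async_init                      # once, early in stack.sh: start with a clean $DEST/async
    async_runfunc install_tempest   # fork the function; record a .ini future and a .fifo
    # ... unrelated foreground work continues here ...
    async_wait install_tempest      # signal the child, collect its rc, replay its log
    async_cleanup                   # warn about (and collect) any forgotten futures
    async_print_timing              # report elapsed time vs. estimated serial time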
diff --git a/inc/ini-config b/inc/ini-config
index 68d48d1..7993682 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -88,17 +88,22 @@
 }
 
 # Determine whether the given option is present in the INI file
-# ini_has_option config-file section option
+# ini_has_option [-sudo] config-file section option
 function ini_has_option {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
     local file=$1
     local section=$2
     local option=$3
     local line
 
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
     $xtrace
     [ -n "$line" ]
 }
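 
 # Example (hypothetical file): test for an option in a root-owned INI
 # file, reading it via sudo:
 #
 #   if ini_has_option -sudo /etc/nova/nova.conf DEFAULT debug; then
 #       echo "debug is explicitly set"
 #   fi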
@@ -173,8 +178,10 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
+    local sudo_option=""
     if [ $1 == "-sudo" ]; then
         sudo="sudo "
+        sudo_option="-sudo "
         shift
     fi
     local file=$1
@@ -187,11 +194,11 @@
         return
     fi
 
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+    if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then
         # Add section at the end
         echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
     fi
-    if ! ini_has_option "$file" "$section" "$option"; then
+    if ! ini_has_option $sudo_option "$file" "$section" "$option"; then
         # Add it
         $sudo sed -i -e "/^\[$section\]/ a\\
 $option = $value
@@ -200,7 +207,7 @@
         local sep
         sep=$(echo -ne "\x01")
         # Replace it
-        $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+        $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('"${option}"'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
     fi
     $xtrace
 }
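 
 # Example (hypothetical file): with the quoting fix above, a typical
 # root-owned config can be edited as
 #
 #   iniset -sudo /etc/cinder/cinder.conf DEFAULT debug True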
@@ -228,7 +235,7 @@
         # the reverse order. Do a reverse here to keep the original order.
         values="$v ${values}"
     done
-    if ! grep -q "^\[$section\]" "$file"; then
+    if ! $sudo grep -q "^\[$section\]" "$file"; then
         # Add section at the end
         echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
     else
diff --git a/inc/python b/inc/python
index 9c810ec..9382d35 100644
--- a/inc/python
+++ b/inc/python
@@ -21,6 +21,14 @@
 # project.  A null value installs to the system Python directories.
 declare -A -g PROJECT_VENV
 
+# Utility Functions
+# =================
+
+# Joins bash array of extras with commas as expected by other functions
+function join_extras {
+    local IFS=","
+    echo "$*"
+}
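+
+# Example: build the comma-separated extras string from a bash array.
+#
+#   extras=(ldap memcache)
+#   join_extras "${extras[@]}"    # prints "ldap,memcache"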
 
 # Python Functions
 # ================
@@ -29,6 +37,10 @@
 # get_pip_command
 function get_pip_command {
     local version="$1"
+    if [ -z "$version" ]; then
+        die $LINENO "pip python version is not set."
+    fi
+
     # NOTE(dhellmann): I don't know if we actually get a pip3.4-python
     # under any circumstances.
     which pip${version} || which pip${version}-python
@@ -49,11 +61,9 @@
     fi
     $xtrace
 
-    if is_fedora || is_suse; then
-        echo "/usr/bin"
-    else
-        echo "/usr/local/bin"
-    fi
+    local PYTHON_PATH=/usr/local/bin
+    is_suse && PYTHON_PATH=/usr/bin
+    echo $PYTHON_PATH
 }
 
 # Wrapper for ``pip install`` that only installs versions of libraries
@@ -78,147 +88,40 @@
 function pip_install_gr_extras {
     local name=$1
     local extras=$2
-    local clean_name
-    clean_name=$(get_from_global_requirements $name)
-    pip_install $clean_name[$extras]
+    local version_constraints
+    version_constraints=$(get_version_constraints_from_global_requirements $name)
+    pip_install $name[$extras]$version_constraints
 }
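 
 # Example (hypothetical pin): if global-requirements.txt contains
 # "oslo.db>=4.27.0", then
 #
 #   pip_install_gr_extras oslo.db fixtures,mysql
 #
 # expands to roughly "pip_install oslo.db[fixtures,mysql]>=4.27.0".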
 
-# Determine the python versions supported by a package
-function get_python_versions_for_package {
-    local name=$1
-    cd $name && python setup.py --classifiers \
-        | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' '
-}
-
-# Check for python3 classifier in local directory
-function check_python3_support_for_package_local {
-    local name=$1
-    cd $name
-    set +e
-    classifier=$(python setup.py --classifiers \
-        | grep 'Programming Language :: Python :: 3$')
-    set -e
-    echo $classifier
-}
-
-# Check for python3 classifier on pypi
-function check_python3_support_for_package_remote {
-    local name=$1
-    set +e
-    classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \
-        | grep '"Programming Language :: Python :: 3"')
-    set -e
-    echo $classifier
-}
-
-# python3_enabled_for() checks if the service(s) specified as arguments are
-# enabled by the user in ``ENABLED_PYTHON3_PACKAGES``.
+# enable_python3_package() -- no-op for backwards compatibility
 #
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
-# python3_enabled_for dir [dir ...]
-function python3_enabled_for {
-    local xtrace
-    xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-
-    local enabled=1
-    local dirs=$@
-    local dir
-    for dir in ${dirs}; do
-        [[ ,${ENABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
-    done
-
-    $xtrace
-    return $enabled
-}
-
-# python3_disabled_for() checks if the service(s) specified as arguments are
-# disabled by the user in ``DISABLED_PYTHON3_PACKAGES``.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# Uses global ``DISABLED_PYTHON3_PACKAGES``
-# python3_disabled_for dir [dir ...]
-function python3_disabled_for {
-    local xtrace
-    xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-
-    local enabled=1
-    local dirs=$@
-    local dir
-    for dir in ${dirs}; do
-        [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
-    done
-
-    $xtrace
-    return $enabled
-}
-
-# enable_python3_package() adds the repositories passed as argument to the
-# ``ENABLED_PYTHON3_PACKAGES`` list, if they are not already present.
-#
-# For example:
-#   enable_python3_package nova
-#
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
 # enable_python3_package dir [dir ...]
 function enable_python3_package {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
-    local tmpsvcs="${ENABLED_PYTHON3_PACKAGES}"
-    local python3
-    for dir in $@; do
-        if [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]]; then
-            warn $LINENO "Attempt to enable_python3_package ${dir} when it has been disabled"
-            continue
-        fi
-        if ! python3_enabled_for $dir; then
-            tmpsvcs+=",$dir"
-        fi
-    done
-    ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$tmpsvcs")
+    echo "It is no longer necessary to call enable_python3_package()."
 
     $xtrace
 }
 
-# disable_python3_package() prepares the services passed as argument to be
-# removed from the ``ENABLED_PYTHON3_PACKAGES`` list, if they are present.
+# disable_python3_package() -- no-op for backwards compatibility
 #
-# For example:
-#   disable_python3_package swift
-#
-# Uses globals ``ENABLED_PYTHON3_PACKAGES`` and ``DISABLED_PYTHON3_PACKAGES``
 # disable_python3_package dir [dir ...]
 function disable_python3_package {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
-    local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}"
-    local enabled_svcs=",${ENABLED_PYTHON3_PACKAGES},"
-    local dir
-    for dir in $@; do
-        disabled_svcs+=",$dir"
-        if python3_enabled_for $dir; then
-            enabled_svcs=${enabled_svcs//,$dir,/,}
-        fi
-    done
-    DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs")
-    ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$enabled_svcs")
+    echo "It is no longer possible to call disable_python3_package()."
 
     $xtrace
 }
 
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``PIP_UPGRADE``, ``*_proxy``,
 # Usage:
 #  pip_install pip_arguments
 function pip_install {
@@ -262,92 +165,27 @@
     # this works (for now...)
     local package_dir=${!#%\[*\]}
 
-    if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
-        # TRACK_DEPENDS=True installation creates a circular dependency when
-        # we attempt to install virtualenv into a virtualenv, so we must global
-        # that installation.
-        source $DEST/.venv/bin/activate
-        local cmd_pip=$DEST/.venv/bin/pip
+    if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+        local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
         local sudo_pip="env"
     else
-        if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
-            local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
-            local sudo_pip="env"
-        else
-            local cmd_pip
-            cmd_pip=$(get_pip_command $PYTHON2_VERSION)
-            local sudo_pip="sudo -H"
-            if python3_enabled; then
-                # Look at the package classifiers to find the python
-                # versions supported, and if we find the version of
-                # python3 we've been told to use, use that instead of the
-                # default pip
-                local python_versions
-
-                # Special case some services that have experimental
-                # support for python3 in progress, but don't claim support
-                # in their classifier
-                echo "Check python version for : $package_dir"
-                if python3_disabled_for ${package_dir##*/}; then
-                    echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
-                elif python3_enabled_for ${package_dir##*/}; then
-                    echo "Explicitly using $PYTHON3_VERSION version to install $package_dir based on ENABLED_PYTHON3_PACKAGES"
-                    sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                    cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                elif [[ -d "$package_dir" ]]; then
-                    python_versions=$(get_python_versions_for_package $package_dir)
-                    if [[ $python_versions =~ $PYTHON3_VERSION ]]; then
-                        echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on classifiers"
-                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                    else
-                        # The package may not have yet advertised python3.5
-                        # support so check for just python3 classifier and log
-                        # a warning.
-                        python3_classifier=$(check_python3_support_for_package_local $package_dir)
-                        if [[ ! -z "$python3_classifier" ]]; then
-                            echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings"
-                            sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                            cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                        fi
-                    fi
-                else
-                    # Check pypi as we don't have the package on disk
-                    package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*')
-                    python3_classifier=$(check_python3_support_for_package_remote $package)
-                    if [[ ! -z "$python3_classifier" ]]; then
-                        echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings"
-                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                    fi
-                fi
-            fi
-        fi
+        local cmd_pip="python$PYTHON3_VERSION -m pip"
+        # See
+        #  https://github.com/pypa/setuptools/issues/2232
+        #  http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html
+        # this makes setuptools >=50 use the platform distutils.
+        # We only want to do this on global pip installs, not if
+        # installing in a virtualenv
+        local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib "
+        echo "Using python $PYTHON3_VERSION to install $package_dir"
     fi
 
     cmd_pip="$cmd_pip install"
     # Always apply constraints
     cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
 
-    # FIXME(dhellmann): Need to force multiple versions of pip for
-    # packages like setuptools?
-    local pip_version
-    pip_version=$(python -c "import pip; \
-                        print(pip.__version__.strip('.')[0])")
-    if (( pip_version<6 )); then
-        die $LINENO "Currently installed pip version ${pip_version} does not" \
-            "meet minimum requirements (>=6)."
-    fi
-
     $xtrace
 
-    # Also install test requirements
-    local install_test_reqs=""
-    local test_req="${package_dir}/test-requirements.txt"
-    if [[ -e "$test_req" ]]; then
-        install_test_reqs="-r $test_req"
-    fi
-
     # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
     # the same behaviour of setuptools before version 25.0.0.
     # related issue: https://github.com/pypa/pip/issues/3874
@@ -357,7 +195,7 @@
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
         SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
-        $cmd_pip $upgrade $install_test_reqs \
+        $cmd_pip $upgrade \
         $@
     result=$?
 
@@ -374,9 +212,8 @@
         local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
         local sudo_pip="env"
     else
-        local cmd_pip
-        cmd_pip=$(get_pip_command $PYTHON2_VERSION)
-        local sudo_pip="sudo -H"
+        local cmd_pip="python$PYTHON3_VERSION -m pip"
+        local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
     fi
     # don't error if we can't uninstall, it might not be there
     $sudo_pip $cmd_pip uninstall -y $name || /bin/true
@@ -394,6 +231,19 @@
     echo $required_pkg
 }
 
+# get only version constraints of a package from global requirements file
+# get_version_constraints_from_global_requirements <package>
+function get_version_constraints_from_global_requirements {
+    local package=$1
+    local required_pkg_version_constraint
+    # drop the package name from output (\K)
+    required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    if [[ -z "$required_pkg_version_constraint" ]]; then
+        die $LINENO "Can't find package $package in requirements"
+    fi
+    echo $required_pkg_version_constraint
+}
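+
+# Example (hypothetical requirements line): given a global-requirements
+# entry such as
+#
+#   oslo.log>=3.36.0,!=3.39.0  # Apache-2.0
+#
+# this function prints ">=3.36.0,!=3.39.0": the \K drops the package
+# name from the match and the cut strips the trailing license comment.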
+
 # should we use this library from their git repo, or should we let it
 # get pulled in via pip dependencies.
 function use_library_from_git {
@@ -406,6 +256,9 @@
 # determine if a package was installed from git
 function lib_installed_from_git {
     local name=$1
+    local safe_name
+    safe_name=$(python -c "from pkg_resources import safe_name; \
+        print(safe_name('${name}'))")
     # Note "pip freeze" doesn't always work here, because it tries to
     # be smart about finding the remote of the git repo the package
     # was installed from.  This doesn't work with zuul which clones
@@ -414,28 +267,12 @@
     # The best option seems to be to use "pip list" which will tell
     # you the path an editable install was installed from; for example
     # in response to something like
-    #  pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate'
-    # pip list shows
-    #  bashate (0.5.2.dev19, /tmp/env/src/bashate)
-    # Thus we look for "path after a comma" to indicate we were
-    # installed from some local place
-    pip list 2>/dev/null | grep -- "$name" | grep -q -- ', .*)$'
-}
-
-# check that everything that's in LIBS_FROM_GIT was actually installed
-# correctly, this helps double check issues with library fat fingering.
-function check_libs_from_git {
-    local lib=""
-    local not_installed=""
-    for lib in $(echo ${LIBS_FROM_GIT} | tr "," " "); do
-        if ! lib_installed_from_git "$lib"; then
-            not_installed+=" $lib"
-        fi
-    done
-    # if anything is not installed, say what it is.
-    if [[ -n "$not_installed" ]]; then
-        die $LINENO "The following LIBS_FROM_GIT were not installed correct: $not_installed"
-    fi
+    #  pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate'
+    # pip list --format columns shows
+    #  bashate 0.5.2.dev19 /tmp/env/src/bashate
+    # Thus we check the third column to see if we're installed from
+    # some local place.
+    [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]]
 }
 
 # setup a library by name. If we are trying to use the library from
@@ -454,20 +291,18 @@
 # another project.
 #
 # use this for non namespaced libraries
+#
+# setup_dev_lib [-bindep] <name> [<extras>]
 function setup_dev_lib {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local name=$1
     local dir=${GITDIR[$name]}
-    if python3_enabled; then
-        # Turn off Python 3 mode and install the package again,
-        # forcing a Python 2 installation. This ensures that all libs
-        # being used for development are installed under both versions
-        # of Python.
-        echo "Installing $name again without Python 3 enabled"
-        USE_PYTHON3=False
-        setup_develop $dir
-        USE_PYTHON3=True
-    fi
-    setup_develop $dir
+    local extras=$2
+    setup_develop $bindep $dir $extras
 }
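 
 # Example (hypothetical library and extra): install an editable
 # oslo.messaging from its git checkout, including bindep packages:
 #
 #   GITDIR["oslo.messaging"]=/opt/stack/oslo.messaging
 #   setup_dev_lib -bindep oslo.messaging kafka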
 
 # this should be used if you want to install globally, all libraries should
@@ -478,11 +313,17 @@
 # extras: comma-separated list of optional dependencies to install
 #         (e.g., ldap,memcache).
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+# bindep: Set "-bindep" as first argument to install bindep.txt packages
 # The command is like "pip install <project_dir>[<extras>]"
 function setup_install {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local extras=$2
-    _setup_package_with_constraints_edit $project_dir "" $extras
+    _setup_package_with_constraints_edit $bindep $project_dir "" $extras
 }
 
 # this should be used for projects which run services, like all services
@@ -494,20 +335,14 @@
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 # The command is like "pip install -e <project_dir>[<extras>]"
 function setup_develop {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local extras=$2
-    _setup_package_with_constraints_edit $project_dir -e $extras
-}
-
-# determine if a project as specified by directory is in
-# projects.txt. This will not be an exact match because we throw away
-# the namespacing when we clone, but it should be good enough in all
-# practical ways.
-function is_in_projects_txt {
-    local project_dir=$1
-    local project_name
-    project_name=$(basename $project_dir)
-    grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
+    _setup_package_with_constraints_edit $bindep $project_dir -e $extras
 }
 
 # ``pip install -e`` the package, which processes the dependencies
@@ -526,6 +361,11 @@
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 # The command is like "pip install <flags> <project_dir>[<extras>]"
 function _setup_package_with_constraints_edit {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local flags=$2
     local extras=$3
@@ -538,30 +378,51 @@
     project_dir=$(cd $project_dir && pwd)
 
     if [ -n "$REQUIREMENTS_DIR" ]; then
-        # Constrain this package to this project directory from here on out.
+        # Remove this package from constraints before we install it.
+        # That way, later installs won't "downgrade" the install from
+        # source we are about to do.
         local name
         name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
-            $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
-            "$flags file://$project_dir#egg=$name"
+            $REQUIREMENTS_DIR/upper-constraints.txt -- $name
     fi
 
-    setup_package $project_dir "$flags" $extras
+    setup_package $bindep $project_dir "$flags" $extras
 
+    # If this project is in LIBS_FROM_GIT, verify it was actually installed
+    # correctly.  This helps catch errors caused by constraints mismatches.
+    if use_library_from_git "$project_dir"; then
+        if ! lib_installed_from_git "$project_dir"; then
+            die $LINENO "The following LIBS_FROM_GIT was not installed correctly: $project_dir"
+        fi
+    fi
 }
 
 # ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
+# using pip before running `setup.py develop`.  The command is like
+# "pip install <flags> <project_dir>[<extras>]"
 #
 # Uses globals ``STACK_USER``
-# setup_package project_dir [flags] [extras]
-# project_dir: directory of project repo (e.g., /opt/stack/keystone)
-# flags: pip CLI options/flags
-# extras: comma-separated list of optional dependencies to install
-#         (e.g., ldap,memcache).
-#         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
-# The command is like "pip install <flags> <project_dir>[<extras>]"
+#
+# Usage:
+#  setup_package [-bindep[=profile,profile]] <project_dir> <flags> [extras]
+#
+# -bindep     : Use bindep to install dependencies; select extra profiles
+#               as comma separated arguments after "="
+# project_dir : directory of project repo (e.g., /opt/stack/keystone)
+# flags       : pip CLI options/flags
+# extras      : comma-separated list of optional dependencies to install
+#               (e.g., ldap,memcache).
+#               See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 function setup_package {
+    local bindep=0
+    local bindep_flag=""
+    local bindep_profiles=""
+    if [[ $1 == -bindep* ]]; then
+        bindep=1
+        IFS="=" read bindep_flag bindep_profiles <<< ${1}
+        shift
+    fi
     local project_dir=$1
     local flags=$2
     local extras=$3
@@ -577,6 +438,11 @@
         extras="[$extras]"
     fi
 
+    # install any bindep packages
+    if [[ $bindep == 1 ]]; then
+        install_bindep $project_dir/bindep.txt $bindep_profiles
+    fi
+
     pip_install $flags "$project_dir$extras"
     # ensure that further actions can do things like setup.py sdist
     if [[ "$flags" == "-e" ]]; then
@@ -585,12 +451,15 @@
 }
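 
 # Example (hypothetical invocation): editable install of a keystone
 # checkout with its ldap extra, first installing the bindep.txt
 # packages from the "test" profile:
 #
 #   setup_package -bindep=test /opt/stack/keystone -e ldap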
 
 # Report whether python 3 should be used
+# TODO(frickler): drop this once all legacy uses are removed
 function python3_enabled {
-    if [[ $USE_PYTHON3 == "True" ]]; then
-        return 0
-    else
-        return 1
-    fi
+    return 0
+}
+
+# Provide requested python version and sets PYTHON variable
+function install_python {
+    install_python3
+    export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null)
 }
 
 # Install python3 packages
@@ -599,6 +468,12 @@
         apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
     elif is_suse; then
         install_package python3-devel python3-dbm
+    elif is_fedora; then
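+        # ${PYTHON3_VERSION//.} drops the dot from the version,
+        # e.g. 3.8 selects the python38 package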
+        if [ "$os_VENDOR" = "Fedora" ]; then
+            install_package python${PYTHON3_VERSION//.}
+        else
+            install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel
+        fi
     fi
 }
 
diff --git a/lib/apache b/lib/apache
index 5dc0e98..4bea07d 100644
--- a/lib/apache
+++ b/lib/apache
@@ -82,26 +82,48 @@
         apxs="apxs"
     fi
 
-    # Ubuntu xenial is back level on uwsgi so the proxy doesn't
-    # actually work. Hence we have to build from source for now.
+    # The plugin name varies based on whether uwsgi is distro-packaged
+    # or pip-installed.  If we've pip_installed it, the pip build only
+    # produces a "python" plugin, which is either python2 or python3
+    # depending on what it was built with.
     #
-    # Centos 7 actually has the module in epel, but there was a big
-    # push to disable epel by default. As such, compile from source
-    # there as well.
+    # For package installs, the distro ships both plugins and you need
+    # to select the right one ... it will not be autodetected.
+    UWSGI_PYTHON_PLUGIN=python3
 
-    local dir
-    dir=$(mktemp -d)
-    pushd $dir
-    pip_install uwsgi
-    pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
-    local uwsgi
-    uwsgi=$(ls uwsgi*)
-    tar xvf $uwsgi
-    cd uwsgi*/apache2
-    sudo $apxs -i -c mod_proxy_uwsgi.c
-    popd
-    # delete the temp directory
-    sudo rm -rf $dir
+    if is_ubuntu; then
+        local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
+        install_package ${pkg_list}
+    elif is_fedora; then
+        # Note httpd comes with mod_proxy_uwsgi and it is loaded by
+        # default; the mod_proxy_uwsgi package actually conflicts now.
+        # See:
+        #  https://bugzilla.redhat.com/show_bug.cgi?id=1574335
+        #
+        # Thus there is nothing else to do after this install
+        install_package uwsgi \
+                        uwsgi-plugin-python3
+    elif [[ $os_VENDOR =~ openSUSE ]]; then
+        install_package uwsgi \
+                        uwsgi-python3 \
+                        apache2-mod_uwsgi
+    else
+        # Compile uwsgi from source.
+        local dir
+        dir=$(mktemp -d)
+        pushd $dir
+        pip_install uwsgi
+        pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
+        local uwsgi
+        uwsgi=$(ls uwsgi*)
+        tar xvf $uwsgi
+        cd uwsgi*/apache2
+        sudo $apxs -i -c mod_proxy_uwsgi.c
+        popd
+        # delete the temp directory
+        sudo rm -rf $dir
+        UWSGI_PYTHON_PLUGIN=python
+    fi
 
     if is_ubuntu || is_suse ; then
         # we've got to enable proxy and proxy_uwsgi for this to work
@@ -121,20 +143,17 @@
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
         install_package apache2
-        if python3_enabled; then
-            if is_package_installed libapache2-mod-wsgi; then
-                uninstall_package libapache2-mod-wsgi
-            fi
-            install_package libapache2-mod-wsgi-py3
-        else
-            install_package libapache2-mod-wsgi
+        if is_package_installed libapache2-mod-wsgi; then
+            uninstall_package libapache2-mod-wsgi
         fi
+        install_package libapache2-mod-wsgi-py3
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
-        install_package httpd mod_wsgi
+        install_package httpd python3-mod_wsgi
         # For consistency with Ubuntu, switch to the worker mpm, as
-        # the default is prefork
+        # the default is event
         sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+        sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
         sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf
     elif is_suse; then
         install_package apache2 apache2-mod_wsgi
@@ -264,7 +283,7 @@
     # configured after graceful shutdown
     iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
     iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins python
+    iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
     # uwsgi recommends this to prevent thundering herd on accept.
     iniset "$file" uwsgi thunder-lock true
     # Set hook to trigger graceful shutdown on SIGTERM
@@ -282,10 +301,9 @@
     else
         local apache_conf=""
         apache_conf=$(apache_site_config_for $name)
-        echo "SetEnv proxy-sendcl 1" | sudo tee $apache_conf
         iniset "$file" uwsgi socket "$socket"
         iniset "$file" uwsgi chmod-socket 666
-        echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf
+        echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf
         enable_apache_site $name
         restart_apache_server
     fi
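 
 # For reference, a sketch of the resulting uwsgi ini on a
 # distro-packaged install (the plugin name varies as described above):
 #
 #   [uwsgi]
 #   plugins = http,python3
 #   enable-threads = true
 #   thunder-lock = true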
@@ -318,7 +336,7 @@
     iniset "$file" uwsgi die-on-term true
     iniset "$file" uwsgi exit-on-reload false
     iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins python
+    iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
     # uwsgi recommends this to prevent thundering herd on accept.
     iniset "$file" uwsgi thunder-lock true
     # Set hook to trigger graceful shutdown on SIGTERM
@@ -345,11 +363,31 @@
     local apache_conf=""
     apache_conf=$(apache_site_config_for $name)
     echo "KeepAlive Off" | sudo tee $apache_conf
+    echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
     echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf
     enable_apache_site $name
     restart_apache_server
 }
 
+# Write a straight-through proxy for a service that runs locally and just needs
+# to be reachable via the main http proxy at $loc
+function write_local_proxy_http_config {
+    local name=$1
+    local url=$2
+    local loc=$3
+    local apache_conf
+    apache_conf=$(apache_site_config_for $name)
+
+    enable_apache_mod proxy
+    enable_apache_mod proxy_http
+
+    echo "KeepAlive Off" | sudo tee $apache_conf
+    echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
+    echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf
+    enable_apache_site $name
+    restart_apache_server
+}
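+
+# Example (hypothetical service): expose a service listening locally on
+# port 8000 through the main apache proxy under /registry:
+#
+#   write_local_proxy_http_config registry http://127.0.0.1:8000 /registry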
+
 function remove_uwsgi_config {
     local file=$1
     local wsgi=$2
diff --git a/lib/cinder b/lib/cinder
index 07f82a1..cefb609 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -31,6 +31,7 @@
 CINDER_DRIVER=${CINDER_DRIVER:-default}
 CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins
 CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends
+CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups
 
 # grab plugin config if specified via cinder_driver
 if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
@@ -51,7 +52,6 @@
 fi
 
 CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
-CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder}
 
 CINDER_CONF_DIR=/etc/cinder
 CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
@@ -67,7 +67,7 @@
 CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
 CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to auto, which will do thin provisioning if it's a fresh
@@ -88,25 +88,42 @@
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
-# Cinder reports allocations back to the scheduler on periodic intervals
-# it turns out we can get an "out of space" issue when we run tests too
-# quickly just because cinder didn't realize we'd freed up resources.
-# Make this configurable so that devstack-gate/tempest can set it to
-# less than the 60 second default
-# https://bugs.launchpad.net/cinder/+bug/1180976
-CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
+# Default to lioadm
+CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
 
-# Centos7 switched to using LIO and that's all that's supported,
-# although the tgt bits are in EPEL we don't want that for CI
-if is_fedora; then
-    CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+# EL and SUSE should only use lioadm
+if is_fedora || is_suse; then
     if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
-        die "lioadm is the only valid Cinder iscsi_helper config on this platform"
+        die "lioadm is the only valid Cinder target_helper config on this platform"
     fi
-else
-    CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
 fi
 
+# When Cinder is used as a backend for Glance, it can be configured to clone
+# the volume containing image data directly in the backend instead of
+# transferring data from volume to volume.  The value is a comma-separated
+# list of schemes (currently only 'file' and 'cinder' are supported).  The
+# default configuration in Cinder is empty (that is, do not use this
+# feature).  NOTE: to use this feature you must also enable
+# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS in
+# glance-api.conf.
+CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-}
+if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
+    if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \
+            && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then
+        warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \
+GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True"
+    fi
+fi
+
+# For backward compatibility:
+# before CINDER_BACKUP_DRIVER was introduced, the ceph backup driver was
+# configured along with the ceph backend driver.
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then
+    CINDER_BACKUP_DRIVER=ceph
+fi
+
+# Supported backup drivers are in lib/cinder_backups
+CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift}
+
 # Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi
 # reference should be cleaned up to more accurately refer to uwsgi.
 CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True}
@@ -122,6 +139,15 @@
     done
 fi
 
+# Source the backup driver
+if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+    if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then
+        source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER
+    else
+        die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported"
+    fi
+fi
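+
+# A driver file sourced here follows a simple contract: it must define
+# configure_cinder_backup_<name>, and may define init_/cleanup_ hooks
+# (see below).  A minimal sketch, using a hypothetical "posix" driver
+# file for illustration:
+#
+#   # lib/cinder_backups/posix
+#   function configure_cinder_backup_posix {
+#       iniset $CINDER_CONF DEFAULT backup_driver \
+#           "cinder.backup.drivers.posix.PosixBackupDriver"
+#   }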
+
 # Environment variables to configure the image-volume cache
 CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
 
@@ -134,6 +160,12 @@
 # enable the cache for all cinder backends.
 CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
 
+# Flag to set oslo_policy.enforce_scope. This is used to switch the
+# Volume API policies to start checking the scope of the token. By
+# default, this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE)
+
 # Functions
 # ---------
 
@@ -198,6 +230,12 @@
         done
     fi
 
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            cleanup_cinder_backup_$CINDER_BACKUP_DRIVER
+        fi
+    fi
+
     stop_process "c-api"
     remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
 }
@@ -206,8 +244,6 @@
 function configure_cinder {
     sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR
 
-    cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
-
     rm -f $CINDER_CONF
 
     configure_rootwrap cinder
@@ -227,12 +263,11 @@
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
     inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir
 
-    configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
+    configure_keystone_authtoken_middleware $CINDER_CONF cinder
 
-    iniset $CINDER_CONF DEFAULT auth_strategy keystone
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
-    iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER"
     iniset $CINDER_CONF database connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
@@ -240,10 +275,26 @@
     iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
-    iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
-    iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
-
+    if [[ $SERVICE_IP_VERSION == 6 ]]; then
+        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6"
+    else
+        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
+    fi
     iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
+    iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
+    if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
+        iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES
+    fi
+
+    # set default quotas
+    iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10}
+    iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10}
+    iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10}
+
+    # Avoid RPC timeouts in slow CI and test environments by doubling the
+    # default response timeout set by RPC clients. See bug #1873234 for more
+    # details and example failures.
+    iniset $CINDER_CONF DEFAULT rpc_response_timeout 120
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
@@ -259,9 +310,6 @@
                 default_name=$be_name
             fi
             enabled_backends+=$be_name,
-
-            iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR
-
         done
         iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
         if [[ -n "$default_name" ]]; then
@@ -270,8 +318,12 @@
         configure_cinder_image_volume_cache
     fi
 
-    if is_service_enabled swift; then
-        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            configure_cinder_backup_$CINDER_BACKUP_DRIVER
+        else
+            die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER"
+        fi
     fi
 
     if is_service_enabled ceilometer; then
@@ -315,19 +367,20 @@
         iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE
     fi
 
-    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
-        iniset $CINDER_CONF DEFAULT glance_api_version 2
-    fi
-
     # Set nova credentials (used for os-assisted-snapshots)
-    configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova
+    configure_keystone_authtoken_middleware $CINDER_CONF nova nova
     iniset $CINDER_CONF nova region_name "$REGION_NAME"
     iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
     if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
         iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL"
     elif is_service_enabled etcd3; then
-        iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:2379"
+        iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
+    fi
+
+    if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+        iniset $CINDER_CONF oslo_policy enforce_scope true
+        iniset $CINDER_CONF oslo_policy enforce_new_defaults true
     fi
 }
 
@@ -344,18 +397,13 @@
 
         create_service_user "cinder"
 
-        get_or_create_service "cinder" "volume" "Cinder Volume Service"
+        # block-storage is the official service type
+        get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
         if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
             get_or_create_endpoint \
-                "volume" \
+                "block-storage" \
                 "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s"
-
-            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
-            get_or_create_endpoint \
-                "volumev2" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s"
+                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
 
             get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
             get_or_create_endpoint \
@@ -364,15 +412,9 @@
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
         else
             get_or_create_endpoint \
-                "volume" \
+                "block-storage" \
                 "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s"
-
-            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
-            get_or_create_endpoint \
-                "volumev2" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s"
+                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
 
             get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
             get_or_create_endpoint \
@@ -385,13 +427,6 @@
     fi
 }
 
-# create_cinder_cache_dir() - Part of the init_cinder() process
-function create_cinder_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR
-    rm -f $CINDER_AUTH_CACHE_DIR/*
-}
-
 # init_cinder() - Initialize database and volume group
 function init_cinder {
     if is_service_enabled $DATABASE_BACKENDS; then
@@ -419,8 +454,13 @@
         done
     fi
 
+    if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+        if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+            init_cinder_backup_$CINDER_BACKUP_DRIVER
+        fi
+    fi
+
     mkdir -p $CINDER_STATE_PATH/volumes
-    create_cinder_cache_dir
 }
 
 # install_cinder() - Collect source and prepare
@@ -429,8 +469,15 @@
     setup_develop $CINDER_DIR
     if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
         install_package tgt
-    elif [[ "$CINDER_ISCI_HELPER" == "lioadm" ]]; then
-        install_package targetcli
+    elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
+        if is_ubuntu; then
+            # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
+            sudo mkdir -p /etc/target
+
+            install_package targetcli-fb
+        else
+            install_package targetcli
+        fi
     fi
 }
 
@@ -496,7 +543,7 @@
                 start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
             fi
         else
-            run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
+            run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
             cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
         fi
     fi
@@ -530,8 +577,26 @@
         local be be_name
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
             be_name=${be##*:}
-            openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name}
+            # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode
+            if is_service_enabled keystone; then
+                openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name}
+            else
+                # TODO (e0ne): use openstack client once it will support cinder in noauth mode:
+                # https://bugs.launchpad.net/python-cinderclient/+bug/1755279
+                local cinder_url
+                cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3
+                OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name}
+                OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name}
+            fi
         done
+
+        # Increase quota for the service project if glance is using cinder,
+        # since it's likely to occasionally go above the default 10 in parallel
+        # test execution.
+        if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+            openstack --os-region-name="$REGION_NAME" \
+                      quota set --volumes 50 "$SERVICE_PROJECT_NAME"
+        fi
     fi
 }
 
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 00a0bb3..0b46573 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -6,12 +6,6 @@
 # Enable with:
 #
 #   CINDER_ENABLED_BACKENDS+=,ceph:ceph
-#
-# Optional parameters:
-#   CINDER_BAK_CEPH_POOL=<pool-name>
-#   CINDER_BAK_CEPH_USER=<user>
-#   CINDER_BAK_CEPH_POOL_PG=<pg-num>
-#   CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
 
 # Dependencies:
 #
@@ -29,11 +23,6 @@
 # Defaults
 # --------
 
-CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
-CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
-CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
-CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
-
 
 # Entry Points
 # ------------
@@ -52,27 +41,6 @@
     iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
     iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
     iniset $CINDER_CONF DEFAULT glance_api_version 2
-
-    if is_service_enabled c-bak; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            # Configure Cinder backup service options, ceph pool, ceph user and ceph key
-            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
-            if [[ $CEPH_REPLICAS -ne 1 ]]; then
-                sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
-            fi
-        fi
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-
-        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
-        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
-        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
-        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
-        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
-        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
-        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
-    fi
 }
 
 # Restore xtrace
diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi
new file mode 100644
index 0000000..94412e0
--- /dev/null
+++ b/lib/cinder_backends/ceph_iscsi
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# lib/cinder_backends/ceph_iscsi
+# Configure the ceph_iscsi backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi
+#
+# Optional parameters:
+#   CEPH_ISCSI_API_URL=<url to the rbd-target-api service>
+#
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_ceph_backend_ceph_iscsi - called from configure_cinder()
+
+
+# Save trace setting
+_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace)
+set +o xtrace
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph_iscsi $name
+function configure_cinder_backend_ceph_iscsi {
+    local be_name=$1
+
+    CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT}
+
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+    iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER"
+    iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD"
+    iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL"
+    iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN"
+    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+    iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+    pip_install rbd-iscsi-client
+}
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH_ISCSI
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate
index 6b1f848..3ffd9a6 100644
--- a/lib/cinder_backends/fake_gate
+++ b/lib/cinder_backends/fake_gate
@@ -50,7 +50,7 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
 
     if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 03e1880..e03ef14 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -50,9 +50,9 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
-
+    iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
 }
 
 # init_cinder_backend_lvm - Initialize volume group
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
new file mode 100644
index 0000000..e4003c0
--- /dev/null
+++ b/lib/cinder_backups/ceph
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# lib/cinder_backups/ceph
+# Configure the ceph backup driver
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=ceph
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_CEPH=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+function configure_cinder_backup_ceph {
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+    if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+    sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+    iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+    iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+    iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+    iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+}
+
+# init_cinder_backup_ceph: nothing to do
+# cleanup_cinder_backup_ceph: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift
new file mode 100644
index 0000000..6fb2486
--- /dev/null
+++ b/lib/cinder_backups/s3_swift
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# lib/cinder_backups/s3_swift
+# Configure the s3 backup driver with swift s3api
+#
+# TODO: create lib/cinder_backups/s3 for external s3-compatible storage
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=s3_swift
+#   enable_service s3api s-proxy s-object s-container s-account
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_cinder_backup_s3_swift {
+    # This configuration requires swift and s3api. If we're
+    # on a subnode we might not know if they are enabled
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT"
+}
+
+function init_cinder_backup_s3_swift {
+    openstack ec2 credential create
+    iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)"
+    iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)"
+    if is_service_enabled tls-proxy; then
+        iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE"
+    fi
+}
+
+# cleanup_cinder_backup_s3_swift: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_S3_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
new file mode 100644
index 0000000..d7c977e
--- /dev/null
+++ b/lib/cinder_backups/swift
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# lib/cinder_backups/swift
+# Configure the swift backup driver
+
+# Enable with:
+#
+#   CINDER_BACKUP_DRIVER=swift
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+
+function configure_cinder_backup_swift {
+    # NOTE(mriedem): The default backup driver uses swift and if we're
+    # on a subnode we might not know if swift is enabled, but chances are
+    # good that it is on the controller so configure the backup service
+    # to use it.
+    iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
+    iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+}
+
+# init_cinder_backup_swift: nothing to do
+# cleanup_cinder_backup_swift: nothing to do
+
+
+# Restore xtrace
+$_XTRACE_CINDER_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS
deleted file mode 100644
index 92135e7..0000000
--- a/lib/cinder_plugins/XenAPINFS
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/XenAPINFS
-# Configure the XenAPINFS driver
-
-# Enable with:
-#
-#   CINDER_DRIVER=XenAPINFS
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
-    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
-    iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
-    iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
-    iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_XENAPINFS
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog
deleted file mode 100644
index 558de46..0000000
--- a/lib/cinder_plugins/sheepdog
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/sheepdog
-# Configure the sheepdog driver
-
-# Enable with:
-#
-#   CINDER_DRIVER=sheepdog
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
-    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_SHEEPDOG
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/database b/lib/database
index 0d72052..7940cf2 100644
--- a/lib/database
+++ b/lib/database
@@ -87,8 +87,6 @@
 
     if [ -n "$MYSQL_PASSWORD" ]; then
         DATABASE_PASSWORD=$MYSQL_PASSWORD
-    else
-        read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
     fi
 
     # We configure Nova, Horizon, Glance and Keystone to use MySQL as their
diff --git a/lib/databases/mysql b/lib/databases/mysql
index a0cf7a4..d0fa119 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -15,10 +15,19 @@
 
 register_database mysql
 
-# Linux distros, thank you for being incredibly consistent
-MYSQL=mysql
-if is_fedora && ! is_oraclelinux; then
-    MYSQL=mariadb
+if [[ -z "$MYSQL_SERVICE_NAME" ]]; then
+    MYSQL_SERVICE_NAME=mysql
+    if is_fedora && ! is_oraclelinux; then
+        MYSQL_SERVICE_NAME=mariadb
+    elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
+        # Older mariadb packages on SLES 12 provided mysql.service.  The
+        # newer ones on SLES 12 and 15 use mariadb.service; they also
+        # provide a mysql.service symlink for backwards-compatibility, but
+        # let's not rely on that.
+        MYSQL_SERVICE_NAME=mariadb
+    elif [[ "$DISTRO" == "bullseye" ]]; then
+        MYSQL_SERVICE_NAME=mariadb
+    fi
 fi
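+
+# MYSQL_SERVICE_NAME can also be pre-seeded from local.conf when the
+# detection above does not fit a distro, e.g.:
+#
+#   MYSQL_SERVICE_NAME=mariadb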
 
 # Functions
@@ -34,17 +43,17 @@
 
 # Get rid of everything enough to cleanly change database backends
 function cleanup_database_mysql {
-    stop_service $MYSQL
+    stop_service $MYSQL_SERVICE_NAME
     if is_ubuntu; then
         # Get ruthless with mysql
         apt_get purge -y mysql* mariadb*
         sudo rm -rf /var/lib/mysql
         sudo rm -rf /etc/mysql
         return
-    elif is_suse || is_oraclelinux; then
+    elif is_oraclelinux; then
         uninstall_package mysql-community-server
         sudo rm -rf /var/lib/mysql
-    elif is_fedora; then
+    elif is_suse || is_fedora; then
         uninstall_package mariadb-server
         sudo rm -rf /var/lib/mysql
     else
@@ -64,12 +73,9 @@
 
     if is_ubuntu; then
         my_conf=/etc/mysql/my.cnf
-        mysql=mysql
     elif is_suse || is_oraclelinux; then
         my_conf=/etc/my.cnf
-        mysql=mysql
     elif is_fedora; then
-        mysql=mariadb
         my_conf=/etc/my.cnf
         local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf
         if [ -f "$cracklib_conf" ]; then
@@ -82,7 +88,7 @@
     # Start mysql-server
     if is_fedora || is_suse; then
         # service is not started by default
-        start_service $mysql
+        start_service $MYSQL_SERVICE_NAME
     fi
 
     # Set the root password - only works the first time. For Ubuntu, we already
@@ -90,19 +96,34 @@
     # because the package might have been installed already.
     sudo mysqladmin -u root password $DATABASE_PASSWORD || true
 
+    # With MariaDB, passing a hostname argument causes permission
+    # problems, as it expects connections to come in through the socket
+    if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+        local cmd_args="-uroot -p$DATABASE_PASSWORD "
+    else
+        local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 "
+    fi
+
+    # With MariaDB (e.g. on Ubuntu) the socket plugin is used for root
+    # authentication, so logging in as root works only via sudo. To restore
+    # the old "mysql like" behaviour, we need to change the auth plugin for
+    # the root user
+    if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+        sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+        sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+    fi
+    # Create DB user if it does not already exist
+    sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
     # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
-    sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+    sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
     # Change bind-address from localhost (127.0.0.1) to any (::) and
     # set default db type to InnoDB
-    iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS"
+    iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
     iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
     iniset -sudo $my_conf mysqld default-storage-engine InnoDB
     iniset -sudo $my_conf mysqld max_connections 1024
-    iniset -sudo $my_conf mysqld query_cache_type OFF
-    iniset -sudo $my_conf mysqld query_cache_size 0
 
     if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
         echo_summary "Enabling MySQL query logging"
@@ -124,7 +145,7 @@
         iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
     fi
 
-    restart_service $mysql
+    restart_service $MYSQL_SERVICE_NAME
 }
 
 function install_database_mysql {
@@ -146,20 +167,24 @@
 [client]
 user=$DATABASE_USER
 password=$DATABASE_PASSWORD
-host=$MYSQL_HOST
 EOF
+
+        if ! is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then
+            echo "host=$MYSQL_HOST" >> $HOME/.my.cnf
+        fi
         chmod 0600 $HOME/.my.cnf
     fi
     # Install mysql-server
-    if is_suse || is_oraclelinux; then
-        if ! is_package_installed mariadb; then
-            install_package mysql-community-server
-        fi
+    if is_oraclelinux; then
+        install_package mysql-community-server
     elif is_fedora; then
+        install_package mariadb-server mariadb-devel
+        sudo systemctl enable $MYSQL_SERVICE_NAME
+    elif is_suse; then
         install_package mariadb-server
-        sudo systemctl enable mariadb
+        sudo systemctl enable $MYSQL_SERVICE_NAME
     elif is_ubuntu; then
-        install_package mysql-server
+        install_package $MYSQL_SERVICE_NAME-server
     else
         exit_distro_not_supported "mysql installation"
     fi
diff --git a/lib/dstat b/lib/dstat
index fe38d75..eb03ae0 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -9,6 +9,7 @@
 
 # ``stack.sh`` calls the entry points in this order:
 #
+# - install_dstat
 # - start_dstat
 # - stop_dstat
 
@@ -16,6 +17,14 @@
 _XTRACE_DSTAT=$(set +o | grep xtrace)
 set +o xtrace
 
+# install_dstat() - Install prerequisites for dstat services
+function install_dstat {
+    if is_service_enabled memory_tracker; then
+        # Install python libraries required by tools/mlock_report.py
+        pip_install_gr psutil
+    fi
+}
+
 # start_dstat() - Start running processes
 function start_dstat {
     # A better kind of sysstat, with the top process per time slice
@@ -26,10 +35,10 @@
     # to your localrc
     run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root"
 
-    # remove support for the old name when it's no longer used (sometime in Queens)
+    # TODO(jh): Fail when using the old service name, otherwise consumers
+    # might never notice that it has been removed.
     if is_service_enabled peakmem_tracker; then
-        deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead"
-        run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root"
+        die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead"
     fi
 }
 
diff --git a/lib/etcd3 b/lib/etcd3
index 51df8e4..4f3a7a4 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -27,7 +27,10 @@
 ETCD_DATA_DIR="$DATA_DIR/etcd"
 ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
 ETCD_BIN_DIR="$DEST/bin"
-ETCD_PORT=2379
+# The option below will mount ETCD_DATA_DIR as a ramdisk, which is useful
+# for running etcd-heavy services in the gate VMs, e.g. Kubernetes.
+ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK)
+ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512}
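+# For example, to keep etcd data on disk instead, or to grow the ramdisk,
+# set in local.conf:
+#
+#   ETCD_USE_RAMDISK=False
+#   ETCD_RAMDISK_MB=1024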
 
 if is_ubuntu ; then
     UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1`
@@ -38,15 +41,18 @@
     local cmd="$ETCD_BIN_DIR/etcd"
     cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR"
     cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01"
-    cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380"
-    cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380"
+    cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT"
+    cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT"
     cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT"
     if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then
-        cmd+=" --listen-peer-urls http://[::]:2380 "
+        cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT "
     else
-        cmd+=" --listen-peer-urls http://0.0.0.0:2380 "
+        cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT "
     fi
     cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
+    if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
+        cmd+=" --debug"
+    fi
 
     local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
     write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root"
@@ -87,6 +93,9 @@
 
     $SYSTEMCTL daemon-reload
 
+    if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+        sudo umount $ETCD_DATA_DIR
+    fi
     sudo rm -rf $ETCD_DATA_DIR
 }
 
@@ -96,6 +105,9 @@
     # Create the necessary directories
     sudo mkdir -p $ETCD_BIN_DIR
     sudo mkdir -p $ETCD_DATA_DIR
+    if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+        sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR
+    fi
 
     # Download and cache the etcd tgz for subsequent use
     local etcd_file
@@ -107,9 +119,11 @@
 
         tar xzvf $etcd_file -C $FILES
         sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd
+        sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl
     fi
     if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then
         sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd
+        sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl
     fi
 }
 
diff --git a/lib/glance b/lib/glance
index 74734c7..f18bea9 100644
--- a/lib/glance
+++ b/lib/glance
@@ -41,22 +41,77 @@
     GLANCE_BIN_DIR=$(get_python_exec_prefix)
 fi
 
+# Cinder for Glance
+USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE)
+# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values
+# from CINDER_ENABLED_BACKENDS
+GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1}
+GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance
+# NOTE (abhishekk): For openSUSE, data files are stored in a different directory
+if is_opensuse; then
+    GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance
+fi
+# When Cinder is used as a glance store, you can optionally configure cinder to
+# optimize bootable volume creation by allowing volumes to be cloned directly
+# in the backend instead of transferring data via Glance.  To use this feature,
+# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable
+# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance.  The
+# default value for both of these is False, because for some backends they
+# present a grave security risk (though not for Cinder, because all that's
+# exposed is the volume_id where the image data is stored).  See OSSN-0065 for
+# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065
+GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL)
+GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS)
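+#
+# An illustrative local.conf snippet enabling the optimization described
+# above (the values are examples, not requirements):
+#
+#   USE_CINDER_FOR_GLANCE=True
+#   GLANCE_CINDER_DEFAULT_BACKEND=lvmdriver-1
+#   CINDER_ALLOWED_DIRECT_URL_SCHEMES=cinder
+#   GLANCE_SHOW_DIRECT_URL=True
+#   GLANCE_SHOW_MULTIPLE_LOCATIONS=True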
+
+# Glance multi-store configuration
+# Boolean flag to enable multiple store configuration for glance
+GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES)
+
+# Comma-separated list for configuring multiple file stores of glance,
+# for example: GLANCE_MULTIPLE_FILE_STORES=fast,cheap,slow
+GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast}
+
+# Default store/backend for glance, must be one of the stores specified
+# in the GLANCE_MULTIPLE_FILE_STORES option.
+GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
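+
+# A minimal multi-store local.conf sketch using the variables above:
+#
+#   GLANCE_ENABLE_MULTIPLE_STORES=True
+#   GLANCE_MULTIPLE_FILE_STORES=fast,cheap
+#   GLANCE_DEFAULT_BACKEND=fast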
+
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+
+# Full Glance functionality requires running in standalone mode. If we are
+# not in uwsgi mode then we are standalone; otherwise allow separate control.
+if [[ "$WSGI_MODE" != "uwsgi" ]]; then
+    GLANCE_STANDALONE=True
+fi
+GLANCE_STANDALONE=${GLANCE_STANDALONE:-False}
+
+# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES; the store
+# identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES
+# specifies fast,cheap then file paths like $DATA_DIR/glance/fast
+# and $DATA_DIR/glance/cheap will be generated.
+GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance}
 GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
+GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt
 GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks}
-GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance}
+GLANCE_STAGING_DIR=${GLANCE_STAGING_DIR:=$GLANCE_MULTISTORE_FILE_IMAGE_DIR/os_glance_staging_store}
+GLANCE_TASKS_DIR=${GLANCE_TASKS_DIR:=$GLANCE_MULTISTORE_FILE_IMAGE_DIR/os_glance_tasks_store}
+
+GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW)
+GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS)
+
+# Flag to set oslo_policy.enforce_scope. This is used to switch
+# the Image API policies to start checking the scope of the token. By default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE)
 
 GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
 GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
-GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
 GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
-GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
 GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
 GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
-GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
 GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
 GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
-GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False}
+GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf
 
 if is_service_enabled tls-proxy; then
     GLANCE_SERVICE_PROTOCOL="https"
@@ -64,15 +119,17 @@
 
 # Glance connection info.  Note the port must be specified.
 GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST}
-GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292}
 GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
 GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
-GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
 GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api
 GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini
+
+# Glance default limit for Devstack
+GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000}
+
 # If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet
 # TODO(mtreinish): Remove the eventlet path here and in all the similar
 # conditionals below after the Pike release
@@ -96,57 +153,204 @@
 # cleanup_glance() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_glance {
-    # kill instances (nova)
-    # delete image files (glance)
-    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
+    # delete image files (glance) and all of the glance-remote temporary
+    # storage
+    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote"
+
+    # Cleanup multiple stores directories
+    if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
+        local store file_dir
+        for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+            file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+            sudo rm -rf $file_dir
+        done
+
+        # Cleanup reserved stores directories
+        sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
+    fi
+}
+
+# Set cinder store related config options for each of the enabled cinder stores
+#
+function configure_multiple_cinder_stores {
+
+    local be be_name be_type enabled_backends
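+    # CINDER_ENABLED_BACKENDS entries look like "<type>:<name>", e.g.
+    # "lvm:lvmdriver-1": ${be%%:*} extracts the type and ${be##*:} the name,
+    # so "lvm:lvmdriver-1,nfs:nfs1" results in
+    # enabled_backends="lvmdriver-1:cinder,nfs1:cinder".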
+    for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
+        be_type=${be%%:*}
+        be_name=${be##*:}
+        enabled_backends+="${be_name}:cinder,"
+
+        set_common_cinder_store_params $be_name
+        iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name}
+        if [[ "$be_type" == "nfs" ]]; then
+            mkdir -p "$GLANCE_NFS_MOUNTPOINT"
+            iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT"
+        fi
+    done
+    iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1}
+    iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND
+}
+
+# Set common cinder store options in the given config section
+#
+# Arguments:
+# config_section
+#
+function set_common_cinder_store_params {
+    local config_section="$1"
+    iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3
+    iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance
+    iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD
+    iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME
+}
+
+# Configure multiple file store options for each file store
+# (takes no arguments)
+#
+function configure_multiple_file_stores {
+    local store enabled_backends
+    enabled_backends=""
+    for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+        enabled_backends+="${store}:file,"
+    done
+    iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1}
+
+    # Glance multiple store Store specific configs
+    iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND
+    for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+        iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+    done
+}
+
+# Set reserved stores for glance
+function configure_reserved_stores {
+    iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/"
+    iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/"
+}
+
+# Copy rootwrap files from glance_store/etc/glance to /etc/glance
+#
+# Arguments:
+# source_path Source path to copy rootwrap files from
+#
+function copy_rootwrap {
+    local source_path="$1"
+    # Make the glance configuration directory if it does not exist
+    sudo install -d -o $STACK_USER $GLANCE_CONF_DIR
+    cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/
+}
+
+# Set glance_store related config options
+#
+# Arguments:
+# USE_CINDER_FOR_GLANCE
+# GLANCE_ENABLE_MULTIPLE_STORES
+#
+function configure_glance_store {
+    local use_cinder_for_glance="$1"
+    local glance_enable_multiple_stores="$2"
+    local be
+
+    if [[ "$glance_enable_multiple_stores" == "False" ]]; then
+        # Configure traditional glance_store
+        if [[ "$use_cinder_for_glance" == "True" ]]; then
+            # set common glance_store parameters
+            iniset $GLANCE_API_CONF glance_store stores "cinder,file,http"
+            iniset $GLANCE_API_CONF glance_store default_store cinder
+
+            # set cinder related store parameters
+            set_common_cinder_store_params glance_store
+            # set nfs mount_point dir
+            for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
+                local be_type=${be%%:*}
+                if [[ "$be_type" == "nfs" ]]; then
+                    mkdir -p $GLANCE_NFS_MOUNTPOINT
+                    iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT
+                fi
+            done
+        fi
+        # Store specific configs
+        iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+    else
+        if [[ "$use_cinder_for_glance" == "True" ]]; then
+            # Configure multiple cinder stores for glance
+            configure_multiple_cinder_stores
+        else
+            # Configure multiple file stores for glance
+            configure_multiple_file_stores
+        fi
+        # Configure reserved stores
+        configure_reserved_stores
+    fi
+}
+
+function configure_glance_quotas {
+
+    # NOTE(danms): We need to have some of the OS_ things unset in
+    # order to use system scope, which is required for creating these
+    # limits. This is a hack, but I dunno how else to get osc to use
+    # system scope.
+
+    bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
+        openstack --os-cloud devstack-system-admin registered limit create \
+                --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
+                --region $REGION_NAME image_size_total; \
+        openstack --os-cloud devstack-system-admin registered limit create \
+                --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
+                --region $REGION_NAME image_stage_total; \
+        openstack --os-cloud devstack-system-admin registered limit create \
+                --service glance --default-limit 100 --region $REGION_NAME \
+                image_count_total; \
+        openstack --os-cloud devstack-system-admin registered limit create \
+                --service glance --default-limit 100 --region $REGION_NAME \
+                image_count_uploading"
+
+    # Tell glance to use these limits
+    iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True
+
+    # Configure oslo_limit so it can talk to keystone
+    iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME
+    iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD
+    iniset $GLANCE_API_CONF oslo_limit username glance
+    iniset $GLANCE_API_CONF oslo_limit auth_type password
+    iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
+    iniset $GLANCE_API_CONF oslo_limit system_scope "'all'"
+    iniset $GLANCE_API_CONF oslo_limit endpoint_id \
+           $(openstack endpoint list --service glance -f value -c ID)
+
+    # Allow the glance service user to read quotas
+    openstack role add --user glance --user-domain Default --system all \
+              reader
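+
+    # (For manual verification, something like "openstack --os-cloud
+    # devstack-system-admin registered limit list" shows the limits
+    # created above.)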
 }
 
 # configure_glance() - Set config files, create data dirs, etc
 function configure_glance {
     sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
 
-    # Copy over our glance configurations and update them
-    cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
-    iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
-    inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
+    # Set non-default configuration options for the API server
     local dburl
     dburl=`database_connection_url glance`
-    iniset $GLANCE_REGISTRY_CONF database connection $dburl
-    iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
-    iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
-    configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
-    iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2
-    iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
-    iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
     iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    inicomment $GLANCE_API_CONF DEFAULT log_file
     iniset $GLANCE_API_CONF database connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
-    iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR
+    iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
-    configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
+    configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
     iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2
     iniset_rpc_backend glance $GLANCE_API_CONF
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
-        iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
-    fi
     if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then
         iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop"
     fi
+    # Only use these if you know what you are doing!  See OSSN-0065
+    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL
+    iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS
 
-    # NOTE(flaper87): To uncomment as soon as all services consuming Glance are
-    # able to consume V2 entirely.
-    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
-        iniset $GLANCE_API_CONF DEFAULT enable_v1_api False
-    fi
-
-    # Store specific configs
-    iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
-    iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
+    # Configure glance_store
+    configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES
 
     # CORS feature support - to allow calls from Horizon by default
     if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
@@ -155,35 +359,27 @@
         iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST"
     fi
 
-    # Store the images in swift if enabled.
-    if is_service_enabled s-proxy; then
-        iniset $GLANCE_API_CONF glance_store default_store swift
-        iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
-        if python3_enabled; then
-            iniset $GLANCE_API_CONF glance_store swift_store_auth_insecure True
-        fi
+    # No multiple stores for swift yet
+    if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then
+        # Store the images in swift if enabled.
+        if is_service_enabled s-proxy; then
+            iniset $GLANCE_API_CONF glance_store default_store swift
+            iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
 
-        iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
-        iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
-        iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
-        iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
+            iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
+            iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
+            iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+            if is_service_enabled tls-proxy; then
+                iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE
+            fi
+            iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
+            iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
 
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
-        if python3_enabled; then
-            # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag
-            # or ability to specify the CACERT. So fallback to http:// url
-            iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address ${KEYSTONE_SERVICE_URI/https/http}/v3
-        else
+            iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
             iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
+            iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
         fi
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
-
-        # commenting is not strictly necessary but it's confusing to have bad values in conf
-        inicomment $GLANCE_API_CONF glance_store swift_store_user
-        inicomment $GLANCE_API_CONF glance_store swift_store_key
-        inicomment $GLANCE_API_CONF glance_store swift_store_auth_address
     fi
 
     # We need to tell glance what it's public endpoint is so that the version
@@ -192,42 +388,31 @@
 
     if is_service_enabled tls-proxy; then
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
-        iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
-
-        iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-        iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    fi
-
-    if is_service_enabled tls-proxy; then
-        iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https
+        iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI
     fi
 
     # Format logging
     setup_logging $GLANCE_API_CONF
-    setup_logging $GLANCE_REGISTRY_CONF
 
-    cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
     cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
 
-    cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF
+    # Set non-default configuration options for the glance-cache
     iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    inicomment $GLANCE_CACHE_CONF DEFAULT log_file
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
-    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
-    iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI
-    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name
+    iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
     iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
-    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user
     iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
-    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
     iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
-    iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
 
     # Store specific confs
     iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
 
-    cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
+    # Set default configuration options for the glance-image-import
+    iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins []
+    iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin
+    iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject
+
     cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
 
     cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
@@ -236,16 +421,29 @@
         CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
         CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 
-        iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
-        iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+        iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
+        iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
     fi
 
-    if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+    if [[ "$GLANCE_STANDALONE" == False ]]; then
         write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image"
+        # Grab our uwsgi listen address and use that to fill out our
+        # worker_self_reference_url config
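+        # (e.g. a "http-socket = 127.0.0.1:60999" line in the uwsgi config
+        # yields "http://127.0.0.1:60999")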
+        iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \
+               $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \
+                    $GLANCE_UWSGI_CONF)
     else
+        write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image"
         iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
+        iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
         iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
     fi
+
+    if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+        iniset $GLANCE_API_CONF oslo_policy enforce_scope true
+        iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
+        iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
+    fi
 }
 
 # create_glance_accounts() - Set up common required glance accounts
@@ -276,14 +474,12 @@
         service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
         iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
         iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
-    fi
-}
 
-# create_glance_cache_dir() - Part of the init_glance() process
-function create_glance_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact
-    rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/*
+        if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then
+            configure_glance_quotas
+        fi
+
+    fi
 }
 
 # init_glance() - Initialize databases, etc.
@@ -292,10 +488,6 @@
     rm -rf $GLANCE_IMAGE_DIR
     mkdir -p $GLANCE_IMAGE_DIR
 
-    # Delete existing cache
-    rm -rf $GLANCE_CACHE_DIR
-    mkdir -p $GLANCE_CACHE_DIR
-
     # (Re)create glance database
     recreate_database glance
 
@@ -306,8 +498,6 @@
     # Load metadata definitions
     $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
     time_stop "dbsync"
-
-    create_glance_cache_dir
 }
 
 # install_glanceclient() - Collect source and prepare
@@ -321,11 +511,26 @@
 
 # install_glance() - Collect source and prepare
 function install_glance {
+    local glance_store_extras=()
+
+    if is_service_enabled cinder; then
+        glance_store_extras=("cinder" "${glance_store_extras[@]}")
+    fi
+
+    if is_service_enabled swift; then
+        glance_store_extras=("swift" "${glance_store_extras[@]}")
+    fi
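+    # The extras select glance_store's optional driver dependencies so that
+    # pip pulls in the cinder/swift client libraries when those services are
+    # enabled.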
+
     # Install glance_store from git so we make sure we're testing
     # the latest code.
     if use_library_from_git "glance_store"; then
         git_clone_by_name "glance_store"
-        setup_dev_lib "glance_store"
+        setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}")
+        copy_rootwrap ${DEST}/glance_store/etc/glance
+    else
+        # we still need to pass extras
+        pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}")
+        copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR
     fi
 
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
@@ -333,6 +538,72 @@
     setup_develop $GLANCE_DIR
 }
 
+# glance_remote_conf() - Return the path to an alternate config file for
+#                        the remote glance clone
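+#                        (e.g. /etc/glance/glance-api.conf maps to
+#                        /etc/glance-remote/glance-api.conf)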
+function glance_remote_conf {
+    echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1")
+}
+
+# start_glance_remote_clone() - Clone the regular glance api worker
+function start_glance_remote_clone {
+    local glance_remote_conf_dir glance_remote_port remote_data
+    local glance_remote_uwsgi
+
+    glance_remote_conf_dir="$(glance_remote_conf "")"
+    glance_remote_port=$(get_random_port)
+    glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)"
+
+    # Clone the existing ready-to-go glance-api setup
+    sudo rm -Rf "$glance_remote_conf_dir"
+    sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir"
+    sudo chown $STACK_USER -R "$glance_remote_conf_dir"
+
+    # Point this worker at different data dirs
+    remote_data="${DATA_DIR}/glance-remote"
+    mkdir -p $remote_data/os_glance_tasks_store \
+          "${remote_data}/os_glance_staging_store"
+    iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \
+           filesystem_store_datadir "${remote_data}/os_glance_staging_store"
+    iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \
+           filesystem_store_datadir "${remote_data}/os_glance_tasks_store"
+
+    # Point this worker to use different cache dir
+    mkdir -p "$remote_data/cache"
+    iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \
+           image_cache_dir "${remote_data}/cache"
+
+    # Change our uwsgi config to listen on our new port
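+    # (e.g. "http-socket = 127.0.0.1:60999" becomes
+    # "http-socket = 127.0.0.1:$glance_remote_port")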
+    sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \
+        "$glance_remote_uwsgi"
+
+    # Update the self-reference url with our new port
+    iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \
+           worker_self_reference_url \
+           $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+                    "$glance_remote_uwsgi")
+
+    # We need to create the systemd service for the clone, but then
+    # change it to include an Environment line to point the WSGI app
+    # at the alternate config directory.
+    write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \
+                               --procname-prefix \
+                               glance-api-remote \
+                               --ini $glance_remote_uwsgi" \
+                               "" "$STACK_USER"
+    iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
+           "Service" "Environment" \
+           "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir"
+
+    # Reload and restart with the new config
+    $SYSTEMCTL daemon-reload
+    $SYSTEMCTL restart devstack@g-api-r
+
+    get_or_create_service glance_remote image_remote "Alternate glance"
+    get_or_create_endpoint image_remote $REGION_NAME \
+                $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+                    $glance_remote_uwsgi)
+}
+
 # start_glance() - Start running processes
 function start_glance {
     local service_protocol=$GLANCE_SERVICE_PROTOCOL
@@ -340,14 +611,17 @@
         if [[ "$WSGI_MODE" != "uwsgi" ]]; then
             start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
         fi
-        start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
     fi
 
-    run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
-    if [[ "$WSGI_MODE" == "uwsgi" ]]; then
-        run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
+    if [[ "$GLANCE_STANDALONE" == False ]]; then
+        run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
     else
-        run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+        run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
+    fi
+
+    if is_service_enabled g-api-r; then
+        echo "Starting the g-api-r clone service..."
+        start_glance_remote_clone
     fi
 
     echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
@@ -359,7 +633,7 @@
 # stop_glance() - Stop running processes
 function stop_glance {
     stop_process g-api
-    stop_process g-reg
+    stop_process g-api-r
 }
 
 # Restore xtrace
diff --git a/lib/horizon b/lib/horizon
index 3d2f68d..b2bf7bc 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -26,9 +26,6 @@
 # Defaults
 # --------
 
-# Set up default directories
-GITDIR["django_openstack_auth"]=$DEST/django_openstack_auth
-
 HORIZON_DIR=$DEST/horizon
 
 # local_settings.py is used to customize Dashboard settings.
@@ -46,8 +43,8 @@
     local value=$4
 
     if [ -z "$section" ]; then
-        sed -e "/^$option/d" -i $local_settings
-        echo -e "\n$option=$value" >> $file
+        sed -e "/^$option/d" -i $file
+        echo "$option = $value" >> $file
     elif grep -q "^$section" $file; then
         local line
         line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
@@ -87,14 +84,16 @@
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
 
+    # Ensure the local_settings.py file ends with EOL (newline)
+    echo >> $local_settings
+
     _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\"
 
     _horizon_config_set $local_settings "" COMPRESS_OFFLINE True
-    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"member\"
 
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
 
-    _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
     # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed
@@ -159,20 +158,6 @@
 
 }
 
-# install_django_openstack_auth() - Collect source and prepare
-function install_django_openstack_auth {
-    if use_library_from_git "django_openstack_auth"; then
-        local dir=${GITDIR["django_openstack_auth"]}
-        git_clone_by_name "django_openstack_auth"
-        # Compile message catalogs before installation
-        _prepare_message_catalog_compilation
-        (cd $dir; $PYTHON setup.py compile_catalog)
-        setup_dev_lib "django_openstack_auth"
-    fi
-    # if we aren't using this library from git, then we just let it
-    # get dragged in by the horizon setup.
-}
-
 # install_horizon() - Collect source and prepare
 function install_horizon {
     # Apache installation, because we mark it NOPRIME
@@ -191,13 +176,6 @@
     stop_apache_server
 }
 
-# NOTE: It can be moved to common functions, but it is only used by compilation
-# of django_openstack_auth catalogs at the moment.
-function _prepare_message_catalog_compilation {
-    pip_install_gr Babel
-}
-
-
 # Restore xtrace
 $_XTRACE_HORIZON
 
diff --git a/lib/infra b/lib/infra
index cf003cc..b983f2b 100644
--- a/lib/infra
+++ b/lib/infra
@@ -29,7 +29,7 @@
 # install_infra() - Collect source and prepare
 function install_infra {
     local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
-    [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
+    [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
     # We don't care about testing git pbr in the requirements venv.
     PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
     PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
diff --git a/lib/keystone b/lib/keystone
index 714f089..0609abd 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -49,11 +49,8 @@
 
 KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
-KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini
 KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
-KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin
 
 # KEYSTONE_DEPLOY defines how keystone is deployed, allowed values:
 # - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi
@@ -64,9 +61,6 @@
     KEYSTONE_DEPLOY=mod_wsgi
 fi
 
-# Select the token persistence backend driver
-KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql}
-
 # Select the Identity backend driver
 KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql}
 
@@ -80,25 +74,17 @@
 KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql}
 
 # Select Keystone's token provider (and format)
-# Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
+# Refer to the keystone docs for supported token providers:
+# https://docs.openstack.org/keystone/latest/admin/token-provider.html
 KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
-# Set Keystone interface configuration
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
-
 # Public facing bits
 KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
 KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
 KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
 KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
-# Bind hosts
-KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
-
 # Set the project for service accounts in Keystone
 SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default}
 SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service}
@@ -109,7 +95,6 @@
 
 # if we are running with SSL use https protocols
 if is_service_enabled tls-proxy; then
-    KEYSTONE_AUTH_PROTOCOL="https"
     KEYSTONE_SERVICE_PROTOCOL="https"
 fi
 
@@ -118,7 +103,7 @@
 KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI
 
 # V3 URIs
-KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3
+KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3
 KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3
 
 # Security compliance
@@ -134,6 +119,12 @@
 # however may not be suitable for real production.
 KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4}
 
+# Cache settings
+KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+
+# Whether to create a keystone admin endpoint for legacy applications
+KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT)
+
 # Functions
 # ---------
 
@@ -154,11 +145,8 @@
         sudo rm -f $(apache_site_config_for keystone)
     else
         stop_process "keystone"
-        # TODO: remove admin at pike-2
         remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
-        remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
         sudo rm -f $(apache_site_config_for keystone-wsgi-public)
-        sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
     fi
 }
 
@@ -171,12 +159,10 @@
     local keystone_certfile=""
     local keystone_keyfile=""
     local keystone_service_port=$KEYSTONE_SERVICE_PORT
-    local keystone_auth_port=$KEYSTONE_AUTH_PORT
     local venv_path=""
 
     if is_service_enabled tls-proxy; then
         keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
-        keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
     fi
     if [[ ${USE_VENV} = True ]]; then
         venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
@@ -185,7 +171,6 @@
     sudo cp $FILES/apache-keystone.template $keystone_apache_conf
     sudo sed -e "
         s|%PUBLICPORT%|$keystone_service_port|g;
-        s|%ADMINPORT%|$keystone_auth_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
         s|%SSLLISTEN%|$keystone_ssl_listen|g;
         s|%SSLENGINE%|$keystone_ssl|g;
@@ -202,25 +187,9 @@
     sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR
 
     if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
-        install -m 600 $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
-        if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then
-            cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI"
-        fi
+        install -m 600 /dev/null $KEYSTONE_CONF
     fi
-    if [[ -f "$KEYSTONE_PASTE_INI" ]]; then
-        iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI"
-    else
-        # compatibility with mixed cfg and paste.deploy configuration
-        KEYSTONE_PASTE_INI="$KEYSTONE_CONF"
-    fi
-
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        # Only Identity v3 API should be available; then disable v2 pipelines
-        inidelete $KEYSTONE_PASTE_INI composite:main \\/v2.0
-        inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0
-    fi
-
-    # Rewrite stock ``keystone.conf``
+    # Populate ``keystone.conf``
     if is_service_enabled ldap; then
         iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains"
         iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True"
@@ -232,19 +201,17 @@
     iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND"
 
     # Enable caching
-    iniset $KEYSTONE_CONF cache enabled "True"
-    iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
-    iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
+    iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE
+    iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND
+    iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS
 
-    iniset_rpc_backend keystone $KEYSTONE_CONF
+    iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
 
     local service_port=$KEYSTONE_SERVICE_PORT
-    local auth_port=$KEYSTONE_AUTH_PORT
 
     if is_service_enabled tls-proxy; then
         # Set the service ports for a proxy to take the originals
         service_port=$KEYSTONE_SERVICE_PORT_INT
-        auth_port=$KEYSTONE_AUTH_PORT_INT
     fi
 
     # Override the endpoints advertised by keystone (the public_endpoint and
@@ -254,7 +221,7 @@
     # don't want the port (in the case of putting keystone on a path in
     # apache).
     iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
-    iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
+    iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_SERVICE_URI
 
     if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
         iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
@@ -262,8 +229,6 @@
 
     iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
 
-    iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND"
-
     # Set up logging
     if [ "$SYSLOG" != "False" ]; then
         iniset $KEYSTONE_CONF DEFAULT use_syslog "True"
@@ -279,7 +244,6 @@
         _config_keystone_apache_wsgi
     else # uwsgi
         write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity"
-        write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin"
     fi
 
     iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
@@ -309,61 +273,52 @@
 # service              --         --
 # --                   --         service
 # --                   --         ResellerAdmin
-# --                   --         Member
+# --                   --         member
 # demo                 admin      admin
-# demo                 demo       Member, anotherrole
+# demo                 demo       member, anotherrole
 # alt_demo             admin      admin
-# alt_demo             alt_demo   Member, anotherrole
-# invisible_to_admin   demo       Member
+# alt_demo             alt_demo   member, anotherrole
+# invisible_to_admin   demo       member
 
 # Group                Users            Roles                 Project
 # ------------------------------------------------------------------
 # admins               admin            admin                 admin
-# nonadmins            demo, alt_demo   Member, anotherrole   demo, alt_demo
+# nonadmins            demo, alt_demo   member, anotherrole   demo, alt_demo
 
 
 # Migrated from keystone_data.sh
 function create_keystone_accounts {
 
-    # The keystone bootstrapping process (performed via keystone-manage bootstrap)
-    # creates an admin user, admin role and admin project. As a sanity check
-    # we exercise the CLI to retrieve the IDs for these values.
+    # The keystone bootstrapping process (performed via keystone-manage
+    # bootstrap) creates an admin user, admin role, member role, and admin
+    # project. As a sanity check we exercise the CLI to retrieve the IDs for
+    # these values.
     local admin_project
     admin_project=$(openstack project show "admin" -f value -c id)
     local admin_user
     admin_user=$(openstack user show "admin" -f value -c id)
     local admin_role="admin"
+    local member_role="member"
 
-    get_or_add_user_domain_role $admin_role $admin_user default
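+    # async_run starts the given command in the background under a named
+    # task so that independent keystone setup calls can run in parallel;
+    # the async_wait calls below collect their results.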
+    async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
 
     # Create service project/role
     get_or_create_domain "$SERVICE_DOMAIN_NAME"
-    get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
+    async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
 
     # Service role, so service users do not have to be admins
-    get_or_create_role service
+    async_run ks-service get_or_create_role service
 
     # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
     # The admin role in swift allows a user to act as an admin for their project,
     # but ResellerAdmin is needed for a user to act as any project. The name of this
     # role is also configurable in swift-proxy.conf
-    get_or_create_role ResellerAdmin
-
-    # The Member role is used by Horizon and Swift so we need to keep it:
-    local member_role="member"
-
-    # Capital Member role is legacy hard coded in Horizon / Swift
-    # configs. Keep it around.
-    get_or_create_role "Member"
-
-    # The reality is that the rest of the roles listed below honestly
-    # should work by symbolic names.
-    get_or_create_role $member_role
+    async_run ks-reseller get_or_create_role ResellerAdmin
 
     # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
     local another_role="anotherrole"
-    get_or_create_role $another_role
+    async_run ks-anotherrole get_or_create_role $another_role
 
     # invisible project - admin can't see this one
     local invis_project
@@ -376,10 +331,12 @@
     demo_user=$(get_or_create_user "demo" \
         "$ADMIN_PASSWORD" "default" "demo@example.com")
 
-    get_or_add_user_project_role $member_role $demo_user $demo_project
-    get_or_add_user_project_role $admin_role $admin_user $demo_project
-    get_or_add_user_project_role $another_role $demo_user $demo_project
-    get_or_add_user_project_role $member_role $demo_user $invis_project
+    async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
+
+    async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+    async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
+    async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
+    async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
 
     # alt_demo
     local alt_demo_project
@@ -388,9 +345,9 @@
     alt_demo_user=$(get_or_create_user "alt_demo" \
         "$ADMIN_PASSWORD" "default" "alt_demo@example.com")
 
-    get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
-    get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
-    get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+    async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
+    async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+    async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
 
     # groups
     local admin_group
@@ -400,11 +357,15 @@
     non_admin_group=$(get_or_create_group "nonadmins" \
         "default" "non-admin group")
 
-    get_or_add_group_project_role $member_role $non_admin_group $demo_project
-    get_or_add_group_project_role $another_role $non_admin_group $demo_project
-    get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
-    get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
-    get_or_add_group_project_role $admin_role $admin_group $admin_project
+    async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project
+    async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project
+    async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
+    async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
+    async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
+
+    async_wait ks-demo-{member,admin,another,invis}
+    async_wait ks-alt-{member,admin,another}
+    async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
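The calls above follow devstack's async pattern: each background task gets a
unique name so it can be awaited later, and brace expansion joins several
names at once. A minimal sketch of the pattern (task names are illustrative):

    async_run my-task-a openstack role create foo
    async_run my-task-b openstack role create bar
    async_wait my-task-{a,b}    # expands to both names, waits for both tasks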
 
     if is_service_enabled ldap; then
         create_ldap_domain
@@ -427,20 +388,20 @@
     fi
 }
 
-# Configure the service to use the auth token middleware.
+# Configure a service to use the auth token middleware.
 #
-# configure_auth_token_middleware conf_file admin_user signing_dir [section]
+# configure_keystone_authtoken_middleware conf_file admin_user IGNORED [section]
 #
 # section defaults to keystone_authtoken, which is where auth_token looks in
 # the .conf file. If the paste config file is used (api-paste.ini) then
 # provide the section name for the auth_token filter.
-function configure_auth_token_middleware {
+function configure_keystone_authtoken_middleware {
     local conf_file=$1
     local admin_user=$2
-    local signing_dir=$3
-    local section=${4:-keystone_authtoken}
+    local section=${3:-keystone_authtoken}
 
     iniset $conf_file $section auth_type password
+    iniset $conf_file $section interface public
     iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
     iniset $conf_file $section username $admin_user
     iniset $conf_file $section password $SERVICE_PASSWORD
@@ -449,8 +410,14 @@
     iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME"
 
     iniset $conf_file $section cafile $SSL_BUNDLE_FILE
-    iniset $conf_file $section signing_dir $signing_dir
-    iniset $conf_file $section memcached_servers localhost:11211
+    iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS
+}
+
+# configure_auth_token_middleware conf_file admin_user IGNORED [section]
+# TODO(frickler): old function for backwards compatibility, remove in U cycle
+function configure_auth_token_middleware {
+    echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead"
+    configure_keystone_authtoken_middleware $1 $2 $4
 }
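For reference, a call such as the following (service name and conf path are
illustrative):

    configure_keystone_authtoken_middleware /etc/glance/glance-api.conf glance

writes a [keystone_authtoken] section roughly equivalent to:

    auth_type = password
    interface = public
    auth_url = $KEYSTONE_SERVICE_URI
    username = glance
    password = $SERVICE_PASSWORD
    project_domain_name = $SERVICE_DOMAIN_NAME
    cafile = $SSL_BUNDLE_FILE
    memcached_servers = $MEMCACHE_SERVERS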
 
 # init_keystone() - Initialize databases, etc.
@@ -469,11 +436,6 @@
     $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync
     time_stop "dbsync"
 
-    if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
-        # Set up certificates
-        rm -rf $KEYSTONE_CONF_DIR/ssl
-        $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF pki_setup
-    fi
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then
         rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
         $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup
@@ -531,8 +493,6 @@
 
     if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
         install_apache_wsgi
-    elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then
-        pip_install uwsgi
     fi
 }
 
@@ -540,7 +500,7 @@
 function start_keystone {
     # Get right service port for testing
     local service_port=$KEYSTONE_SERVICE_PORT
-    local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
+    local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL
     if is_service_enabled tls-proxy; then
         service_port=$KEYSTONE_SERVICE_PORT_INT
         auth_protocol="http"
@@ -550,7 +510,7 @@
         enable_apache_site keystone
         restart_apache_server
     else # uwsgi
-        run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
+        run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
     fi
 
     echo "Waiting for keystone to start..."
@@ -568,7 +528,6 @@
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
         start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
-        start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
     fi
 
     # (re)start memcached to make sure we have a clean memcache.
@@ -590,11 +549,8 @@
 # - ``KEYSTONE_BIN_DIR``
 # - ``ADMIN_PASSWORD``
 # - ``IDENTITY_API_VERSION``
-# - ``KEYSTONE_AUTH_URI``
 # - ``REGION_NAME``
-# - ``KEYSTONE_SERVICE_PROTOCOL``
-# - ``KEYSTONE_SERVICE_HOST``
-# - ``KEYSTONE_SERVICE_PORT``
+# - ``KEYSTONE_SERVICE_URI``
 function bootstrap_keystone {
     $KEYSTONE_BIN_DIR/keystone-manage bootstrap \
         --bootstrap-username admin \
@@ -603,8 +559,16 @@
         --bootstrap-role-name admin \
         --bootstrap-service-name keystone \
         --bootstrap-region-id "$REGION_NAME" \
-        --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \
         --bootstrap-public-url "$KEYSTONE_SERVICE_URI"
+    if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then
+        openstack endpoint create --region "$REGION_NAME" \
+            --os-username admin \
+            --os-user-domain-id default \
+            --os-password "$ADMIN_PASSWORD" \
+            --os-project-name admin \
+            --os-project-domain-id default \
+            keystone admin "$KEYSTONE_SERVICE_URI"
+    fi
 }
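Once bootstrap completes, the catalog can be sanity-checked from the CLI; an
illustrative verification, assuming admin credentials are loaded:

    openstack endpoint list --service keystone
    openstack role list                # should include admin and member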
 
 # create_ldap_domain() - Create domain file and initialize domain with a user
diff --git a/lib/libraries b/lib/libraries
old mode 100644
new mode 100755
index 6d52f64..67ff21f
--- a/lib/libraries
+++ b/lib/libraries
@@ -28,6 +28,7 @@
 GITDIR["cursive"]=$DEST/cursive
 GITDIR["debtcollector"]=$DEST/debtcollector
 GITDIR["futurist"]=$DEST/futurist
+GITDIR["openstacksdk"]=$DEST/openstacksdk
 GITDIR["os-client-config"]=$DEST/os-client-config
 GITDIR["osc-lib"]=$DEST/osc-lib
 GITDIR["osc-placement"]=$DEST/osc-placement
@@ -51,7 +52,6 @@
 GITDIR["oslo.vmware"]=$DEST/oslo.vmware
 GITDIR["osprofiler"]=$DEST/osprofiler
 GITDIR["pycadf"]=$DEST/pycadf
-GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk
 GITDIR["stevedore"]=$DEST/stevedore
 GITDIR["taskflow"]=$DEST/taskflow
 GITDIR["tooz"]=$DEST/tooz
@@ -59,6 +59,7 @@
 # Non oslo libraries are welcomed below as well, this prevents
 # duplication of this code.
 GITDIR["os-brick"]=$DEST/os-brick
+GITDIR["os-resource-classes"]=$DEST/os-resource-classes
 GITDIR["os-traits"]=$DEST/os-traits
 
 # Support entry points installation of console scripts
@@ -72,7 +73,7 @@
     local name=$1
     if use_library_from_git "$name"; then
         git_clone_by_name "$name"
-        setup_dev_lib "$name"
+        setup_dev_lib -bindep "$name"
     fi
 }
 
@@ -91,6 +92,7 @@
     _install_lib_from_source "cursive"
     _install_lib_from_source "debtcollector"
     _install_lib_from_source "futurist"
+    _install_lib_from_source "openstacksdk"
     _install_lib_from_source "osc-lib"
     _install_lib_from_source "osc-placement"
     _install_lib_from_source "os-client-config"
@@ -114,7 +116,6 @@
     _install_lib_from_source "oslo.vmware"
     _install_lib_from_source "osprofiler"
     _install_lib_from_source "pycadf"
-    _install_lib_from_source "python-openstacksdk"
     _install_lib_from_source "stevedore"
     _install_lib_from_source "taskflow"
     _install_lib_from_source "tooz"
@@ -122,6 +123,7 @@
     #
     # os-traits for nova
     _install_lib_from_source "os-brick"
+    _install_lib_from_source "os-resource-classes"
     _install_lib_from_source "os-traits"
     #
     # python client libraries we might need from git can go here
diff --git a/lib/lvm b/lib/lvm
index f047181..b826c1b 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -99,8 +99,15 @@
     if ! sudo vgs $vg; then
        # Only create if the file doesn't already exist
         [[ -f $backing_file ]] || truncate -s $size $backing_file
+
+        local directio=""
+        # Check to see if we can do direct-io
+        if losetup -h | grep -q direct-io; then
+            directio="--direct-io=on"
+        fi
+
         local vg_dev
-        vg_dev=`sudo losetup -f --show $backing_file`
+        vg_dev=$(sudo losetup -f --show $directio $backing_file)
 
         # Only create volume group if it doesn't already exist
         if ! sudo vgs $vg; then
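Direct I/O on the loop device avoids double-buffering pages between the
backing file and the block device. The feature probe above can be reproduced
by hand; a sketch:

    losetup -h | grep -q direct-io && echo "direct-io supported"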
@@ -117,13 +124,9 @@
     local vg=$1
     local size=$2
 
-    # Start the lvmetad and tgtd services
-    if is_fedora || is_suse; then
-        # services is not started by default
-        start_service lvm2-lvmetad
-        if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
-            start_service tgtd
-        fi
+    # Start the tgtd service on Fedora and SUSE if tgtadm is used
+    if { is_fedora || is_suse; } && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+        start_service tgtd
     fi
 
     # Start with a clean volume group
diff --git a/lib/neutron b/lib/neutron
index 21c8d4c..885df97 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -28,9 +28,25 @@
 # Set up default directories
 GITDIR["python-neutronclient"]=$DEST/python-neutronclient
 
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be stable
+# enough
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
 NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
 NEUTRON_DIR=$DEST/neutron
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
+
+NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
+# Distributed Virtual Router (DVR) configuration
+# Can be:
+# - ``legacy``          - No DVR functionality
+# - ``dvr_snat``        - Controller or single node DVR
+# - ``dvr``             - Compute node in multi-node DVR
+# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
+#
+# Default is 'dvr_snat' since it can handle both DVR and legacy routers
+NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat}
 
 NEUTRON_BIN_DIR=$(get_python_exec_prefix)
 NEUTRON_DHCP_BINARY="neutron-dhcp-agent"
@@ -38,13 +54,16 @@
 NEUTRON_CONF_DIR=/etc/neutron
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
+NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
 
 NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
 NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
 NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/
+NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
 
 NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
 
 # By default, use the ML2 plugin
 NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
@@ -75,14 +94,22 @@
 NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
 NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
 
-# This is needed because _neutron_ovs_base_configure_l3_agent will set
-# external_network_bridge
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 # This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
 # an external network bridge
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
 PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
 
+# Network type - defaults to vxlan, but vlan-based jobs can override it using
+# the legacy environment variable as well as a new variable in greater
+# alignment with the naming scheme of this plugin.
+NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan}
+
+NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}}
+
+# Physical network for VLAN network usage.
+NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+
+
 # Additional neutron api config files
 declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
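An illustrative local.conf fragment that switches tenant networks to VLAN
using the variables above (physical network name and range are examples):

    NEUTRON_TENANT_NETWORK_TYPE=vlan
    NEUTRON_PHYSICAL_NETWORK=physnet1
    NEUTRON_TENANT_VLAN_RANGE=200:250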
 
@@ -100,7 +127,9 @@
 # Test if any Neutron services are enabled
 # is_neutron_enabled
 function is_neutron_legacy_enabled {
-    [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1
+    # First remove every "neutron-" prefix from the DISABLED_SERVICES list
+    disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g')
+    [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1
     [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
     return 1
 }
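The sed pass prevents names like "neutron-api" from matching the bare
",neutron" test. A sketch of the transformation (values illustrative):

    DISABLED_SERVICES=",neutron-api,horizon"
    echo $DISABLED_SERVICES | sed 's/neutron-//g'    # prints ",api,horizon"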
@@ -163,9 +192,14 @@
     # Neutron API server & Neutron plugin
     if is_service_enabled neutron-api; then
         local policy_file=$NEUTRON_CONF_DIR/policy.json
-        cp $NEUTRON_DIR/etc/policy.json $policy_file
         # Allow neutron user to administer neutron to match neutron account
-        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
+        # NOTE(amotoki): This is required for nova to work correctly with neutron.
+        if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+            cp $NEUTRON_DIR/etc/policy.json $policy_file
+            sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
+        else
+            echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $policy_file
+        fi
 
         cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
 
@@ -173,17 +207,28 @@
 
         iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
         iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
+        iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
 
         iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
-        configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
-        configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova
+        configure_keystone_authtoken_middleware $NEUTRON_CONF neutron
+        configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
 
-        # Configure VXLAN
-        # TODO(sc68cal) not hardcode?
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
+        # Configure tenant network type
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE
+
+        local mech_drivers="openvswitch"
+        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+            mech_drivers+=",l2population"
+        else
+            mech_drivers+=",linuxbridge"
+        fi
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
+
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
+        if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE}
+        fi
         if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
             neutron_ml2_extension_driver_add port_security
         fi
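With the defaults above and distributed routing disabled, the iniset calls
render an ml2 configuration roughly like this (illustrative):

    [ml2]
    tenant_network_types = vxlan
    mechanism_drivers = openvswitch,linuxbridge

    [ml2_type_vxlan]
    vni_ranges = 1001:2000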
@@ -199,9 +244,15 @@
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
             iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
             iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
-        else
-            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables_hybrid
+        elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
             iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+
+            if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+                iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
+                iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
+                iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True
+            fi
         fi
 
         if ! running_in_container; then
@@ -236,6 +287,10 @@
         else
             iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
         fi
+
+        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+            iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE
+        fi
     fi
 
     # Metadata
@@ -243,20 +298,20 @@
         cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
 
         iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
+        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST
         iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
         # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
         configure_root_helper_options $NEUTRON_META_CONF
 
         # TODO(dtroyer): remove the v2.0 hard code below
         iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
-        configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT
+        configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT
     fi
 
     # Format logging
     setup_logging $NEUTRON_CONF
 
-    if is_service_enabled tls-proxy; then
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
         iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -295,26 +350,25 @@
 }
 
 # Make Neutron-required changes to nova.conf
+# Takes a single optional argument, the config file to update;
+# if not passed, $NOVA_CONF is used.
 function configure_neutron_nova_new {
-    iniset $NOVA_CONF DEFAULT use_neutron True
-    iniset $NOVA_CONF neutron auth_type "password"
-    iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $NOVA_CONF neutron username neutron
-    iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
-    iniset $NOVA_CONF neutron user_domain_name "Default"
-    iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME"
-    iniset $NOVA_CONF neutron project_domain_name "Default"
-    iniset $NOVA_CONF neutron auth_strategy $NEUTRON_AUTH_STRATEGY
-    iniset $NOVA_CONF neutron region_name "$REGION_NAME"
-    iniset $NOVA_CONF neutron url $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT
-
-    iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
+    local conf=${1:-$NOVA_CONF}
+    iniset $conf neutron auth_type "password"
+    iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
+    iniset $conf neutron username neutron
+    iniset $conf neutron password "$SERVICE_PASSWORD"
+    iniset $conf neutron user_domain_name "Default"
+    iniset $conf neutron project_name "$SERVICE_TENANT_NAME"
+    iniset $conf neutron project_domain_name "Default"
+    iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
+    iniset $conf neutron region_name "$REGION_NAME"
 
     # optionally set options in nova_conf
-    neutron_plugin_create_nova_conf
+    neutron_plugin_create_nova_conf $conf
 
     if is_service_enabled neutron-metadata-agent; then
-        iniset $NOVA_CONF neutron service_metadata_proxy "True"
+        iniset $conf neutron service_metadata_proxy "True"
     fi
 
 }
@@ -325,6 +379,15 @@
 
 # create_neutron_accounts() - Create required service accounts
 function create_neutron_accounts_new {
+    local neutron_url
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+    else
+        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+    fi
+
+
     if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
 
         create_service_user "neutron"
@@ -332,18 +395,10 @@
         neutron_service=$(get_or_create_service "neutron" \
             "network" "Neutron Service")
         get_or_create_endpoint $neutron_service \
-            "$REGION_NAME" \
-            "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/"
+            "$REGION_NAME" "$neutron_url"
     fi
 }
 
-# create_neutron_cache_dir() - Part of the init_neutron() process
-function create_neutron_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR
-    rm -f $NEUTRON_AUTH_CACHE_DIR/*
-}
-
 # init_neutron() - Initialize databases, etc.
 function init_neutron_new {
 
@@ -353,8 +408,6 @@
     # Run Neutron db migrations
     $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads
     time_stop "dbsync"
-
-    create_neutron_cache_dir
 }
 
 # install_neutron() - Collect source and prepare
@@ -395,6 +448,7 @@
 function start_neutron_api {
     local service_port=$NEUTRON_SERVICE_PORT
     local service_protocol=$NEUTRON_SERVICE_PROTOCOL
+    local neutron_url
     if is_service_enabled tls-proxy; then
         service_port=$NEUTRON_SERVICE_PORT_INT
         service_protocol="http"
@@ -408,17 +462,24 @@
         opts+=" --config-file $cfg_file"
     done
 
-    # Start the Neutron service
-    # TODO(sc68cal) Stop hard coding this
-    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
-
-    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then
-        die $LINENO "neutron-api did not start"
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
+        enable_service neutron-rpc-server
+        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
+    else
+        # Start the Neutron service
+        # TODO(sc68cal) Stop hard coding this
+        run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
+        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+        fi
     fi
 
-    # Start proxy if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+    if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
+        die $LINENO "neutron-api did not start"
     fi
 }
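The two deployment modes publish different URLs for wait_for_service to poll;
with illustrative default values they look like:

    uwsgi:    $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
    eventlet: $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:9696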
 
@@ -438,7 +499,7 @@
     if is_service_enabled neutron-l3; then
         run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF"
     fi
-    if is_service_enabled neutron-api; then
+    if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
         # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
         # of the code in lib/neutron_plugins/services/l3
         if type -p neutron_plugin_create_initial_networks > /dev/null; then
@@ -465,6 +526,10 @@
         stop_process $serv
     done
 
+    if is_service_enabled neutron-rpc-server; then
+        stop_process neutron-rpc-server
+    fi
+
     if is_service_enabled neutron-dhcp; then
         stop_process neutron-dhcp
         pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
@@ -519,6 +584,13 @@
 # neutron-legacy is removed.
 # TODO(sc68cal) Remove when neutron-legacy is no more.
 function cleanup_neutron {
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-api
+        stop_process neutron-rpc-server
+        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+        sudo rm -f $(apache_site_config_for neutron-api)
+    fi
+
     if is_neutron_legacy_enabled; then
         # Call back to old function
         cleanup_mutnauq "$@"
@@ -534,14 +606,32 @@
     else
         configure_neutron_new "$@"
     fi
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
+    fi
 }
 
 function configure_neutron_nova {
     if is_neutron_legacy_enabled; then
         # Call back to old function
-        create_nova_conf_neutron "$@"
+        create_nova_conf_neutron $NOVA_CONF
+        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
+            for i in $(seq 1 $NOVA_NUM_CELLS); do
+                local conf
+                conf=$(conductor_conf $i)
+                create_nova_conf_neutron $conf
+            done
+        fi
     else
-        configure_neutron_nova_new "$@"
+        configure_neutron_nova_new $NOVA_CONF
+        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
+            for i in $(seq 1 $NOVA_NUM_CELLS); do
+                local conf
+                conf=$(conductor_conf $i)
+                configure_neutron_nova_new $conf
+            done
+        fi
     fi
 }
 
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index e2e0bb9..704d2e8 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -50,8 +50,6 @@
 # See "Neutron Network Configuration" below for additional variables
 # that must be set in localrc for connectivity across hosts with
 # Neutron.
-#
-# With Neutron networking the NETWORK_MANAGER variable is ignored.
 
 # Settings
 # --------
@@ -60,8 +58,6 @@
 # Neutron Network Configuration
 # -----------------------------
 
-deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future"
-
 if is_service_enabled tls-proxy; then
     Q_PROTOCOL="https"
 fi
@@ -73,7 +69,6 @@
 
 NEUTRON_DIR=$DEST/neutron
 NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # Support entry points installation of console scripts
 if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
@@ -86,6 +81,15 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be stable
+# enough
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
 AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -111,7 +115,7 @@
 # Default protocol
 Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
 # Default listen address
-Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
@@ -121,7 +125,7 @@
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
 Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
 # Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
+Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
@@ -222,15 +226,17 @@
 # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
 OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
 
-default_route_dev=$(ip route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
 # With the linuxbridge agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
 # the name of the network interface to use for the physical
 # network.
 #
 # Example: ``LB_PHYSICAL_INTERFACE=eth1``
-LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev}
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+    LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
 
 # When Neutron tunnels are enabled it is needed to specify the
 # IP address of the end point in the local server. This IP is set
@@ -269,6 +275,10 @@
 
 # L3 Service functions
 source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+
 # Use security group or not
 if has_neutron_plugin_security_group; then
     Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
@@ -358,6 +368,16 @@
         _configure_neutron_ceilometer_notifications
     fi
 
+    if [[ $Q_AGENT == "ovn" ]]; then
+        configure_ovn
+        configure_ovn_plugin
+    fi
+
+    # Configure Neutron's advanced services
+    if is_service_enabled q-trunk neutron-trunk; then
+        configure_trunk_extension
+    fi
+
     iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
     # devstack is not a tool for running uber scale OpenStack
     # clouds, therefore running without a dedicated RPC worker
@@ -366,32 +386,26 @@
 }
 
 function create_nova_conf_neutron {
-    iniset $NOVA_CONF DEFAULT use_neutron True
-    iniset $NOVA_CONF neutron auth_type "password"
-    iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI"
-    iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
-    iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
-    iniset $NOVA_CONF neutron user_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $NOVA_CONF neutron project_name "$SERVICE_PROJECT_NAME"
-    iniset $NOVA_CONF neutron project_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
-    iniset $NOVA_CONF neutron region_name "$REGION_NAME"
-    iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
-
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-        iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-    fi
+    local conf=${1:-$NOVA_CONF}
+    iniset $conf neutron auth_type "password"
+    iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
+    iniset $conf neutron username "$Q_ADMIN_USERNAME"
+    iniset $conf neutron password "$SERVICE_PASSWORD"
+    iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
+    iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
+    iniset $conf neutron region_name "$REGION_NAME"
 
     # optionally set options in nova_conf
-    neutron_plugin_create_nova_conf
+    neutron_plugin_create_nova_conf $conf
 
     if is_service_enabled q-meta; then
-        iniset $NOVA_CONF neutron service_metadata_proxy "True"
+        iniset $conf neutron service_metadata_proxy "True"
     fi
 
-    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
-    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
+    iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_mutnauq_accounts() - Set up common required neutron accounts
@@ -402,6 +416,13 @@
 
 # Migrated from keystone_data.sh
 function create_mutnauq_accounts {
+    local neutron_url
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
+    else
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
+    fi
+
     if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
 
         create_service_user "neutron"
@@ -409,8 +430,7 @@
         get_or_create_service "neutron" "network" "Neutron Service"
         get_or_create_endpoint \
             "network" \
-            "$REGION_NAME" \
-            "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
+            "$REGION_NAME" "$neutron_url"
     fi
 }
 
@@ -434,6 +454,10 @@
 
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
+
+    if [[ $Q_AGENT == "ovn" ]]; then
+        install_ovn
+    fi
 }
 
 # install_neutron_agent_packages() - Collect source and prepare
@@ -455,11 +479,28 @@
     fi
 }
 
+# Start running OVN processes
+function start_ovn_services {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        init_ovn
+        start_ovn
+        if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
+            if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
+                echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
+                echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
+            else
+                create_public_bridge
+            fi
+        fi
+    fi
+}
+
 # Start running processes
 function start_neutron_service_and_check {
     local service_port=$Q_PORT
     local service_protocol=$Q_PROTOCOL
     local cfg_file_options
+    local neutron_url
 
     cfg_file_options="$(determine_config_files neutron-server)"
 
@@ -468,16 +509,24 @@
         service_protocol="http"
     fi
     # Start the Neutron service
-    run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        enable_service neutron-api
+        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
+        enable_service neutron-rpc-server
+        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+    else
+        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+        neutron_url=$service_protocol://$Q_HOST:$service_port
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+        fi
+    fi
     echo "Waiting for Neutron to start..."
 
-    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port"
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
     test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-
-    # Start proxy if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
-    fi
 }
 
 # Control of the l2 agent is separated out to make it easier to test partial
@@ -528,7 +577,12 @@
         [ ! -z "$pid" ] && sudo kill -9 $pid
     fi
 
-    stop_process q-svc
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-rpc-server
+        stop_process neutron-api
+    else
+        stop_process q-svc
+    fi
 
     if is_service_enabled q-l3; then
         sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
@@ -553,6 +607,10 @@
 function stop_mutnauq {
     stop_mutnauq_other
     stop_mutnauq_l2_agent
+
+    if [[ $Q_AGENT == "ovn" ]]; then
+        stop_ovn
+    fi
 }
 
 # _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
@@ -600,7 +658,7 @@
             IP_UP="sudo ip link set $to_intf up"
             if [[ "$af" == "inet" ]]; then
                 IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
-                ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
+                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
             fi
         fi
 
@@ -610,6 +668,27 @@
     fi
 }
 
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+    # If we've been given a PUBLIC_INTERFACE to take over, then we assume
+    # that we can own the whole thing and pivot it into the OVS
+    # bridge. If not, we're probably on a single-interface
+    # machine, and we just set up NAT so that fixed guests can get out.
+    if [[ -n "$PUBLIC_INTERFACE" ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        fi
+    else
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
+    fi
+}
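On a single-interface host, the fallback amounts to an ordinary outbound
masquerade per default-route device; an illustrative expansion with example
values:

    sudo iptables -t nat -A POSTROUTING -o eth0 -s 172.24.4.0/24 -j MASQUERADE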
+
 # cleanup_mutnauq() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_mutnauq {
@@ -646,6 +725,10 @@
     for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
         sudo ip netns delete ${ns}
     done
+
+    if [[ $Q_AGENT == "ovn" ]]; then
+        cleanup_ovn
+    fi
 }
 
 
@@ -666,10 +749,15 @@
     cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
 
     Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
-    cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
 
     # allow neutron user to administer neutron to match neutron account
-    sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    # NOTE(amotoki): This is required for nova to work correctly with neutron.
+    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    else
+        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+    fi
 
     # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
     # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
@@ -711,7 +799,7 @@
     # Format logging
     setup_logging $NEUTRON_CONF
 
-    if is_service_enabled tls-proxy; then
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
         iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -753,7 +841,7 @@
     cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
 
     iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
+    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
     iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
     iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
@@ -805,13 +893,13 @@
     iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
 
     iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
-    _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken
+    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
 
     # Configuration for neutron notifications to nova.
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
 
-    configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova
+    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
 
     # Configure plugin
     neutron_plugin_configure_service
@@ -897,15 +985,6 @@
     fi
 }
 
-# Configures keystone integration for neutron service
-function _neutron_setup_keystone {
-    local conf_file=$1
-    local section=$2
-
-    create_neutron_cache_dir
-    configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section
-}
-
 function _neutron_setup_interface_driver {
 
     # ovs_use_veth needs to be set before the plugin configuration
@@ -925,7 +1004,7 @@
 }
 
 function _get_net_id {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}'
+    openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
 }
 
 function _get_probe_cmd_prefix {
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 52c6ad5..d3f5bd5 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Neuton Big Switch/FloodLight plugin
+# Neutron Big Switch/FloodLight plugin
 # ------------------------------------
 
 # Save trace setting
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index f2302e3..bdeaf0f 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -8,21 +8,23 @@
 set +o xtrace
 
 function neutron_lb_cleanup {
-    sudo ip link set $PUBLIC_BRIDGE down
-    sudo brctl delbr $PUBLIC_BRIDGE
+    sudo ip link delete $PUBLIC_BRIDGE
 
+    bridge_list=$(ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/)
+    if [[ -z "$bridge_list" ]]; then
+        return
+    fi
     if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then
-        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
+        for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
             sudo ip link delete $port
         done
     elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then
-        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
+        for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
             sudo ip link delete $port
         done
     fi
-    for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do
-        sudo ip link set $bridge down
-        sudo brctl delbr $bridge
+    for bridge in $(echo $bridge_list | grep -o -e brq[0-9a-f\-]*); do
+        sudo ip link delete $bridge
     done
 }
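The cleanup now relies solely on iproute2 and sysfs instead of the deprecated
bridge-utils package; the rough equivalences are (illustrative):

    brctl addbr br-ex   ->  ip link add br-ex type bridge
    brctl delbr br-ex   ->  ip link delete br-ex
    brctl show          ->  ls /sys/class/net/*/bridge/bridge_id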
 
@@ -36,7 +38,7 @@
 }
 
 function neutron_plugin_install_agent_packages {
-    install_package bridge-utils
+    :
 }
 
 function neutron_plugin_configure_dhcp_agent {
@@ -46,7 +48,7 @@
 
 function neutron_plugin_configure_l3_agent {
     local conf_file=$1
-    sudo brctl addbr $PUBLIC_BRIDGE
+    sudo ip link add $PUBLIC_BRIDGE type bridge
     set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
 }
 
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index c5a4c02..e1f868f 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -7,9 +7,16 @@
 _XTRACE_NEUTRON_ML2=$(set +o | grep xtrace)
 set +o xtrace
 
+# Default OVN L2 agent
+Q_AGENT=${Q_AGENT:-ovn}
+if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
+    source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
+fi
+
 # Enable this to simply and quickly enable tunneling with ML2.
-# Select either 'gre', 'vxlan', or 'gre,vxlan'
-Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"}
+# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'.
+# For ML2/OVN use 'geneve'.
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
 # This has to be set here since the agent will set this in the config file
 if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then
     Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE
@@ -17,14 +24,8 @@
     Q_TUNNEL_TYPES=gre
 fi
 
-# Default openvswitch L2 agent
-Q_AGENT=${Q_AGENT:-openvswitch}
-if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
-    source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
-fi
-
 # List of MechanismDrivers to load
-Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge}
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn}
 # Default GRE TypeDriver options
 Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES}
 # Default VXLAN TypeDriver options
@@ -44,7 +45,7 @@
 # L3 Plugin to load for ML2
 # For some flat network environment, they not want to extend L3 plugin.
 # Make sure it is able to set empty to ML2_L3_PLUGIN.
-ML2_L3_PLUGIN=${ML2_L3_PLUGIN-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
+ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router}
 
 function populate_ml2_config {
     CONF=$1
@@ -147,6 +148,7 @@
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True
     fi
 }
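Deployments that want to keep the previous ML2/OVS behavior can override the
new defaults in local.conf; an illustrative fragment:

    Q_AGENT=openvswitch
    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge
    Q_ML2_TENANT_NETWORK_TYPE=vxlan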
 
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 1c04aaa..8c75e15 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -8,10 +8,9 @@
 set +o xtrace
 
 function neutron_plugin_create_nova_conf {
+    local conf="$1"
     NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
-    iniset $NOVA_CONF neutron ovs_bridge $NOVA_OVS_BRIDGE
-    LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+    iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE
 }
 
 function neutron_plugin_install_agent_packages {
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index b65a258..7fed8bf 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -15,6 +15,10 @@
 
 function neutron_plugin_install_agent_packages {
     _neutron_ovs_base_install_agent_packages
+    if use_library_from_git "os-ken"; then
+        git_clone_by_name "os-ken"
+        setup_dev_lib "os-ken"
+    fi
 }
 
 function neutron_plugin_configure_dhcp_agent {
@@ -41,8 +45,10 @@
     # Setup physical network bridge mappings.  Override
     # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
     # complex physical network configurations.
-    if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
-        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+    if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then
+            OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+        fi
 
         # Configure bridge manually with physical interface as port for multi-node
         _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
new file mode 100644
index 0000000..1f737fb
--- /dev/null
+++ b/lib/neutron_plugins/ovn_agent
@@ -0,0 +1,796 @@
+#!/bin/bash
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+# Global Sources
+# --------------
+
+# There are some ovs functions OVN depends on that must be sourced from
+# the ovs neutron plugins.
+source ${TOP_DIR}/lib/neutron_plugins/ovs_base
+source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
+
+# Load devstack's ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Defaults
+# --------
+
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT)
+
+# Set variables for building OVN from source
+OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
+OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
+OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
+OVN_BRANCH=${OVN_BRANCH:-v20.06.1}
+# The commit that removed the OVN bits from the OVS tree. It is not present
+# in the OVN tree and is used to determine whether OVN is still part of OVS.
+# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d
+OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d
+
+if is_service_enabled tls-proxy; then
+    OVN_PROTO=ssl
+else
+    OVN_PROTO=tcp
+fi
+
+# How to connect to ovsdb-server hosting the OVN SB database.
+OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}
+
+# How to connect to ovsdb-server hosting the OVN NB database
+OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}
+
+# ml2/config for neutron_sync_mode
+OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
+
+# DNS servers to be used with the internal_dns extension, but only when
+# DNS is not configured on the subnet itself.
+OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}
+
+# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
+# hypervisor/chassis where a router's gateway should be hosted in OVN. The
+# default OVN L3 scheduler is leastloaded.
+OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
+
+# A UUID to uniquely identify this system.  If one is not specified, a random
+# one will be generated.  A randomly generated UUID will be saved in a file
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart
+# Open vSwitch service.
+OVN_UUID=${OVN_UUID:-}
+
+# Whether or not to build the openvswitch kernel module from ovs.  This is required
+# unless the distro kernel includes ovs+conntrack support.
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+
+# Whether or not to install the ovs python module from ovs source.  This can be
+# used to test and validate new ovs python features.  This should only be used
+# for development purposes since the ovs python version is controlled by OpenStack
+# requirements.
+OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
+
+# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
+# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6), which is determined
+# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
+# configure the MTU DHCP option.
+OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
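Worked example of the tenant MTU that results from the default overhead on a
1500-byte physical network with IPv4 overlay endpoints:

    1500 - (38 + 20) = 1442    # advertised via the DHCP MTU option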
+
+# The log level of the OVN databases (north and south).
+# Supported log levels are: off, emer, err, warn, info or dbg.
+# More information about log levels can be found at
+# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
+OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
+
+OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
+OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+
+# If True (default) the node will be considered a gateway node.
+ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW)
+OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
+
+export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+    OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+fi
+
+OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
+
+OVS_PREFIX=
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    OVS_PREFIX=/usr/local
+fi
+OVS_SBINDIR=$OVS_PREFIX/sbin
+OVS_BINDIR=$OVS_PREFIX/bin
+OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch
+OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch
+OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts
+OVS_DATADIR=$DATA_DIR/ovs
+OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
+
+OVN_DATADIR=$DATA_DIR/ovn
+OVN_SHAREDIR=$OVS_PREFIX/share/ovn
+OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts
+OVN_RUNDIR=$OVS_PREFIX/var/run/ovn
+
+NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix)
+NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent"
+
+STACK_GROUP="$( id --group --name "$STACK_USER" )"
+
+OVN_NORTHD_SERVICE=ovn-northd.service
+if is_ubuntu; then
+    # The ovn-central.service file on Ubuntu is responsible for starting
+    # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service)
+    OVN_NORTHD_SERVICE=ovn-central.service
+fi
+OVSDB_SERVER_SERVICE=ovsdb-server.service
+OVS_VSWITCHD_SERVICE=ovs-vswitchd.service
+OVN_CONTROLLER_SERVICE=ovn-controller.service
+OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+    OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service
+    OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service
+    OVN_NORTHD_SERVICE=devstack@ovn-northd.service
+    OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service
+    OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service
+fi
+
+# Defaults Overwrite
+# ------------------
+
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger}
+Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve}
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
+Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"}
+Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos}
+# this one allows empty:
+ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"}
+
+Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100}
+Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25}
+Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter}
+
+# Utility Functions
+# -----------------
+
+function wait_for_sock_file {
+    local count=0
+    while [ ! -S $1 ]; do
+        sleep 1
+        count=$((count+1))
+        if [ "$count" -gt 5 ]; then
+            die $LINENO "Socket $1 not found"
+        fi
+    done
+}
+
+function use_new_ovn_repository {
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
+        return 0
+    fi
+    if [ -z "$is_new_ovn" ]; then
+        local ovs_repo_dir=$DEST/$OVS_REPO_NAME
+        if [ ! -d $ovs_repo_dir ]; then
+            git_timed clone $OVS_REPO $ovs_repo_dir
+            pushd $ovs_repo_dir
+            git checkout $OVS_BRANCH
+            popd
+        else
+            clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH
+        fi
+        # Check the split commit exists in the current branch
+        pushd $ovs_repo_dir
+        git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH
+        is_new_ovn=$?
+        popd
+    fi
+    return $is_new_ovn
+}
+
+# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge
+# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup
+# removed. The call is not relevant for OVN, as it is specific to the use
+# of Neutron's OVS agent and hangs when running stack.sh because
+# neutron-ovs-cleanup uses the OVSDB native interface.
+function ovn_base_setup_bridge {
+    local bridge=$1
+    local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15"
+
+    if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
+        addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
+    fi
+
+    $addbr_cmd
+    sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
+}
+
+function _start_process {
+    $SYSTEMCTL daemon-reload
+    $SYSTEMCTL enable $1
+    $SYSTEMCTL restart $1
+}
+
+function _run_process {
+    local service=$1
+    local cmd="$2"
+    local stop_cmd="$3"
+    local group=$4
+    local user=${5:-$STACK_USER}
+
+    local systemd_service="devstack@$service.service"
+    local unit_file="$SYSTEMD_DIR/$systemd_service"
+    local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
+
+    echo "Starting $service executed command": $cmd
+
+    write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+    iniset -sudo $unit_file "Service" "Type" "forking"
+    iniset -sudo $unit_file "Service" "RemainAfterExit" "yes"
+    iniset -sudo $unit_file "Service" "KillMode" "mixed"
+    iniset -sudo $unit_file "Service" "LimitNOFILE" "65536"
+    iniset -sudo $unit_file "Service" "Environment" "$environment"
+    if [ -n "$stop_cmd" ]; then
+        iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd"
+    fi
+
+    _start_process $systemd_service
+
+    local testcmd="test -e $OVS_RUNDIR/$service.pid"
+    test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
+    sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info
+}
+
+function clone_repository {
+    local repo=$1
+    local dir=$2
+    local branch=$3
+    # Set ERROR_ON_CLONE to false to avoid the need of having the
+    # repositories like OVN and OVS in the required_projects of the job
+    # definition.
+    ERROR_ON_CLONE=false git_clone $repo $dir $branch
+}
+
+function create_public_bridge {
+    # Create the public bridge that OVN will use
+    sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15
+    sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE
+    _configure_public_network_connectivity
+}
+
+function _disable_libvirt_apparmor {
+    if ! sudo aa-status --enabled ; then
+        return 0
+    fi
+    # NOTE(arosen): This is used as a work around to allow newer versions
+    # of libvirt to work with ovs configured ports. See LP#1466631.
+    # This requires the apparmor-utils package.
+    install_package apparmor-utils
+    # Put the libvirtd profile into complain mode, effectively disabling
+    # AppArmor enforcement for it.
+    sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd
+}
+
+
+# OVN compilation functions
+# -------------------------
+
+
+# compile_ovn() - Compile OVN from source and load needed modules
+#                 Accepts three optional parameters:
+#                   - the first, False by default, controls whether kernel
+#                     modules are built and installed
+#                   - the second defines the prefix for the OVN compilation
+#                   - the third defines the localstatedir for the OVN
+#                     single-machine runtime
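+#
+# Illustrative call (hypothetical values; install_ovn below simply passes
+# $OVN_BUILD_MODULES):
+#
+#   compile_ovn False /usr/local /usr/local/var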
+function compile_ovn {
+    local build_modules=${1:-False}
+    local prefix=$2
+    local localstatedir=$3
+
+    if [ -n "$prefix" ]; then
+        prefix="--prefix=$prefix"
+    fi
+
+    if [ -n "$localstatedir" ]; then
+        localstatedir="--localstatedir=$localstatedir"
+    fi
+
+    clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH
+    pushd $DEST/$OVN_REPO_NAME
+
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir
+    fi
+    make -j$(($(nproc) + 1))
+    sudo make install
+    popd
+}
+
+
+# OVN Neutron driver functions
+# ----------------------------
+
+# OVN service sanity check
+function ovn_sanity_check {
+    if is_service_enabled q-agt neutron-agt; then
+        die $LINENO "The q-agt/neutron-agt service must be disabled with OVN."
+    elif is_service_enabled q-l3 neutron-l3; then
+        die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
+    elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then
+        die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS"
+    elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then
+        die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN"
+    fi
+}
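+
+# For instance, a local.conf that passes these checks would disable the
+# legacy agents and keep the default OVN mechanism/type drivers, e.g.:
+#
+#   disable_service q-agt q-l3
+#   enable_service ovn-northd ovn-controller q-ovn-metadata-agent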
+
+# install_ovn() - Collect source and prepare
+function install_ovn {
+    if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then
+        echo "Installation of OVS from source disabled."
+        return 0
+    fi
+
+    echo "Installing OVN and dependent packages"
+
+    # Check the OVN configuration
+    ovn_sanity_check
+
+    # Install tox, used to generate the config (see devstack/override-defaults)
+    pip_install tox
+
+    sudo mkdir -p $OVS_RUNDIR
+    sudo chown $(whoami) $OVS_RUNDIR
+    # NOTE(lucasagomes): To keep things simpler, let's reuse the same
+    # RUNDIR for both OVS and OVN. This way we avoid having to specify the
+    # --db option in the ovn-{n,s}bctl commands while playing with DevStack
+    sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
+
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+        # If OVS is already installed, remove it, because we're about to
+        # re-install it from source.
+        for package in openvswitch openvswitch-switch openvswitch-common; do
+            if is_package_installed $package ; then
+                uninstall_package $package
+            fi
+        done
+
+        remove_ovs_packages
+        sudo rm -f $OVS_RUNDIR/*
+
+        compile_ovs $OVN_BUILD_MODULES
+        if use_new_ovn_repository; then
+            compile_ovn $OVN_BUILD_MODULES
+        fi
+
+        sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
+        sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch
+        sudo mkdir -p $OVS_PREFIX/var/log/ovn
+        sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
+    else
+        fixup_ovn_centos
+        install_package $(get_packages openvswitch)
+        install_package $(get_packages ovn)
+    fi
+
+    # Ensure that the OVS commands are accessible in the PATH
+    export PATH=$OVS_BINDIR:$PATH
+
+    # Archive any existing log files before new ones are created
+    local log_archive_dir=$LOGDIR/archive
+    mkdir -p $log_archive_dir
+    for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do
+        if [ -f "$LOGDIR/$logfile" ] ; then
+            mv "$LOGDIR/$logfile"  "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}"
+        fi
+    done
+
+    # Install ovsdbapp from source if requested
+    if use_library_from_git "ovsdbapp"; then
+        git_clone_by_name "ovsdbapp"
+        setup_dev_lib "ovsdbapp"
+    fi
+
+    # Install ovs python module from ovs source.
+    if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then
+        sudo pip uninstall -y ovs
+        # Clone the OVS repository if it's not yet present
+        clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH
+        sudo pip install -e $DEST/$OVS_REPO_NAME/python
+    fi
+}
+
+# filter_network_api_extensions() - Remove API extensions not supported by
+# the OVN driver from the list of enabled API extensions
+function filter_network_api_extensions {
+    SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \
+        'from neutron.common.ovn import extensions ;\
+        print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))')
+    SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \
+        'from neutron.common.ovn import extensions ;\
+        print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))')
+    if is_service_enabled q-qos neutron-qos ; then
+        SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos"
+    fi
+    NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS}
+    extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u)
+    supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u)
+    enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext"))
+    disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext"))
+
+    # Log a message in case some extensions had to be disabled because
+    # they are not supported by the OVN driver
+    if [ ! -z "$disabled_ext" ]; then
+        _disabled=$(echo $disabled_ext | tr ' ' ',')
+        echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled"
+    fi
+
+    # Export the final list of extensions that have been enabled and are
+    # supported by OVN
+    export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',')
+}
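+
+# The intersection/difference logic above relies on comm(1) over sorted
+# input; a standalone sketch of the same technique:
+#
+#   requested=$(echo "qos,trunk,bogus" | tr ',' '\n' | sort -u)
+#   supported=$(echo "qos,trunk" | tr ',' '\n' | sort -u)
+#   comm -12 <(echo "$requested") <(echo "$supported")  # in both -> enabled
+#   comm -23 <(echo "$requested") <(echo "$supported")  # requested only -> disabled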
+
+function configure_ovn_plugin {
+    echo "Configuring Neutron for OVN"
+
+    if is_service_enabled q-svc ; then
+        filter_network_api_extensions
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE"
+        if is_service_enabled tls-proxy; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+        fi
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE"
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER"
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP"
+        inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
+
+        if is_service_enabled q-log neutron-log; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT"
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT"
+            inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
+        fi
+
+        if is_service_enabled q-ovn-metadata-agent; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
+        else
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
+        fi
+
+        if is_service_enabled q-dns neutron-dns ; then
+            iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS"
+        fi
+
+        iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE
+    fi
+
+    if is_service_enabled q-dhcp neutron-dhcp ; then
+        iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True
+    else
+        iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False
+    fi
+
+    if is_service_enabled n-api-meta ; then
+        if is_service_enabled q-ovn-metadata-agent ; then
+            iniset $NOVA_CONF neutron service_metadata_proxy True
+        fi
+    fi
+}
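+
+# With the defaults above, the relevant part of the generated ml2 config
+# would look roughly like the following (connection values are illustrative;
+# OVN_NB_REMOTE/OVN_SB_REMOTE are set elsewhere in this file):
+#
+#   [ml2_type_geneve]
+#   max_header_size = 38
+#
+#   [ovn]
+#   ovn_nb_connection = tcp:127.0.0.1:6641
+#   ovn_sb_connection = tcp:127.0.0.1:6642
+#   ovn_metadata_enabled = True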
+
+function configure_ovn {
+    echo "Configuring OVN"
+
+    if [ -z "$OVN_UUID" ] ; then
+        if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then
+            OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf)
+        else
+            OVN_UUID=$(uuidgen)
+            echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
+        fi
+    else
+        local ovs_uuid
+        ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf)
+        if [ "$ovs_uuid" != $OVN_UUID ]; then
+            echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
+        fi
+    fi
+
+    # Erase the pre-set configurations from packages. DevStack will
+    # configure OVS and OVN accordingly for its use.
+    if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then
+        sudo truncate -s 0 /etc/openvswitch/default.conf
+        sudo truncate -s 0 /etc/sysconfig/openvswitch
+        sudo truncate -s 0 /etc/sysconfig/ovn
+    fi
+
+    # Metadata
+    if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then
+        sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
+
+        mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
+        (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+        cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF
+        configure_root_helper_options $OVN_META_CONF
+
+        iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
+        iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
+        iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
+        iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
+        iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
+        if is_service_enabled tls-proxy; then
+            iniset $OVN_META_CONF ovn \
+                ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem
+            iniset $OVN_META_CONF ovn \
+                ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt
+            iniset $OVN_META_CONF ovn \
+                ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key
+        fi
+    fi
+}
+
+function init_ovn {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+
+    # Assumption: this is a dedicated test system and there is nothing important
+    # in the ovn, ovn-nb, or ovs databases.  We're going to trash them and
+    # create new ones on each devstack run.
+
+    _disable_libvirt_apparmor
+
+    mkdir -p $OVN_DATADIR
+    mkdir -p $OVS_DATADIR
+
+    rm -f $OVS_DATADIR/*.db
+    rm -f $OVS_DATADIR/.*.db.~lock~
+    rm -f $OVN_DATADIR/*.db
+    rm -f $OVN_DATADIR/.*.db.~lock~
+}
+
+function _start_ovs {
+    echo "Starting OVS"
+    if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then
+        # ovsdb-server and ovs-vswitchd are the Open vSwitch service names
+        # used internally by this OVN setup.
+        enable_service ovsdb-server
+        enable_service ovs-vswitchd
+
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            if [ ! -f $OVS_DATADIR/conf.db ]; then
+                ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema
+            fi
+
+            if is_service_enabled ovn-controller-vtep; then
+                if [ ! -f $OVS_DATADIR/vtep.db ]; then
+                    ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema
+                fi
+            fi
+
+            local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file"
+            dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options"
+            if is_service_enabled ovn-controller-vtep; then
+                dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
+            fi
+            dbcmd+=" $OVS_DATADIR/conf.db"
+            _run_process ovsdb-server "$dbcmd"
+
+            # Note: ovn-controller will create and configure br-int once it is started.
+            # So, no need to create it now because nothing depends on that bridge here.
+            local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
+            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVSDB_SERVER_SERVICE"
+            _start_process "$OVS_VSWITCHD_SERVICE"
+        fi
+
+        echo "Configuring OVSDB"
+        if is_service_enabled tls-proxy; then
+            sudo ovs-vsctl --no-wait set-ssl \
+                $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
+                $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \
+                $INT_CA_DIR/ca-chain.pem
+        fi
+
+        sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST
+        sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+        # Select this chassis to host gateway routers
+        if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
+            sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
+        fi
+
+        if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then
+            ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE
+            sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE}
+        fi
+
+        if is_service_enabled ovn-controller-vtep ; then
+            ovn_base_setup_bridge br-v
+            vtep-ctl add-ps br-v
+            vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+
+            enable_service ovs-vtep
+            local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
+            _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root"
+
+            vtep-ctl set-manager tcp:$HOST_IP:6640
+        fi
+    fi
+}
+
+function _start_ovn_services {
+    _start_process "$OVSDB_SERVER_SERVICE"
+    _start_process "$OVS_VSWITCHD_SERVICE"
+
+    if is_service_enabled ovn-northd ; then
+        _start_process "$OVN_NORTHD_SERVICE"
+    fi
+    if is_service_enabled ovn-controller ; then
+        _start_process "$OVN_CONTROLLER_SERVICE"
+    fi
+    if is_service_enabled ovn-controller-vtep ; then
+        _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+    fi
+    if is_service_enabled ovs-vtep ; then
+        _start_process "devstack@ovs-vtep.service"
+    fi
+    if is_service_enabled q-ovn-metadata-agent; then
+        _start_process "devstack@q-ovn-metadata-agent.service"
+    fi
+}
+
+# start_ovn() - Start running processes, including screen
+function start_ovn {
+    echo "Starting OVN"
+
+    _start_ovs
+
+    local SCRIPTDIR=$OVN_SCRIPTDIR
+    if ! use_new_ovn_repository; then
+        SCRIPTDIR=$OVS_SCRIPTDIR
+    fi
+
+    if is_service_enabled ovn-northd ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
+
+            _run_process ovn-northd "$cmd" "$stop_cmd"
+        else
+            _start_process "$OVN_NORTHD_SERVICE"
+        fi
+
+        # Wait for the service to be ready
+        wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
+        wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+
+        if is_service_enabled tls-proxy; then
+            sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+        fi
+        sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
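+        # e.g. with OVN_PROTO=tcp (set elsewhere in this file), the NB/SB
+        # databases end up listening on ptcp:6641 and ptcp:6642 respectively,
+        # each with a 60 second inactivity probe.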
+        sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+        sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+    fi
+
+    if is_service_enabled ovn-controller ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
+            local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
+
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVN_CONTROLLER_SERVICE"
+        fi
+    fi
+
+    if is_service_enabled ovn-controller-vtep ; then
+        if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+            local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+        else
+            _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+        fi
+    fi
+
+    if is_service_enabled q-ovn-metadata-agent; then
+        run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
+        # Format logging
+        setup_logging $OVN_META_CONF
+    fi
+
+    _start_ovn_services
+}
+
+function _stop_ovs_dp {
+    sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp
+    sudo modprobe -q -r vport_geneve vport_vxlan openvswitch || true
+}
+
+function _stop_process {
+    local service=$1
+    echo "Stopping process $service"
+    if $SYSTEMCTL is-enabled $service; then
+        $SYSTEMCTL stop $service
+        $SYSTEMCTL disable $service
+    fi
+}
+
+function stop_ovn {
+    if is_service_enabled q-ovn-metadata-agent; then
+        sudo pkill -9 -f haproxy || :
+        _stop_process "devstack@q-ovn-metadata-agent.service"
+    fi
+    if is_service_enabled ovn-controller-vtep ; then
+        _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
+    fi
+    if is_service_enabled ovn-controller ; then
+        _stop_process "$OVN_CONTROLLER_SERVICE"
+    fi
+    if is_service_enabled ovn-northd ; then
+        _stop_process "$OVN_NORTHD_SERVICE"
+    fi
+    if is_service_enabled ovs-vtep ; then
+        _stop_process "devstack@ovs-vtep.service"
+    fi
+
+    _stop_process "$OVS_VSWITCHD_SERVICE"
+    _stop_process "$OVSDB_SERVER_SERVICE"
+
+    _stop_ovs_dp
+}
+
+function _cleanup {
+    local path=${1:-$DEST/$OVN_REPO_NAME}
+    pushd $path
+    sudo make uninstall
+    sudo make distclean
+    popd
+}
+
+# cleanup_ovn() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ovn {
+    local ovn_path=$DEST/$OVN_REPO_NAME
+    local ovs_path=$DEST/$OVS_REPO_NAME
+
+    if [ -d $ovn_path ]; then
+        _cleanup $ovn_path
+    fi
+
+    if [ -d $ovs_path ]; then
+        _cleanup $ovs_path
+    fi
+
+    sudo rm -f $OVN_RUNDIR
+}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 50b9ae5..2e63fe3 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -69,17 +69,24 @@
         restart_service openvswitch
         sudo systemctl enable openvswitch
     elif is_suse; then
-        if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+        if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
             restart_service openvswitch-switch
         else
-            restart_service openvswitch
+            # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
+            if [[ $DISTRO =~ "tumbleweed" ]]; then
+                sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
+            fi
+            restart_service openvswitch || {
+                journalctl -xe || :
+                systemctl status openvswitch
+            }
         fi
     fi
 }
 
 function _neutron_ovs_base_configure_firewall_driver {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver openvswitch
         if ! running_in_container; then
             enable_kernel_bridge_firewall
         fi
@@ -89,10 +96,6 @@
 }
 
 function _neutron_ovs_base_configure_l3_agent {
-    if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then
-        iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
-    fi
-
     neutron-ovs-cleanup --config-file $NEUTRON_CONF
     if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then
         ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
new file mode 100644
index 0000000..08951d1
--- /dev/null
+++ b/lib/neutron_plugins/ovs_source
@@ -0,0 +1,215 @@
+#!/bin/bash
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Defaults
+# --------
+
+# Set variables for building OVS from source
+OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
+OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
+OVS_REPO_NAME=${OVS_REPO_NAME:-ovs}
+OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5}
+
+# Functions
+
+# load_module() - Load the kernel module given by the first argument via
+#                 modprobe, optionally dying on failure
+#               - the optional "fatal" argument controls whether the function
+#                 exits if the module can't be loaded
+function load_module {
+    local module=$1
+    local fatal=$2
+
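+    # Note: devstack's trueorfalse takes the *name* of a variable, so
+    # "fatal" below refers to the local variable declared above.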
+    if [ "$(trueorfalse True fatal)" == "True" ]; then
+        sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module")
+    else
+        sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg)
+    fi
+}
+
+# prepare_for_ovs_compilation() - Fetch the ovs git repository and install
+#                                 the packages needed for compilation.
+function prepare_for_ovs_compilation {
+    local build_modules=${1:-False}
+    OVS_DIR=$DEST/$OVS_REPO_NAME
+
+    if [ ! -d $OVS_DIR ] ; then
+        # We can't use git_clone here because we want to ignore ERROR_ON_CLONE
+        git_timed clone $OVS_REPO $OVS_DIR
+        cd $OVS_DIR
+        git checkout $OVS_BRANCH
+    else
+        # Even though the directory already exists, call git_clone to update it
+        # if needed based on the RECLONE option
+        git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH
+        cd $OVS_DIR
+    fi
+
+    # TODO: Can we use package list files here, as done elsewhere in devstack?
+    install_package autoconf automake libtool gcc patch make
+
+    # If build_modules is False, we don't need to install the kernel-*
+    # packages. Just return.
+    if [[ "$build_modules" == "False" ]]; then
+        return
+    fi
+
+    KERNEL_VERSION=$(uname -r)
+    if is_fedora ; then
+        # is_fedora covers Fedora, RHEL, CentOS, etc...
+        if [[ "$os_VENDOR" == "Fedora" ]]; then
+            install_package elfutils-libelf-devel
+            KERNEL_VERSION=$(echo $KERNEL_VERSION | cut --delimiter='-' --field 1)
+        elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then
+            # dash is illegal character in rpm version so replace
+            # them with underscore like it is done in the kernel
+            # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25
+            # but only for latest series of the kernel, not 3.x
+
+            KERNEL_VERSION=$(echo $KERNEL_VERSION | tr - _)
+        fi
+
+        echo "NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation"
+        echo "failed, please provide a repository with the package, or yum update / reboot"
+        echo "your machine to get the latest kernel."
+
+        install_package kernel-devel-$KERNEL_VERSION
+        install_package kernel-headers-$KERNEL_VERSION
+
+    elif is_ubuntu ; then
+        install_package linux-headers-$KERNEL_VERSION
+    fi
+}
+
+# load_ovs_kernel_modules() - load openvswitch kernel module
+function load_ovs_kernel_modules {
+    load_module openvswitch
+    load_module vport-geneve False
+    dmesg | tail
+}
+
+# reload_ovs_kernel_modules() - reload openvswitch kernel module
+function reload_ovs_kernel_modules {
+    set +e
+    ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system)
+    if [ -n "$ovs_system" ]; then
+        sudo ovs-dpctl del-dp ovs-system
+    fi
+    set -e
+    sudo modprobe -r vport_geneve
+    sudo modprobe -r openvswitch
+    load_ovs_kernel_modules
+}
+
+# compile_ovs() - Compile OVS from source and load needed modules.
+#                 Accepts three optional parameters:
+#                   - the first, False by default, controls whether kernel
+#                     modules are built and installed
+#                   - the second defines the prefix for the OVS compilation
+#                   - the third defines the localstatedir for the OVS
+#                     single-machine runtime
+#                 Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set
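+#
+# For example, the OVN installation code above invokes it as:
+#
+#   compile_ovs $OVN_BUILD_MODULES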
+function compile_ovs {
+    local _pwd=$PWD
+    local build_modules=${1:-False}
+    local prefix=$2
+    local localstatedir=$3
+
+    if [ -n "$prefix" ]; then
+        prefix="--prefix=$prefix"
+    fi
+
+    if [ -n "$localstatedir" ]; then
+        localstatedir="--localstatedir=$localstatedir"
+    fi
+
+    prepare_for_ovs_compilation $build_modules
+
+    KERNEL_VERSION=$(uname -r)
+    major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1)
+    patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2)
+    if [ "${major_version}" -gt 5 ] || [ "${major_version}" == 5 ] && [ "${patch_level}" -gt 5 ]; then
+        echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling "
+        echo "Kernel module for version higher than 5.5. Skipping module compilation..."
+        build_modules="False"
+    fi
+
+    if [ ! -f configure ] ; then
+        ./boot.sh
+    fi
+    if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+        if [[ "$build_modules" == "True" ]]; then
+            ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build
+        else
+            ./configure $prefix $localstatedir
+        fi
+    fi
+    make -j$(($(nproc) + 1))
+    sudo make install
+    if [[ "$build_modules" == "True" ]]; then
+        sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install
+        reload_ovs_kernel_modules
+    else
+        load_ovs_kernel_modules
+    fi
+
+    cd $_pwd
+}
+
+# action_openvswitch() - Call an action on the openvswitch service.
+#                        Accepts one parameter, which must be one of
+#                        'start', 'restart' or 'stop'.
+function action_openvswitch {
+    local action=$1
+
+    if is_ubuntu; then
+        ${action}_service openvswitch-switch
+    elif is_fedora; then
+        ${action}_service openvswitch
+    elif is_suse; then
+        if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+            ${action}_service openvswitch-switch
+        else
+            ${action}_service openvswitch
+        fi
+    fi
+}
+
+# start_new_ovs() - removes old ovs database, creates a new one and starts ovs
+function start_new_ovs {
+    sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
+    sudo /usr/share/openvswitch/scripts/ovs-ctl start
+}
+
+# stop_new_ovs() - stops ovs
+function stop_new_ovs {
+    local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl'
+
+    if [ -x $ovs_ctl ] ; then
+        sudo $ovs_ctl stop
+    fi
+}
+
+# remove_ovs_packages() - removes old ovs packages from the system
+function remove_ovs_packages {
+    for package in openvswitch openvswitch-switch openvswitch-common; do
+        if is_package_installed $package; then
+            uninstall_package $package
+        fi
+    done
+}
+
+
+# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module
+function load_conntrack_gre_module {
+    load_module nf_conntrack_proto_gre False
+}
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 98315b7..98b96ac 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -39,9 +39,9 @@
 Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 
 
-# Use flat providernet for public network
+# Use providernet for public network
 #
-# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network
 # for external interface of neutron l3-agent.  In that case,
 # PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value
 # used for the network.  In case of ofagent, you should add the
@@ -59,6 +59,10 @@
 #    Q_USE_PROVIDERNET_FOR_PUBLIC=True
 #    PUBLIC_PHYSICAL_NETWORK=public
 #    OVS_BRIDGE_MAPPINGS=public:br-ex
+#
+# The provider-network-type defaults to flat, however, the values
+# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
+# be set to specify the parameters for an alternate network type.
 Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
 
@@ -97,9 +101,8 @@
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
 default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
 
-default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}')
+default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
 
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
@@ -120,21 +123,7 @@
 
     neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE
 
-    # If we've given a PUBLIC_INTERFACE to take over, then we assume
-    # that we can own the whole thing, and privot it into the OVS
-    # bridge. If we are not, we're probably on a single interface
-    # machine, and we just setup NAT so that fixed guests can get out.
-    if [[ -n "$PUBLIC_INTERFACE" ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
-        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
-        fi
-    else
-        for d in $default_v4_route_devs; do
-            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
-        done
-    fi
+    _configure_public_network_connectivity
 }
 
 # Explicitly set router id in l3 agent configuration
@@ -188,7 +177,7 @@
             if [ -z $SUBNETPOOL_V4_ID ]; then
                 fixed_range_v4=$FIXED_RANGE
             fi
-            SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2)
+            SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
         fi
 
@@ -198,7 +187,7 @@
             if [ -z $SUBNETPOOL_V6_ID ]; then
                 fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
             fi
-            IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID --subnet-range $fixed_range_v6 | grep ' id ' | get_field 2)
+            IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
             die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
 
@@ -240,7 +229,7 @@
         fi
         # Create an external network, and a subnet. Configure the external network as router gw
         if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
         else
             EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
         fi
@@ -340,8 +329,8 @@
     # Configure the external network as the default router gateway
     openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
 
-    # This logic is specific to using the l3-agent for layer 3
-    if is_service_enabled q-l3 || is_service_enabled neutron-l3;  then
+    # This logic is specific to using OVN or the l3-agent for layer 3
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
         # Configure and enable public bridge
         local ext_gw_interface="none"
         if is_neutron_ovs_base_plugin; then
@@ -389,8 +378,10 @@
         openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
     fi
 
-    # This logic is specific to using the l3-agent for layer 3
-    if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+    # This logic is specific to using OVN or the l3-agent for layer 3
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+        # if the Linux host considers itself to be a router then it will
+        # ignore all router advertisements
         # Ensure IPv6 RAs are accepted on interfaces with a default route.
         # This is needed for neutron-based devstack clouds to work in
         # IPv6-only clouds in the gate. Please do not remove this without
@@ -415,6 +406,11 @@
 
             # Configure interface for public bridge
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
+            # Any IPv6 private subnet that uses the default IPV6 subnet pool
+            # and that is plugged into the default router (Q_ROUTER_NAME) will
+            # be reachable from the devstack node (ex: ipv6-private-subnet).
+            # Some scenario tests (such as octavia-tempest-plugin) rely heavily
+            # on this feature.
             local replace_range=${SUBNETPOOL_PREFIX_V6}
             if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
                 replace_range=${FIXED_RANGE_V6}
diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk
new file mode 100644
index 0000000..8e0f694
--- /dev/null
+++ b/lib/neutron_plugins/services/trunk
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+function configure_trunk_extension {
+    neutron_service_plugin_class_add "trunk"
+}
diff --git a/lib/nova b/lib/nova
index ea0d2f7..9aae2c4 100644
--- a/lib/nova
+++ b/lib/nova
@@ -46,15 +46,12 @@
 NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
 # INSTANCES_PATH is the previous name for this
 NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
-NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
 
 NOVA_CONF_DIR=/etc/nova
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
-NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
 NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf
 NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
-NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
 NOVA_API_DB=${NOVA_API_DB:-nova_api}
 NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
 NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
@@ -82,32 +79,36 @@
     NOVA_SERVICE_PROTOCOL="https"
 fi
 
+# Whether to use TLS for comms between the VNC/SPICE/serial proxy
+# services and the compute node
+NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False}
+
+# Validate configuration
+if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then
+    die $LINENO "enabling TLS for the console proxy requires the tls-proxy service"
+fi
+
 # Public facing bits
 NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
-NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
+NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
 
-# Nova supports pluggable schedulers.  The default ``FilterScheduler``
-# should work in most cases.
-SCHEDULER=${SCHEDULER:-filter_scheduler}
-
-# The following FILTERS contains SameHostFilter and DifferentHostFilter with
+# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with
 # the default filters.
-FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 # Set default defaults here as some hypervisor drivers override these
 PUBLIC_INTERFACE_DEFAULT=br100
-FLAT_NETWORK_BRIDGE_DEFAULT=br100
 # Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
 # the default isn't completely crazy. This will match ``eth*``, ``em*``, or
 # the new ``p*`` interfaces, then basically picks the first
@@ -133,51 +134,30 @@
     source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
 fi
 
-
-# Nova Network Configuration
-# --------------------------
-
-NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
-
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge.  This will happen when you launch your first instance.  Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=$(trueorfalse False MULTI_HOST)
-
-# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
-# where there are at least two nova-computes.
-NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
-
-# Test floating pool and range are used for testing.  They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
 # Other Nova configurations
 # ----------------------------
 
 # ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with
 # user token while communicating to external RESP API's like Neutron, Cinder
 # and Glance.
-NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
+NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN)
+
+# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
+# where there are at least two nova-computes.
+NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
+
+# Enable debugging for the iscsid service (the debug level ranges from 0-8)
+ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG)
+ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4}
+
+# Format for notifications. Nova defaults to "unversioned" since Train.
+# Other options include "versioned" and "both".
+NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned}
+
+# Timeout for servers to gracefully shutdown the OS during operations
+# like shelve, rescue, stop, rebuild. Defaults to 0 since the default
+# image in devstack is CirrOS.
+NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0}
 
 # Functions
 # ---------
@@ -190,10 +170,10 @@
     return 1
 }
 
-# Test if any Nova Cell services are enabled
-# is_nova_enabled
-function is_n-cell_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
+# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy
+# service has TLS enabled
+function is_nova_console_proxy_compute_tls_enabled {
+    [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0
     return 1
 }
 
@@ -240,7 +220,7 @@
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
 
-    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
+    sudo rm -rf $NOVA_STATE_PATH
 
     # NOTE(dtroyer): This really should be called from here but due to the way
     #                nova abuses the _cleanup() function we're moving it
@@ -280,6 +260,8 @@
                 if [ ! -e /dev/kvm ]; then
                     echo "WARNING: Switching to QEMU"
                     LIBVIRT_TYPE=qemu
+                    LIBVIRT_CPU_MODE=custom
+                    LIBVIRT_CPU_MODEL=Nehalem
                     if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
                         # https://bugzilla.redhat.com/show_bug.cgi?id=753589
                         sudo setsebool virt_use_execmem on
@@ -292,17 +274,6 @@
             # to simulate multiple systems.
             if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                 if is_ubuntu; then
-                    if [[ ! "$DISTRO" > natty ]]; then
-                        local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
-                        sudo mkdir -p /cgroup
-                        if ! grep -q cgroup /etc/fstab; then
-                            echo "$cgline" | sudo tee -a /etc/fstab
-                        fi
-                        if ! mount -n | grep -q cgroup; then
-                            sudo mount /cgroup
-                        fi
-                    fi
-
                     # enable nbd for lxc unless you're using an lvm backend
                     # otherwise you can't boot instances
                     if [[ "$NOVA_BACKEND" != "LVM" ]]; then
@@ -327,10 +298,25 @@
                 sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
             fi
         fi
-        if is_suse; then
-            # iscsid is not started by default
-            start_service iscsid
+
+        # Ensure each compute host uses a unique iSCSI initiator
+        echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
+
+        if [[ ${ISCSID_DEBUG} == "True" ]]; then
+            # Install an override that starts iscsid with debugging
+            # enabled.
+            cat > /tmp/iscsid.override <<EOF
+[Service]
+ExecStart=
+ExecStart=/usr/sbin/iscsid -d${ISCSID_DEBUG_LEVEL}
+EOF
+            sudo mkdir -p /etc/systemd/system/iscsid.service.d
+            sudo mv /tmp/iscsid.override /etc/systemd/system/iscsid.service.d/override.conf
+            sudo systemctl daemon-reload
         fi
+
+        # ensure that iscsid is started, even when disabled by default
+        restart_service iscsid
     fi
 
     # Rebuild the config file from scratch
@@ -387,7 +373,7 @@
     fi
 
     # S3
-    if is_service_enabled swift3; then
+    if is_service_enabled s3api; then
         get_or_create_service "s3" "s3" "S3"
         get_or_create_endpoint \
             "s3" \
@@ -411,20 +397,20 @@
     fi
     iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
-    iniset $NOVA_CONF scheduler driver "$SCHEDULER"
-    iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS"
-    iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
+    iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
+    iniset $NOVA_CONF scheduler workers "$API_WORKERS"
+    iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
     if [[ $SERVICE_IP_VERSION == 6 ]]; then
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
-        iniset $NOVA_CONF DEFAULT use_ipv6 "True"
     else
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
     fi
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
+    iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT
 
-    iniset $NOVA_CONF key_manager api_class nova.keymgr.conf_key_mgr.ConfKeyManager
+    iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
 
     if is_fedora || is_suse; then
         # nova defaults to /usr/local/bin, but fedora and suse pip like to
@@ -432,8 +418,8 @@
         iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
     fi
 
-    # only setup database connections if there are services that
-    # require them running on the host. The ensures that n-cpu doesn't
+    # only setup database connections and cache backend if there are services
+    # that require them running on the host. The ensures that n-cpu doesn't
     # leak a need to use the db in a multinode scenario.
     if is_service_enabled n-api n-cond n-sched; then
         # If we're in multi-tier cells mode, we want our control services pointing
@@ -450,6 +436,13 @@
 
         iniset $NOVA_CONF database connection `database_connection_url $db`
         iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
+
+        # Cache related settings
+        # Those settings aren't really needed in n-cpu thus it is configured
+        # only on nodes which runs controller services
+        iniset $NOVA_CONF cache enabled $NOVA_ENABLE_CACHE
+        iniset $NOVA_CONF cache backend $CACHE_BACKEND
+        iniset $NOVA_CONF cache memcache_servers $MEMCACHE_SERVERS
     fi
 
     if is_service_enabled n-api; then
@@ -464,15 +457,11 @@
             iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
         fi
 
-        configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
+        configure_keystone_authtoken_middleware $NOVA_CONF nova
     fi
 
     if is_service_enabled cinder; then
-        if is_service_enabled tls-proxy; then
-            CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
-            CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
-            iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
-        fi
+        configure_cinder_access
     fi
 
     if [ -n "$NOVA_STATE_PATH" ]; then
@@ -482,21 +471,25 @@
     if [ -n "$NOVA_INSTANCES_PATH" ]; then
         iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
     fi
-    if [ "$MULTI_HOST" != "False" ]; then
-        iniset $NOVA_CONF DEFAULT multi_host "True"
-        iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
-    fi
     if [ "$SYSLOG" != "False" ]; then
         iniset $NOVA_CONF DEFAULT use_syslog "True"
     fi
     if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
         iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
     fi
+
+    # nova defaults to genisoimage but only mkisofs is available for 15.0+
+    if is_suse; then
+        iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
+    fi
+
     # Format logging
     setup_logging $NOVA_CONF
 
+    iniset $NOVA_CONF upgrade_levels compute "auto"
+
     write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
-    write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}"
+    write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
 
     if is_service_enabled ceilometer; then
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
@@ -504,81 +497,25 @@
         iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
     fi
 
-    # All nova-compute workers need to know the vnc configuration options
-    # These settings don't hurt anything if n-xvnc and n-novnc are disabled
-    if is_service_enabled n-cpu; then
-        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
-        iniset $NOVA_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
-        iniset $NOVA_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
-        iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
-    fi
-
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
-        # Address on which instance vncservers will listen on compute hosts.
-        # For multi-host, this should be the management ip of the compute host.
-        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF vnc server_listen "$VNCSERVER_LISTEN"
-        iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
-        iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-        iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-    else
-        iniset $NOVA_CONF vnc enabled false
-    fi
-
-    if is_service_enabled n-spice; then
-        # Address on which instance spiceservers will listen on compute hosts.
-        # For multi-host, this should be the management ip of the compute host.
-        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF spice enabled true
-        iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
-        iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
-        iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-    fi
-
     # Set the oslo messaging driver to the typical default. This does not
     # enable notifications, but it will allow them to function when enabled.
     iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2"
     iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url)
+    iniset $NOVA_CONF notifications notification_format "$NOVA_NOTIFICATION_FORMAT"
     iniset_rpc_backend nova $NOVA_CONF
-    iniset $NOVA_CONF glance api_servers "$GLANCE_URL"
 
     iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
     iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
     # don't let the conductor get out of control now that we're using a pure python db driver
     iniset $NOVA_CONF conductor workers "$API_WORKERS"
 
-    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
-
     if is_service_enabled tls-proxy; then
         iniset $NOVA_CONF DEFAULT glance_protocol https
         iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
     fi
 
-    if is_service_enabled n-sproxy; then
-        iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-        iniset $NOVA_CONF serial_console enabled True
-    fi
     iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
-    # Setup logging for nova-dhcpbridge command line
-    sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
-
-    if is_service_enabled n-net; then
-        local service="n-dhcp"
-        local logfile="${service}.log.${CURRENT_LOG_TIME}"
-        local real_logfile="${LOGDIR}/${logfile}"
-        if [[ -n ${LOGDIR} ]]; then
-            bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
-            iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
-        fi
-
-        iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
-    fi
-
     if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then
         init_nova_service_user_conf
     fi
@@ -589,6 +526,8 @@
             local vhost
             conf=$(conductor_conf $i)
             vhost="nova_cell${i}"
+            # clean old conductor conf
+            rm -f $conf
             iniset $conf database connection `database_connection_url nova_cell${i}`
             iniset $conf conductor workers "$API_WORKERS"
             iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
@@ -598,11 +537,184 @@
             else
                 rpc_backend_add_vhost $vhost
                 iniset_rpc_backend nova $conf DEFAULT $vhost
+                # When running in superconductor mode, the cell conductor
+                # must be configured to talk to the placement service for
+                # reschedules to work.
+                if is_service_enabled placement placement-client; then
+                    configure_placement_nova_compute $conf
+                fi
             fi
             # Format logging
             setup_logging $conf
         done
     fi
+
+    # Console proxy configuration has to go after conductor configuration
+    # because the per cell config file nova_cellN.conf is cleared out as part
+    # of conductor configuration.
+    if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+        configure_console_proxies
+    else
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            local conf
+            local offset
+            conf=$(conductor_conf $i)
+            offset=$((i - 1))
+            configure_console_proxies $conf $offset
+        done
+    fi
+}
+
+# Configure access to placement from a nova service, usually
+# compute, but sometimes conductor.
+function configure_placement_nova_compute {
+    # Use the provided config file path or default to $NOVA_CONF.
+    local conf=${1:-$NOVA_CONF}
+    iniset $conf placement auth_type "password"
+    iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
+    iniset $conf placement username placement
+    iniset $conf placement password "$SERVICE_PASSWORD"
+    iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf placement project_name "$SERVICE_TENANT_NAME"
+    iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf placement region_name "$REGION_NAME"
+}
+
+# Configure access to cinder.
+function configure_cinder_access {
+    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
+    iniset $NOVA_CONF cinder auth_type "password"
+    iniset $NOVA_CONF cinder auth_url "$KEYSTONE_SERVICE_URI"
+    # NOTE(mriedem): This looks a bit weird but we use the nova user here
+    # since it has the admin role and the cinder user does not. This is
+    # similar to using the nova user in init_nova_service_user_conf. We need
+    # to use a user with the admin role for background tasks in nova to
+    # be able to GET block-storage API resources owned by another project
+    # since cinder has low-level "is_admin" checks in its DB API.
+    iniset $NOVA_CONF cinder username nova
+    iniset $NOVA_CONF cinder password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF cinder user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $NOVA_CONF cinder project_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF cinder project_domain_name "$SERVICE_DOMAIN_NAME"
+    if is_service_enabled tls-proxy; then
+        CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+        CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+        iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
+    fi
+}
+
+function configure_console_compute {
+    # If we are running multiple cells (and thus multiple console proxies) on a
+    # single host, we offset the ports to avoid collisions.  We need to
+    # correspondingly configure the console proxy port for nova-compute and we
+    # can use the NOVA_CPU_CELL variable to know which cell we are for
+    # calculating the offset.
+    # Stagger the offset based on the total number of possible console proxies
+    # (novnc, spice, serial) so that their ports will not collide if
+    # all are enabled.
+    local offset
+    offset=$(((NOVA_CPU_CELL - 1) * 3))
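+    # For example: cell 1 gets offset 0 (ports 6080/6081/6082 below), while
+    # cell 2 gets offset 3 (ports 6083/6084/6085).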
+
+    # Use the host IP instead of the service host because for multi-node, the
+    # service host will be the controller only.
+    local default_proxyclient_addr
+    default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip)
+
+    # All nova-compute workers need to know the vnc configuration options
+    # These settings don't hurt anything if n-novnc is disabled
+    if is_service_enabled n-cpu; then
+        if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then
+            # Use the old URL when installing novnc packages.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"}
+        elif vercmp ${NOVNC_BRANCH} "<" "1.0.0"; then
+            # Use the old URL when installing older novnc source.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"}
+        else
+            # Use the new URL when building >=v1.0.0 from source.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"}
+        fi
+        iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"}
+        iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+    fi
+
+    if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+        # Address on which instance vncservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr}
+        iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+    else
+        iniset $NOVA_CPU_CONF vnc enabled false
+    fi
+
+    if is_service_enabled n-spice; then
+        # Address on which instance spiceservers will listen on compute hosts.
+        # For multi-host, this should be the management ip of the compute host.
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
+        iniset $NOVA_CPU_CONF spice enabled true
+        iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN"
+        iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+    fi
+
+    if is_service_enabled n-sproxy; then
+        iniset $NOVA_CPU_CONF serial_console enabled True
+        iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/"
+    fi
+}
+
+function configure_console_proxies {
+    # Use the provided config file path or default to $NOVA_CONF.
+    local conf=${1:-$NOVA_CONF}
+    local offset=${2:-0}
+    # Stagger the offset based on the total number of possible console proxies
+    # (novnc, spice, serial) so that their ports will not collide if
+    # all are enabled.
+    offset=$((offset * 3))
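+    # For example, an offset argument of 1 (the second cell) yields the
+    # novnc proxy on 6083, spice on 6084 and serial on 6085.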
+
+    if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+        iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf vnc novncproxy_port $((6080 + offset))
+
+        if is_nova_console_proxy_compute_tls_enabled ; then
+            iniset $conf vnc auth_schemes "vencrypt"
+            iniset $conf vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem"
+            iniset $conf vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem"
+            iniset $conf vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem"
+
+            sudo mkdir -p /etc/pki/nova-novnc
+            deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem
+            deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem
+            # OpenSSL 1.1.0 generates the key file with permissions: 600, by
+            # default, and the deploy_int* methods use 'sudo cp' to copy the
+            # files, making them owned by root:root.
+            # Change ownership of everything under /etc/pki/nova-novnc to
+            # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read
+            # the key file.
+            sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc
+            # This is needed to enable TLS in the proxy itself, example log:
+            # WebSocket server settings:
+            #   - Listen on 0.0.0.0:6080
+            #   - Flash security policy server
+            #   - Web server (no directory listings). Web root: /usr/share/novnc
+            #   - SSL/TLS support
+            #   - proxying from 0.0.0.0:6080 to None:None
+            iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem"
+            iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem"
+        fi
+    fi
+
+    if is_service_enabled n-spice; then
+        iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf spice html5proxy_port $((6081 + offset))
+    fi
+
+    if is_service_enabled n-sproxy; then
+        iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf serial_console serialproxy_port $((6082 + offset))
+    fi
 }
 
 function init_nova_service_user_conf {
@@ -622,94 +734,55 @@
     echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf"
 }
 
-function init_nova_cells {
-    if is_service_enabled n-cell; then
-        cp $NOVA_CONF $NOVA_CELLS_CONF
-        iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB`
-        rpc_backend_add_vhost child_cell
-        iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell
-        iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
-        iniset $NOVA_CELLS_CONF cells enable True
-        iniset $NOVA_CELLS_CONF cells cell_type compute
-        iniset $NOVA_CELLS_CONF cells name child
-
-        iniset $NOVA_CONF cells enable True
-        iniset $NOVA_CONF cells cell_type api
-        iniset $NOVA_CONF cells name region
-
-        if is_service_enabled n-api-meta; then
-            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
-            iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
-            iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
-        fi
-
-        # Cells v1 conductor should be the nova-cells.conf
-        NOVA_COND_CONF=$NOVA_CELLS_CONF
-
-        time_start "dbsync"
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
-        time_stop "dbsync"
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
-        $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
-
-        # Creates the single cells v2 cell for the child cell (v1) nova db.
-        nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \
-            --transport-url $(get_transport_url child_cell) --name 'cell1'
-    fi
-}
-
-# create_nova_cache_dir() - Part of the init_nova() process
-function create_nova_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR
-    rm -f $NOVA_AUTH_CACHE_DIR/*
-}
-
-function create_nova_conf_nova_network {
-    local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
-    iniset $NOVA_CONF DEFAULT public_interface "$public_interface"
-    iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
-    iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
-    if [ -n "$FLAT_INTERFACE" ]; then
-        iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
-    fi
-    iniset $NOVA_CONF DEFAULT use_neutron False
-}
-
 # create_nova_keys_dir() - Part of the init_nova() process
 function create_nova_keys_dir {
     # Create keys dir
     sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys
 }
 
+function init_nova_db {
+    local dbname="$1"
+    local conffile="$2"
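+    # --local_cell limits the sync to the cell database referenced by the
+    # given config file instead of fanning out to all known cells.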
+    recreate_database $dbname
+    $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell
+}
+
 # init_nova() - Initialize databases, etc.
 function init_nova {
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
+        # (Re)create nova databases
+        if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+            # If we are doing singleconductor mode, we have some strange
+            # interdependencies, in that the main config refers to cell1
+            # instead of cell0. In that case, just make sure the cell0 database
+            # is created before we need it below, but don't db_sync it until
+            # after the cellN databases are there.
+            recreate_database nova_cell0
+        else
+            async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF
+        fi
+
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i)
+        done
+
         recreate_database $NOVA_API_DB
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
 
-        recreate_database nova_cell0
-
         # map_cell0 will create the cell mapping record in the nova_api DB so
-        # this needs to come after the api_db sync happens. We also want to run
-        # this before the db sync below since that will migrate both the nova
-        # and nova_cell0 databases.
-        nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
+        # this needs to come after the api_db sync happens.
+        $NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
 
-        # (Re)create nova databases
-        for i in $(seq 1 $NOVA_NUM_CELLS); do
-            recreate_database nova_cell${i}
-            $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync
+        # Wait for DBs to finish from above
+        for i in $(seq 0 $NOVA_NUM_CELLS); do
+            async_wait nova-cell-$i
         done
 
-        # Migrate nova and nova_cell0 databases.
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
-
-        if is_service_enabled n-cell; then
-            recreate_database $NOVA_CELLS_DB
+        if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+            # We didn't db sync cell0 above, so run it now
+            $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
         fi
 
         # Run online migrations on the new databases
@@ -718,11 +791,10 @@
 
         # create the cell1 cell for the main nova db where the hosts live
         for i in $(seq 1 $NOVA_NUM_CELLS); do
-            nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i"
+            $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i"
         done
     fi
 
-    create_nova_cache_dir
     create_nova_keys_dir
 
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
@@ -804,7 +876,7 @@
             start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
         fi
     else
-        run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
+        run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
         nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
     fi
 
@@ -816,15 +888,6 @@
     export PATH=$old_path
 }
 
-# Detect and setup conditions under which singleconductor setup is
-# needed. Notably cellsv1.
-function _set_singleconductor {
-    # NOTE(danms): Don't setup conductor fleet for cellsv1
-    if is_service_enabled n-cell; then
-        CELLSV2_SETUP="singleconductor"
-    fi
-}
-
 
 # start_nova_compute() - Start the compute process
 function start_nova_compute {
@@ -832,20 +895,19 @@
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
-    if is_service_enabled n-cell; then
-        local compute_cell_conf=$NOVA_CELLS_CONF
-    else
-        local compute_cell_conf=$NOVA_CONF
-    fi
+    local compute_cell_conf=$NOVA_CONF
+
+    # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF...
+    cp $compute_cell_conf $NOVA_CPU_CONF
+    # ...and then adding/overriding anything explicitly set for $NOVA_CPU_CONF in local.conf
+    merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF'
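+    # For example, an illustrative local.conf section like the following
+    # would be merged into $NOVA_CPU_CONF at this point:
+    #   [[post-config|$NOVA_CPU_CONF]]
+    #   [DEFAULT]
+    #   debug = True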
 
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so
         # skip these bits and use the normal config.
-        NOVA_CPU_CONF=$compute_cell_conf
         echo "Skipping multi-cell conductor fleet setup"
     else
         # "${CELLSV2_SETUP}" is "superconductor"
-        cp $compute_cell_conf $NOVA_CPU_CONF
         # FIXME(danms): Should this be configurable?
         iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
         # Since the nova-compute service cannot reach nova-scheduler over
@@ -854,6 +916,25 @@
         iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
     fi
 
+    # Make sure we nuke any database config
+    inidelete $NOVA_CPU_CONF database connection
+    inidelete $NOVA_CPU_CONF api_database connection
+
+    # Console proxies were configured earlier in create_nova_conf. Now that the
+    # nova-cpu.conf has been created, configure the console settings required
+    # by the compute process.
+    configure_console_compute
+
+    # Configure the OVSDB connection for os-vif
+    if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
+        iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
+    fi
+
+    # Workaround bug #1939108
+    if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then
+        iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True
+    fi
+
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # ``sg`` is used in run_process to execute nova-compute as a member of the
@@ -870,7 +951,7 @@
             # creating or modifying real configurations. Each fake
             # gets its own configuration and own log file.
             local fake_conf="${NOVA_FAKE_CONF}-${i}"
-            iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}"
+            iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
             run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf"
         done
     else
@@ -890,35 +971,50 @@
     export PATH=$NOVA_BIN_DIR:$PATH
 
     local api_cell_conf=$NOVA_CONF
-    if is_service_enabled n-cell; then
-        local compute_cell_conf=$NOVA_CELLS_CONF
-    else
-        local compute_cell_conf=$NOVA_CONF
-    fi
-
-    # ``run_process`` checks ``is_service_enabled``, it is not needed here
-    run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
-    run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
-
-    if is_service_enabled n-net; then
-        if ! running_in_container; then
-            enable_kernel_bridge_firewall
-        fi
-    fi
-    run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+    local compute_cell_conf=$NOVA_CONF
 
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
     if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
         run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
     else
-        run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
+        run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
     fi
 
-    run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
-    run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
-    run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
-    run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
-    run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
+    export PATH=$old_path
+}
+
+function enable_nova_console_proxies {
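+    # e.g. with NOVA_NUM_CELLS=2 this enables n-novnc-cell1, n-novnc-cell2,
+    # and so on for each console proxy service that is enabled.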
+    for i in $(seq 1 $NOVA_NUM_CELLS); do
+        for srv in n-novnc n-spice n-sproxy; do
+            if is_service_enabled $srv; then
+                enable_service ${srv}-cell${i}
+            fi
+        done
+    done
+}
+
+function start_nova_console_proxies {
+    # Hack to set the path for rootwrap
+    local old_path=$PATH
+    # This is needed to find the nova conf
+    export PATH=$NOVA_BIN_DIR:$PATH
+
+    local api_cell_conf=$NOVA_CONF
+    # console proxies run globally for singleconductor, else they run per cell
+    if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+        run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
+        run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
+        run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
+    else
+        enable_nova_console_proxies
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            local conf
+            conf=$(conductor_conf $i)
+            run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR"
+            run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR"
+            run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf"
+        done
+    fi
 
     export PATH=$old_path
 }
@@ -959,31 +1055,18 @@
     # happen between here and the script ending. However, in multinode
     # tests this can very often not be the case. So ensure that the
     # compute is up before we move on.
-    if is_service_enabled n-cell; then
-        # cells v1 can't complete the check below because it munges
-        # hostnames with cell information (grumble grumble).
-        return
-    fi
-    # TODO(sdague): honestly, this probably should be a plug point for
-    # an external system.
-    if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
-        # xenserver encodes information in the hostname of the compute
-        # because of the dom0/domU split. Just ignore for now.
-        return
-    fi
-    wait_for_compute 60
+    wait_for_compute $NOVA_READY_TIMEOUT
 }
 
 function start_nova {
-    # this catches the cells v1 case early
-    _set_singleconductor
     start_nova_rest
+    start_nova_console_proxies
     start_nova_conductor
     start_nova_compute
     if is_service_enabled n-api; then
         # dump the cell mapping to ensure life is good
         echo "Dumping cells_v2 mapping"
-        nova-manage cell_v2 list_cells --verbose
+        $NOVA_BIN_DIR/nova-manage cell_v2 list_cells --verbose
     fi
 }
 
@@ -1003,11 +1086,26 @@
 
 function stop_nova_rest {
     # Kill the non-compute nova processes
-    for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-sproxy; do
+    for serv in n-api n-api-meta n-sch; do
         stop_process $serv
     done
 }
 
+function stop_nova_console_proxies {
+    if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+        for srv in n-novnc n-spice n-sproxy; do
+            stop_process $srv
+        done
+    else
+        enable_nova_console_proxies
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            for srv in n-novnc n-spice n-sproxy; do
+                stop_process ${srv}-cell${i}
+            done
+        done
+    fi
+}
+
 function stop_nova_conductor {
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         stop_process n-cond
@@ -1025,6 +1123,7 @@
 # stop_nova() - Stop running processes
 function stop_nova {
     stop_nova_rest
+    stop_nova_console_proxies
     stop_nova_conductor
     stop_nova_compute
 }
@@ -1034,19 +1133,19 @@
     if is_service_enabled n-api; then
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then
             # Note that danms hates these flavors and apologizes for sdague
-            openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256
-            openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M
-            openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G
-            openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G
-            openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G
+            openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256
+            openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M
+            openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G
+            openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G
+            openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G
         fi
 
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then
-            openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny
-            openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small
-            openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium
-            openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large
-            openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge
+            openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny
+            openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small
+            openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium
+            openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large
+            openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge
         fi
     fi
 }
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 8d74c77..63882e0 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -24,17 +24,14 @@
 # Currently fairly specific to OpenStackCI hosts
 DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS)
 
-# Only Xenial is left with libvirt-bin.  Everywhere else is libvirtd
-if is_ubuntu && [ ! -f /etc/init.d/libvirtd ]; then
-    LIBVIRT_DAEMON=libvirt-bin
-else
-    LIBVIRT_DAEMON=libvirtd
-fi
+# Enable the Fedora Virtualization Preview Copr repo that provides the latest
+# rawhide builds of QEMU, Libvirt and other virt tools.
+ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO)
 
 # Enable coredumps for libvirt
 #  Bug: https://bugs.launchpad.net/nova/+bug/1643911
 function _enable_coredump {
-    local confdir=/etc/systemd/system/${LIBVIRT_DAEMON}.service.d
+    local confdir=/etc/systemd/system/libvirtd.service.d
     local conffile=${confdir}/coredump.conf
 
     # Create a coredump directory, and instruct the kernel to save to
@@ -59,27 +56,42 @@
 
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
+    # NOTE(yoctozepto): The common consensus [1] is that libvirt-python should
+    # be installed from distro packages. However, various projects might be
+    # trying to ensure it is installed using pip AND use upper-constraints
+    # with that, causing pip to try to upgrade it and to fail.
+    # The following line removes libvirt-python from upper-constraints and
+    # avoids the situation described above. Now only if installed packages
+    # explicitly depend on a newer (or, in general, incompatible) libvirt-python
+    # version, will pip try to reinstall it.
+    # [1] https://review.opendev.org/c/openstack/devstack/+/798514
+    $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+            $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python
 
     if is_ubuntu; then
-        install_package qemu-system
-        if [[ ${DISTRO} == "xenial" ]]; then
-            install_package libvirt-bin libvirt-dev
-        else
-            install_package libvirt-clients libvirt-daemon-system libvirt-dev
+        install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt
+        if is_arch "aarch64"; then
+            install_package qemu-efi
         fi
-        # uninstall in case the libvirt version changed
-        pip_uninstall libvirt-python
-        pip_install_gr libvirt-python
         #pip_install_gr <there-si-no-guestfs-in-pypi>
     elif is_fedora || is_suse; then
-        # On "KVM for IBM z Systems", kvm does not have its own package
-        if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then
-            install_package qemu-kvm
+
+        # Optionally enable the virt-preview repo when on Fedora
+        if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then
+            # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/
+            sudo dnf copr enable -y @virtmaint-sig/virt-preview
         fi
 
-        install_package libvirt libvirt-devel
-        pip_uninstall libvirt-python
-        pip_install_gr libvirt-python
+        # Note that in CentOS/RHEL this needs to come from the RDO
+        # repositories (qemu-kvm-ev ... which provides this package)
+        # as the base system version is too old.  We should have
+        # pre-installed these.
+        install_package qemu-kvm
+        install_package libvirt libvirt-devel python3-libvirt
+
+        if is_arch "aarch64"; then
+            install_package edk2.git-aarch64
+        fi
     fi
 
     if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then
@@ -147,9 +159,25 @@
         fi
     fi
 
+    if is_nova_console_proxy_compute_tls_enabled ; then
+        echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
+        echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
+
+        sudo mkdir -p /etc/pki/libvirt-vnc
+        deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
+        deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
+        # OpenSSL 1.1.0 generates the key file with permissions: 600, by
+        # default and the deploy_int* methods use 'sudo cp' to copy the
+        # files, making them owned by root:root.
+        # Change ownership of everything under /etc/pki/libvirt-vnc to
+        # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key
+        # file.
+        sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
+    fi
+
     # Service needs to be started on redhat/fedora -- do a restart for
     # sanity after fiddling the config.
-    restart_service $LIBVIRT_DAEMON
+    restart_service libvirtd
 
     # Restart virtlogd companion service to ensure it is running properly
     #  https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index 49c8dee..87ee49f 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -38,18 +38,7 @@
 function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver"
     # Disable arbitrary limits
-    iniset $NOVA_CONF DEFAULT quota_instances -1
-    iniset $NOVA_CONF DEFAULT quota_cores -1
-    iniset $NOVA_CONF DEFAULT quota_ram -1
-    iniset $NOVA_CONF DEFAULT quota_floating_ips -1
-    iniset $NOVA_CONF DEFAULT quota_fixed_ips -1
-    iniset $NOVA_CONF DEFAULT quota_metadata_items -1
-    iniset $NOVA_CONF DEFAULT quota_injected_files -1
-    iniset $NOVA_CONF DEFAULT quota_injected_file_path_length -1
-    iniset $NOVA_CONF DEFAULT quota_security_groups -1
-    iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
-    iniset $NOVA_CONF DEFAULT quota_key_pairs -1
-    iniset $NOVA_CONF filter_scheduler enabled_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter,RamFilter,DiskFilter"
+    iniset $NOVA_CONF quota driver nova.quota.NoopQuotaDriver
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index ee1a0e0..f058e9b 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -36,28 +36,34 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    configure_libvirt
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+    if ! is_ironic_hardware; then
+        configure_libvirt
+    fi
 
     iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
-    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-
-    if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then
-        iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
-        iniset $NOVA_CONF filter_scheduler use_baremetal_filters True
-        iniset $NOVA_CONF filter_scheduler host_subset_size 999
-        iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
-        iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
-    fi
 
     # ironic section
     iniset $NOVA_CONF ironic auth_type password
     iniset $NOVA_CONF ironic username admin
     iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
-    iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI
-    iniset $NOVA_CONF ironic project_domain_id default
+    iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI
+    if is_ironic_enforce_scope; then
+        iniset $NOVA_CONF ironic system_scope all
+    else
+        iniset $NOVA_CONF ironic project_domain_id default
+        iniset $NOVA_CONF ironic project_name demo
+    fi
     iniset $NOVA_CONF ironic user_domain_id default
-    iniset $NOVA_CONF ironic project_name demo
+    iniset $NOVA_CONF ironic region_name $REGION_NAME
+
+    # These are used with crufty legacy ironicclient
+    iniset $NOVA_CONF ironic api_max_retries 300
+    iniset $NOVA_CONF ironic api_retry_interval 5
+    # These are used with shiny new openstacksdk
+    iniset $NOVA_CONF ironic connect_retries 300
+    iniset $NOVA_CONF ironic connect_retry_delay 5
+    iniset $NOVA_CONF ironic status_code_retries 300
+    iniset $NOVA_CONF ironic status_code_retry_delay 5
 }
 
 # install_nova_hypervisor() - Install external components
@@ -66,13 +72,6 @@
         return
     fi
     install_libvirt
-    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] && is_ubuntu; then
-        # Ubuntu packaging+apparmor issue prevents libvirt from loading
-        # the ROM from /usr/share/misc.  Workaround by installing it directly
-        # to a directory that it can read from. (LP: #1393548)
-        sudo rm -rf /usr/share/qemu/sgabios.bin
-        sudo cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
-    fi
 }
 
 # start_nova_hypervisor - Start any required external services
@@ -87,7 +86,6 @@
     :
 }
 
-
 # Restore xtrace
 $_XTRACE_HYP_IRONIC
 
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 3d676b9..c1cd132 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -39,14 +39,15 @@
 function configure_nova_hypervisor {
     configure_libvirt
     iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
-    iniset $NOVA_CONF libvirt cpu_mode "none"
+    iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
+    if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then
+        iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL"
+    fi
     # Do not enable USB tablet input devices to avoid QEMU CPU overhead.
     iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
     iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
     iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
     # Power architecture currently does not support graphical consoles.
     if is_arch "ppc64"; then
         iniset $NOVA_CONF vnc enabled "false"
@@ -54,8 +55,6 @@
 
     # arm64-specific configuration
     if is_arch "aarch64"; then
-        # arm64 architecture currently does not support graphical consoles.
-        iniset $NOVA_CONF vnc enabled "false"
         iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
     fi
 
@@ -104,7 +103,7 @@
 
     if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
         if is_ubuntu; then
-            install_package python-guestfs
+            install_package python3-guestfs
             # NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs:
             # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725)
             INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)"
@@ -119,7 +118,7 @@
             # Workaround for missing dependencies in python-libguestfs
             install_package python-libguestfs guestfs-data augeas augeas-lenses
         elif is_fedora; then
-            install_package python-libguestfs
+            install_package python3-libguestfs
         fi
     fi
 }
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index 58ab5c1..57dc45c 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -38,8 +38,6 @@
 function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
     iniset $NOVA_CONF DEFAULT connection_type "openvz"
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
deleted file mode 100644
index 6f79e4f..0000000
--- a/lib/nova_plugins/hypervisor-xenserver
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-#
-# lib/nova_plugins/hypervisor-xenserver
-# Configure the XenServer hypervisor
-
-# Enable with:
-# VIRT_DRIVER=xenserver
-
-# Dependencies:
-# ``functions`` file
-# ``nova`` configuration
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-_XTRACE_XENSERVER=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
-
-VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
-    if [ -z "$XENAPI_CONNECTION_URL" ]; then
-        die $LINENO "XENAPI_CONNECTION_URL is not specified"
-    fi
-
-    # Check os-xenapi plugin is enabled
-    local plugins="${DEVSTACK_PLUGINS}"
-    local plugin
-    local found=0
-    for plugin in ${plugins//,/ }; do
-        if [[ "$plugin" = "os-xenapi" ]]; then
-            found=1
-            break
-        fi
-    done
-    if [[ $found -ne 1 ]]; then
-        die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf"
-    fi
-
-    read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
-    iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
-    iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL"
-    iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER"
-    iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD"
-    iniset $NOVA_CONF DEFAULT flat_injected "False"
-    # Need to avoid crash due to new firewall support
-    XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
-
-    local dom0_ip
-    dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
-
-    local ssh_dom0
-    ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
-
-    # install console logrotate script
-    tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
-        $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest'
-
-    # Create a cron job that will rotate guest logs
-    $ssh_dom0 crontab - << CRONTAB
-* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1
-CRONTAB
-
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
-    # xenapi functionality is now included in os-xenapi library which houses the plugin
-    # so this function intentionally left blank
-    :
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
-    # This function intentionally left blank
-    :
-}
-
-
-# Restore xtrace
-$_XTRACE_XENSERVER
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/os-vif b/lib/os-vif
new file mode 100644
index 0000000..865645c
--- /dev/null
+++ b/lib/os-vif
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# support vsctl or native.
+# until bug #1929446 is resolved we override the os-vif default
+# and fall back to the legacy "vsctl" driver.
+OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"}
+
+function is_ml2_ovs {
+    if [[ "${Q_AGENT}" == "openvswitch" ]]; then
+        echo "True"
+    fi
+    echo "False"
+}
+
+# This should be true for any ml2/ovs job but should be set to false for
+# all other ovs based jobs e.g. ml2/ovn
+OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)}
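+# trueorfalse (from devstack's functions-common) normalizes the value to the
+# canonical "True"/"False" strings, defaulting to False if unset or invalid.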
+OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF)
+
+function configure_os_vif {
+    if [[ -e ${NOVA_CONF} ]]; then
+        iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
+        iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
+    fi
+    if [[ -e ${NEUTRON_CONF} ]]; then
+        iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
+        iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
+    fi
+}
diff --git a/lib/placement b/lib/placement
index d3fb8c8..b779866 100644
--- a/lib/placement
+++ b/lib/placement
@@ -3,9 +3,6 @@
 # lib/placement
 # Functions to control the configuration and operation of the **Placement** service
 #
-# Currently the placement service is embedded in nova. Eventually we
-# expect this to change so this file is started as a separate entity
-# despite making use of some *NOVA* variables and files.
 
 # Dependencies:
 #
@@ -29,25 +26,20 @@
 # Defaults
 # --------
 
-PLACEMENT_CONF_DIR=/etc/nova
-PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf
-PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement}
-# Nova virtual environment
+PLACEMENT_DIR=$DEST/placement
+PLACEMENT_CONF_DIR=/etc/placement
+PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf
+PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone}
+# Placement virtual environment
 if [[ ${USE_VENV} = True ]]; then
-    PROJECT_VENV["nova"]=${NOVA_DIR}.venv
-    PLACEMENT_BIN_DIR=${PROJECT_VENV["nova"]}/bin
+    PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv
+    PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin
 else
     PLACEMENT_BIN_DIR=$(get_python_exec_prefix)
 fi
-PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/nova-placement-api
+PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api
 PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini
 
-# The placement service can optionally use a separate database
-# connection. Set PLACEMENT_DB_ENABLED to True to use it.
-# NOTE(cdent): This functionality depends on some code that is not
-# yet merged in nova but is coming soon.
-PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED)
-
 if is_service_enabled tls-proxy; then
     PLACEMENT_SERVICE_PROTOCOL="https"
 fi
@@ -69,29 +61,27 @@
 # cleanup_placement() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_placement {
-    sudo rm -f $(apache_site_config_for nova-placement-api)
     sudo rm -f $(apache_site_config_for placement-api)
+    remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
 }
 
 # _config_placement_apache_wsgi() - Set WSGI config files
 function _config_placement_apache_wsgi {
     local placement_api_apache_conf
     local venv_path=""
-    local nova_bin_dir=""
-    nova_bin_dir=$(get_python_exec_prefix)
+    local placement_bin_dir=""
+    placement_bin_dir=$(get_python_exec_prefix)
     placement_api_apache_conf=$(apache_site_config_for placement-api)
 
-    # reuse nova's venv if there is one as placement code lives
-    # there
     if [[ ${USE_VENV} = True ]]; then
-        venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
-        nova_bin_dir=${PROJECT_VENV["nova"]}/bin
+        venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages"
+        placement_bin_dir=${PROJECT_VENV["placement"]}/bin
     fi
 
     sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
     sudo sed -e "
         s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
+        s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g;
         s|%SSLENGINE%|$placement_ssl|g;
         s|%SSLCERTFILE%|$placement_certfile|g;
         s|%SSLKEYFILE%|$placement_keyfile|g;
@@ -101,28 +91,20 @@
     " -i $placement_api_apache_conf
 }
 
-function configure_placement_nova_compute {
-    iniset $NOVA_CONF placement auth_type "password"
-    iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $NOVA_CONF placement username placement
-    iniset $NOVA_CONF placement password "$SERVICE_PASSWORD"
-    iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME"
-    iniset $NOVA_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $NOVA_CONF placement os_region_name "$REGION_NAME"
-    # TODO(cdent): auth_strategy, which is common to see in these
-    # blocks is not currently used here. For the time being the
-    # placement api uses the auth_strategy configuration setting
-    # established by the nova api. This avoids, for the time, being,
-    # creating redundant configuration items that are just used for
-    # testing.
+# create_placement_conf() - Write config
+function create_placement_conf {
+    rm -f $PLACEMENT_CONF
+    iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+    iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
+    iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY
+    configure_keystone_authtoken_middleware $PLACEMENT_CONF placement
+    setup_logging $PLACEMENT_CONF
 }
 
 # configure_placement() - Set config files, create data dirs, etc
 function configure_placement {
-    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
-        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
-    fi
+    sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR
+    create_placement_conf
 
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
         write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement"
@@ -144,15 +126,9 @@
 }
 
 # init_placement() - Create service user and endpoints
-# If PLACEMENT_DB_ENABLED is true, create the separate placement db
-# using, for now, the api_db migrations.
 function init_placement {
-    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
-        recreate_database placement
-        time_start "dbsync"
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
-        time_stop "dbsync"
-    fi
+    recreate_database placement
+    $PLACEMENT_BIN_DIR/placement-manage db sync
     create_placement_accounts
 }
 
@@ -160,18 +136,18 @@
 function install_placement {
     install_apache_wsgi
     # Install the openstackclient placement client plugin for CLI
-    # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r.
-    pip_install osc-placement
+    pip_install_gr osc-placement
+    git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH
+    setup_develop $PLACEMENT_DIR
 }
 
 # start_placement_api() - Start the API processes ahead of other things
 function start_placement_api {
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
-        run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
+        run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
     else
         enable_apache_site placement-api
         restart_apache_server
-        tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
     fi
 
     echo "Waiting for placement-api to start..."
@@ -188,7 +164,6 @@
 function stop_placement {
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
         stop_process "placement-api"
-        remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
     else
         disable_apache_site placement-api
         restart_apache_server
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 44d0717..743b4ae 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -52,8 +52,26 @@
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         install_package rabbitmq-server
-        if is_fedora; then
-            sudo systemctl enable rabbitmq-server
+        if is_suse; then
+            install_package rabbitmq-server-plugins
+            # the default systemd socket activation only listens on the loopback interface
+            # which causes rabbitmq to try to start its own epmd
+            sudo mkdir -p /etc/systemd/system/epmd.socket.d
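+            # The empty ListenStream= first clears the packaged loopback-only
+            # entry; the following line then listens on the IPv6 wildcard.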
+            cat <<EOF | sudo tee /etc/systemd/system/epmd.socket.d/ports.conf >/dev/null
+[Socket]
+ListenStream=
+ListenStream=[::]:4369
+EOF
+            sudo systemctl daemon-reload
+            sudo systemctl restart epmd.socket epmd.service
+        fi
+        if is_fedora || is_suse; then
+            # NOTE(jangutter): If rabbitmq is not running (as in a fresh
+            # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with
+            # socket activation. This fails the first time and does not get
+            # cleared. It is benign, but the workaround is to start rabbitmq a
+            # bit earlier for RPM-based distros.
+            sudo systemctl --now enable rabbitmq-server
         fi
     fi
 }
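For reference, a quick post-stack sanity check (not part of the patch; assumes systemd) confirms the drop-in above took effect:

    # systemd merges the ports.conf drop-in into the socket unit
    systemctl cat epmd.socket
    # epmd should now be listening on all interfaces, port 4369
    sudo ss -lntp | grep 4369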
diff --git a/lib/swift b/lib/swift
index 1601e2b..b376993 100644
--- a/lib/swift
+++ b/lib/swift
@@ -37,6 +37,7 @@
 
 # Set up default directories
 GITDIR["python-swiftclient"]=$DEST/python-swiftclient
+SWIFT_DIR=$DEST/swift
 
 # Swift virtual environment
 if [[ ${USE_VENV} = True ]]; then
@@ -46,17 +47,13 @@
     SWIFT_BIN_DIR=$(get_python_exec_prefix)
 fi
 
-
-SWIFT_DIR=$DEST/swift
-SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
 SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
-SWIFT3_DIR=$DEST/swift3
 
 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
 SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
 SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
-SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 
 # TODO: add logging to different location.
 
@@ -69,8 +66,8 @@
 # Default is ``/etc/swift``.
 SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift}
 
-if is_service_enabled s-proxy && is_service_enabled swift3; then
-    # If we are using ``swift3``, we can default the S3 port to swift instead
+if is_service_enabled s-proxy && is_service_enabled s3api; then
+    # If we are using ``s3api``, we can default the S3 port to swift instead
     # of nova-objectstore
     S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT}
 fi
@@ -338,10 +335,9 @@
     local node_number
     local swift_node_config
     local swift_log_dir
-    local user_group
 
     # Make sure to kill all swift processes first
-    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+    $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
     sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}
     sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server
@@ -356,7 +352,7 @@
     # partitions (which make more sense when you have a multi-node
     # setup) we configure it with our version of rsync.
     sed -e "
-        s/%GROUP%/${USER_GROUP}/;
+        s/%GROUP%/$(id -g -n ${STACK_USER})/;
         s/%USER%/${STACK_USER}/;
         s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
     " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
@@ -369,6 +365,7 @@
 
     SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
     cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
+    cp ${SWIFT_DIR}/etc/internal-client.conf-sample ${SWIFT_CONF_DIR}/internal-client.conf
 
     # To run container sync feature introduced in Swift ver 1.12.0,
     # container sync "realm" is added in container-sync-realms.conf
@@ -423,16 +420,22 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
 
     # By default Swift will be installed with Keystone and tempauth middleware
-    # and add the swift3 middleware if its configured for it. The token for
+    # and add the s3api middleware if its configured for it. The token for
     # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the
     # token for keystoneauth would have the standard reseller_prefix `AUTH_`
-    if is_service_enabled swift3;then
-        swift_pipeline+=" swift3 s3token "
+    if is_service_enabled s3api;then
+        swift_pipeline+=" s3api"
+    fi
+    if is_service_enabled keystone; then
+        swift_pipeline+=" authtoken"
+        if is_service_enabled s3api;then
+            swift_pipeline+=" s3token"
+            iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3}
+            iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
+        fi
+        swift_pipeline+=" keystoneauth"
     fi
 
-    if is_service_enabled keystone; then
-        swift_pipeline+=" authtoken keystoneauth"
-    fi
     swift_pipeline+=" tempauth "
 
     sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
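For illustration only (the exact neighbouring middlewares come from the sample config and SWIFT_EXTRAS_MIDDLEWARE): with both s3api and keystone enabled, the sed call above rewrites the single tempauth entry so the auth portion of the proxy pipeline reads roughly:

    [pipeline:main]
    pipeline = ... s3api authtoken s3token keystoneauth tempauth ... proxy-server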
@@ -450,7 +453,7 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift
 
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory
-    configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken
+    configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False
@@ -467,22 +470,6 @@
     # Allow both reseller prefixes to be used with domain_remap
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH"
 
-    if is_service_enabled swift3; then
-        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
-[filter:s3token]
-paste.filter_factory = keystonemiddleware.s3_token:filter_factory
-auth_uri = ${KEYSTONE_AUTH_URI}
-cafile = ${SSL_BUNDLE_FILE}
-admin_user = swift
-admin_tenant_name = ${SERVICE_PROJECT_NAME}
-admin_password = ${SERVICE_PASSWORD}
-
-[filter:swift3]
-use = egg:swift3#swift3
-location = ${REGION_NAME}
-EOF
-    fi
-
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}
@@ -534,12 +521,12 @@
         local auth_vers
         auth_vers=$(iniget ${testfile} func_test auth_version)
         iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
-        if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then
+        if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then
             iniset ${testfile} func_test auth_port 443
         else
             iniset ${testfile} func_test auth_port 80
         fi
-        iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI}
+        iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI}
         if [[ "$auth_vers" == "3" ]]; then
             iniset ${testfile} func_test auth_prefix /identity/v3/
         else
@@ -557,7 +544,11 @@
 
     local swift_log_dir=${SWIFT_DATA_DIR}/logs
     sudo rm -rf ${swift_log_dir}
-    sudo install -d -o ${STACK_USER} -g adm ${swift_log_dir}/hourly
+    local swift_log_group=adm
+    if is_suse; then
+        swift_log_group=root
+    fi
+    sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly
 
     if [[ $SYSLOG != "False" ]]; then
         sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
@@ -603,7 +594,7 @@
     # Mount the disk with mount options to make it as efficient as possible
     mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
     if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+        sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8  \
             ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
     fi
 
@@ -700,7 +691,7 @@
 function init_swift {
     local node_number
     # Make sure to kill all swift processes first
-    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+    $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
     # Forcibly re-create the backing filesystem
     create_swift_disk
@@ -711,9 +702,9 @@
 
         rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
 
-        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        $SWIFT_BIN_DIR/swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        $SWIFT_BIN_DIR/swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        $SWIFT_BIN_DIR/swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
 
         # The ring will be created on each node, and because the order of
         # nodes is identical we can use a seed for rebalancing, making it
@@ -724,36 +715,34 @@
             node_number=1
 
             for node in ${SWIFT_STORAGE_IPS}; do
-                swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1
-                swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1
-                swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${node}:${OBJECT_PORT_BASE}/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${node}:${CONTAINER_PORT_BASE}/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${node}:${ACCOUNT_PORT_BASE}/sdb1 1
                 let "node_number=node_number+1"
             done
 
         else
 
             for node_number in ${SWIFT_REPLICAS_SEQ}; do
-                swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
-                swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
-                swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+                $SWIFT_BIN_DIR/swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
             done
         fi
 
         # We use a seed for rebalancing. Doing this allows us to create
         # identical rings on multiple nodes if SWIFT_STORAGE_IPS is the same
-        swift-ring-builder object.builder rebalance 42
-        swift-ring-builder container.builder rebalance 42
-        swift-ring-builder account.builder rebalance 42
+        $SWIFT_BIN_DIR/swift-ring-builder object.builder rebalance 42
+        $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42
+        $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42
     } && popd >/dev/null
-
-    # Create cache dir
-    sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR
-    rm -f $SWIFT_AUTH_CACHE_DIR/*
 }
 
 function install_swift {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
-    setup_develop $SWIFT_DIR
+    # keystonemiddleware needs to be installed via keystone extras as defined
+    # in setup.cfg, see bug #1909018 for more details.
+    setup_develop $SWIFT_DIR keystone
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         install_apache_wsgi
     fi
@@ -799,7 +788,7 @@
         # Apache should serve the "PACO" a.k.a "main" services
         restart_apache_server
         # The rest of the services should be started in the background
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
         return 0
     fi
 
@@ -823,15 +812,16 @@
         done
 
         if [[ "$SWIFT_START_ALL_SERVICES" == "True" ]]; then
-            swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
+            $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
         else
             # The container-sync daemon is strictly needed to pass the container
             # sync Tempest tests.
-            swift-init --run-dir=${SWIFT_DATA_DIR}/run container-sync start
+            enable_service s-container-sync
+            run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf"
         fi
     else
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
     fi
 
     if is_service_enabled tls-proxy; then
@@ -858,12 +848,12 @@
     local type
 
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
     fi
 
     # screen normally killed by ``unstack.sh``
-    if type -p swift-init >/dev/null; then
-        swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+    if type -p $SWIFT_BIN_DIR/swift-init >/dev/null; then
+        $SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
     # Maintain the iteration as stop_process() has some desirable side-effects
diff --git a/lib/tcpdump b/lib/tcpdump
new file mode 100644
index 0000000..16e8269
--- /dev/null
+++ b/lib/tcpdump
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# lib/tcpdump
+# Functions to start and stop a tcpdump
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - start_tcpdump
+# - stop_tcpdump
+
+# Save trace setting
+_XTRACE_TCPDUMP=$(set +o | grep xtrace)
+set +o xtrace
+
+TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap}
+
+# e.g. for iscsi
+#  "-i any tcp port 3260"
+TCPDUMP_ARGS=${TCPDUMP_ARGS:-""}
+
+# start_tcpdump() - Start running processes
+function start_tcpdump {
+    # Run a tcpdump with given arguments and save the packet capture
+    if is_service_enabled tcpdump; then
+        if [[ -z "${TCPDUMP_ARGS}" ]]; then
+            die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set"
+        fi
+        touch ${TCPDUMP_OUTPUT}
+        run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root
+    fi
+}
+
+# stop_tcpdump() stop tcpdump process
+function stop_tcpdump {
+    stop_process tcpdump
+}
+
+# Restore xtrace
+$_XTRACE_TCPDUMP
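A hypothetical local.conf fragment exercising this library (the filter mirrors the iscsi example in the comments above):

    [[local|localrc]]
    enable_service tcpdump
    TCPDUMP_ARGS="-i any tcp port 3260"
    TCPDUMP_OUTPUT=/opt/stack/logs/tcpdump.pcap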
diff --git a/lib/tempest b/lib/tempest
index bdbaaa5..8fd54c5 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -27,6 +27,7 @@
 # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
 # - ``DEFAULT_INSTANCE_TYPE``
 # - ``DEFAULT_INSTANCE_USER``
+# - ``DEFAULT_INSTANCE_ALT_USER``
 # - ``CINDER_ENABLED_BACKENDS``
 # - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
 #
@@ -102,6 +103,29 @@
     remove_disabled_services "$extensions_list" "$disabled_exts"
 }
 
+# image_size_in_gib - converts an image size from bytes to GiB, rounded up
+# Takes an image ID parameter as input
+function image_size_in_gib {
+    local size
+    size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value)
+    echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
+}
+
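A minimal usage sketch (the image and flavor names are hypothetical); this is how the helper feeds the flavor sizing further down:

    image_id=$(openstack --os-cloud devstack-admin image show my-image -f value -c id)
    disk=$(image_size_in_gib "$image_id")   # e.g. a 41 MB image rounds up to 1
    openstack --os-cloud devstack-admin flavor create --disk "$disk" --ram 128 --vcpus 1 my-flavor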
+function set_tempest_venv_constraints {
+    local tmp_c
+    tmp_c=$1
+    if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
+        (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+    else
+        echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
+        cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
+        # NOTE: we set both tox env vars; once Tempest starts using the new
+        # TOX_CONSTRAINTS_FILE variable, we can remove the old one.
+        export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+        export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+    fi
+}
+
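A sketch of the calling pattern used later in this file: write the effective constraints to a temp file, hand it to pip, then clean up.

    tmp_c=$(mktemp -t tempest_u_c.XXXXXX)
    set_tempest_venv_constraints "$tmp_c"
    tox -evenv-tempest -- pip install -c "$tmp_c" -r requirements.txt
    rm -f "$tmp_c"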
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest {
     if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -122,9 +146,12 @@
     local available_flavors
     local flavors_ref
     local flavor_lines
+    local flavor_ref_size
+    local flavor_ref_alt_size
     local public_network_id
     local public_router_id
     local ssh_connect_method="floating"
+    local disk
 
     # Save IFS
     ifs=$IFS
@@ -146,7 +173,7 @@
                 image_uuid_alt="$IMAGE_UUID"
             fi
             images+=($IMAGE_UUID)
-        done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+        done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
 
         case "${#images[*]}" in
             0)
@@ -182,19 +209,23 @@
     local alt_username=${ALT_USERNAME:-alt_demo}
     local alt_project_name=${ALT_TENANT_NAME:-alt_demo}
     local admin_project_id
-    admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }")
+    admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }")
 
     if is_service_enabled nova; then
         # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior
         # Tempest creates its own instance types
-        available_flavors=$(nova flavor-list)
+        available_flavors=$(openstack --os-cloud devstack-admin flavor list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
+                # Determine the flavor disk size based on the image size.
+                disk=$(image_size_in_gib $image_uuid)
+                openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
+                # Determine the alt flavor disk size based on the alt image size.
+                disk=$(image_size_in_gib $image_uuid_alt)
+                openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -220,11 +251,24 @@
             fi
             flavor_ref=${flavors[0]}
             flavor_ref_alt=$flavor_ref
+            flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}")
 
             # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values.
             # Some resize instance in tempest tests depends on this.
             for f in ${flavors[@]:1}; do
                 if [[ "$f" != "$flavor_ref" ]]; then
+                    #
+                    # NOTE(sdatko): Resize is only possible when target flavor
+                    #               is not smaller than the original one. For
+                    #               Tempest tests, in case there was a bigger
+                    #               flavor selected as default, e.g. m1.small,
+                    #               we need to perform additional check.
+                    #
+                    flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}")
+                    if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then
+                        continue
+                    fi
+
                     flavor_ref_alt=$f
                     break
                 fi
@@ -241,7 +285,10 @@
     # If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created
     # and the public_network_id should not be set.
     if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
-        public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
+        public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME)
+        # make sure shared network presence does not confuse the tempest tests
+        openstack --os-cloud devstack-admin network create --share shared
+        openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -263,8 +310,6 @@
     iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
     iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION
     iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
-    # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation
-    iniset $TEMPEST_CONFIG identity admin_domain_scope True
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
@@ -279,8 +324,8 @@
         iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
     fi
     iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
-    if [[ "$TEMPEST_AUTH_VERSION" != "v2.0" ]]; then
-        # we're going to disable v2 admin unless we're using v2.0 by default.
+    if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
+        # we're going to disable v2 admin unless we're using v2 by default.
         iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
     fi
 
@@ -299,20 +344,28 @@
         iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True
     fi
 
+    # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike
+    # as this is supported in Queens and beyond.
+    iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True
+
+    # In Queens and later, application credentials are enabled by default
+    # so remove this once Tempest no longer supports Pike.
+    iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True
+
+    # In Train and later, access rules for application credentials are enabled
+    # by default so remove this once Tempest no longer supports Stein.
+    iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True
+
     # Image
     # We want to be able to override this variable in the gate to avoid
     # doing an external HTTP fetch for this test.
     if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
         iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
     fi
-    if [ "$VIRT_DRIVER" = "xenserver" ]; then
-        iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
-    fi
-
-    # Image Features
-    iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
-    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
-        iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False
+    iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW
+    iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True
+    if is_service_enabled g-api-r; then
+        iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote
     fi
 
     # Compute
@@ -321,7 +374,7 @@
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
     iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
-    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
+    if ! is_service_enabled neutron; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
 
@@ -365,22 +418,16 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
-    if is_service_enabled n-cell; then
-        # Cells doesn't support shelving/unshelving
-        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
-        # Cells doesn't support hot-plugging virtual interfaces.
-        iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
-        # Cells v1 doesn't support the rescue/unrescue tests in Tempest
-        iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
 
-        if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
-            # Cells supports resize but does not currently work with devstack
-            # because of the custom flavors created for Tempest runs which are
-            # not in the cells database.
-            # TODO(mriedem): work on adding a nova-manage command to sync
-            # flavors into the cells database.
-            iniset $TEMPEST_CONFIG compute-feature-enabled resize False
-        fi
+    # Starting with Wallaby, nova sanitizes instance hostnames, replacing freeform characters with dashes
+    iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True
+
+    if [[ -n "$NOVA_FILTERS" ]]; then
+        iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS}
+    fi
+
+    if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
+        iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True
     fi
 
     if is_service_enabled n-novnc; then
@@ -397,17 +444,9 @@
     iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
 
     # Scenario
-    if [ "$VIRT_DRIVER" = "xenserver" ]; then
-        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
-        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
-        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
-        iniset $TEMPEST_CONFIG scenario img_container_format ovf
-    else
-        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
-        SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
-    fi
-    iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR
-    iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE
+    SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+    SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
+    iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE
 
     # If using provider networking, use the physical network for validation rather than private
     TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
@@ -418,7 +457,8 @@
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True}
     iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
     iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros}
+    iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER}
     iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
 
     # Volume
@@ -438,9 +478,11 @@
         TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
     fi
     iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
-    # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
-    iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
-    iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1)
+    # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends
+    if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then
+        TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
+    fi
+    iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
     local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
     local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
     if [ "$tempest_volume_min_microversion" == "None" ]; then
@@ -491,6 +533,24 @@
         iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL"
     fi
 
+    # Placement Features
+    # Set the microversion range for placement.
+    # Setting the [None, latest] microversion range allows Tempest to run tests for all microversions.
+    # NOTE: To avoid microversion test failures on stable branches, "tempest_placement_max_microversion"
+    #       should be changed on each release from "latest" to the maximum version supported by that release.
+    local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None}
+    local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"}
+    if [ "$tempest_placement_min_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement min_microversion
+    else
+        iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion
+    fi
+    if [ "$tempest_placement_max_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement max_microversion
+    else
+        iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion
+    fi
+
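For example, a stable branch could pin the ceiling in local.conf (the value shown is illustrative, not the real maximum for any particular release):

    TEMPEST_PLACEMENT_MAX_MICROVERSION=1.36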
     # Baremetal
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
@@ -514,19 +574,24 @@
             iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
             iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
             iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
-        elif ! is_service_enabled n-cell; then
-            # cells v1 does not support swapping volumes
+        else
+            iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True
+            iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True
             iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
         fi
     fi
 
     # ``service_available``
     #
-    # this tempest service list needs to be all the services that
-    # tempest supports, otherwise we can have an erroneous set of
+    # this tempest service list needs to be only the services that
+    # tempest itself owns, otherwise we can have an erroneous set of
     # defaults (something defaulting true in Tempest, but not listed here).
+    # Services tested by tempest plugins need to be set on the corresponding
+    # devstack plugin side, as devstack cannot keep track of all the services
+    # covered by tempest plugins. Refer to Bug#1743688 for more details.
+    # 'horizon' is also kept here as there is no devstack plugin for horizon.
     local service
-    local tempest_services="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove"
+    local tempest_services="key,glance,nova,neutron,cinder,swift,horizon"
     for service in ${tempest_services//,/ }; do
         if is_service_enabled $service ; then
             iniset $TEMPEST_CONFIG service_available $service "True"
@@ -535,6 +600,10 @@
         fi
     done
 
+    iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
+
+    iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+
     if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
         # libvirt-lxc does not support boot from volume or attaching volumes
         # so basically anything with cinder is out of the question.
@@ -551,15 +620,19 @@
     if [[ "$OFFLINE" != "True" ]]; then
         tox -revenv-tempest --notest
     fi
-    tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
+
+    local tmp_u_c_m
+    tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+    set_tempest_venv_constraints $tmp_u_c_m
+    tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
+    rm -f $tmp_u_c_m
 
     # Auth:
-    iniset $TEMPEST_CONFIG auth tempest_roles "Member"
     if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
         if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
-            tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+            tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
         else
-            tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+            tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
         fi
         iniset $TEMPEST_CONFIG auth use_dynamic_credentials False
         iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
@@ -594,6 +667,9 @@
         # Remove disabled extensions
         network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS)
     fi
+    if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then
+        network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS"
+    fi
     iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
     # Swift API Extensions
     local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
@@ -623,12 +699,22 @@
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
     pip_install 'tox!=2.8.0'
     pushd $TEMPEST_DIR
+    # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
+    # is tag name not master. git_clone would not checkout tag because
+    # TEMPEST_DIR already exist until RECLONE is true.
+    git checkout $TEMPEST_BRANCH
+
+    local tmp_u_c_m
+    tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+    set_tempest_venv_constraints $tmp_u_c_m
+
     tox -r --notest -efull
     # NOTE(mtreinish) Respect constraints in the tempest full venv, things that
     # are using a tox job other than full will not be respecting constraints but
     # running pip install -U on tempest requirements
-    $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
+    $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt
     PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest
+    rm -f $tmp_u_c_m
     popd
 }
 
@@ -636,7 +722,11 @@
 function install_tempest_plugins {
     pushd $TEMPEST_DIR
     if [[ $TEMPEST_PLUGINS != 0 ]] ; then
-        tox -evenv-tempest -- pip install $TEMPEST_PLUGINS
+        local tmp_u_c_m
+        tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+        set_tempest_venv_constraints $tmp_u_c_m
+        tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS
+        rm -f $tmp_u_c_m
         echo "Checking installed Tempest plugins:"
         tox -evenv-tempest -- tempest list-plugins
     fi
diff --git a/lib/tls b/lib/tls
index 0baf86c..b3cc0b4 100644
--- a/lib/tls
+++ b/lib/tls
@@ -37,7 +37,7 @@
 
 if is_service_enabled tls-proxy; then
     # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
-    TLS_IP=${TLS_IP:-$SERVICE_IP}
+    TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)}
 fi
 
 DEVSTACK_HOSTNAME=$(hostname -f)
@@ -67,9 +67,9 @@
     # build common config file
 
     # Verify ``TLS_IP`` is good
-    if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then
+    if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then
         # auto-discover has changed the IP
-        TLS_IP=$HOST_IP
+        TLS_IP=$(ipv6_unquote $SERVICE_HOST)
     fi
 }
 
@@ -227,8 +227,10 @@
 function init_cert {
     if [[ ! -r $DEVSTACK_CERT ]]; then
         if [[ -n "$TLS_IP" ]]; then
-            # Lie to let incomplete match routines work
-            TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
+            TLS_IP="IP:$TLS_IP"
+            if [[ -n "$HOST_IPV6" ]]; then
+                TLS_IP="$TLS_IP,IP:$HOST_IPV6"
+            fi
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -246,13 +248,12 @@
     local alt_names=$4
 
     if [ "$common_name" != "$SERVICE_HOST" ]; then
-        if [[ -z "$alt_names" ]]; then
-            alt_names="DNS:$SERVICE_HOST"
-        else
-            alt_names="$alt_names,DNS:$SERVICE_HOST"
-        fi
         if is_ipv4_address "$SERVICE_HOST" ; then
-            alt_names="$alt_names,IP:$SERVICE_HOST"
+            if [[ -z "$alt_names" ]]; then
+                alt_names="IP:$SERVICE_HOST"
+            else
+                alt_names="$alt_names,IP:$SERVICE_HOST"
+            fi
         fi
     fi
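Worked example with hypothetical values: for common_name=controller and SERVICE_HOST=10.0.0.5, the rewritten block yields alt_names="IP:10.0.0.5"; the old code's fabricated DNS:10.0.0.5 entry is gone, and a non-IPv4 SERVICE_HOST now contributes no SAN at all.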
 
@@ -340,6 +341,24 @@
     fi
 }
 
+# Deploy the service cert & key to a service specific
+# location
+function deploy_int_cert {
+    local cert_target_file=$1
+    local key_target_file=$2
+
+    sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file"
+    sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file"
+}
+
+# Deploy the intermediate CA cert bundle file to a service
+# specific location
+function deploy_int_CA {
+    local ca_target_file=$1
+
+    sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file"
+}
+
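A hypothetical caller of the two helpers above (paths are illustrative):

    deploy_int_cert /etc/myservice/ssl/cert.pem /etc/myservice/ssl/key.pem
    deploy_int_CA /etc/myservice/ssl/ca.pem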
 # If a non-system python-requests is installed then it will use the
 # built-in CA certificate store rather than the distro-specific
 # CA certificate store. Detect this and symlink to the correct
@@ -348,8 +367,7 @@
 function fix_system_ca_bundle_path {
     if is_service_enabled tls-proxy; then
         local capath
-        local python_cmd=${1:-python}
-        capath=$($python_cmd -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+        capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
 
         if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
             if is_fedora; then
@@ -529,6 +547,9 @@
     LimitRequestFieldSize $f_header_size
     RequestHeader set X-Forwarded-Proto "https"
 
+    # Avoid races (at the cost of performance) when re-using a pooled
+    # connection that has already been closed (bug 1807518).
+    SetEnv proxy-initial-not-pooled
     <Location />
         ProxyPass http://$b_host:$b_port/ retry=0 nocanon
         ProxyPassReverse http://$b_host:$b_port/
@@ -549,14 +570,6 @@
     restart_apache_server
 }
 
-# Follow TLS proxy
-function follow_tls_proxy {
-    sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
-    tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
-    sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
-    tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
-}
-
 # Cleanup Functions
 # =================
 
@@ -564,6 +577,20 @@
 # using tls configuration are down.
 function stop_tls_proxy {
     stop_apache_server
+
+    # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but
+    # necessary so that we can restart after an unstack.  A better
+    # solution would be to ensure that each service calling
+    # start_tls_proxy will call stop_tls_proxy with the same
+    # parameters on shutdown so we can use the disable_apache_site
+    # function and remove individual files there.
+    if is_ubuntu; then
+        sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf
+    else
+        for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do
+            sudo mv $i $i.disabled
+        done
+    fi
 }
 
 # Clean up the CA files
diff --git a/openrc b/openrc
index 37724c5..beeaebe 100644
--- a/openrc
+++ b/openrc
@@ -29,6 +29,7 @@
 # Load the last env variables if available
 if [[ -r $RC_DIR/.stackenv ]]; then
     source $RC_DIR/.stackenv
+    export OS_CACERT
 fi
 
 # Get some necessary configuration
@@ -86,9 +87,9 @@
 
 # If you don't have a working .stackenv, this is the backup position
 KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
-KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP}
+KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP}
 
-export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_AUTH_URI}
+export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI}
 
 # Currently, in order to use openstackclient with Identity API v3,
 # we need to set the domain which the user and project belong to.
@@ -108,5 +109,5 @@
 
 # Currently cinderclient needs you to specify the *volume api* version. This
 # needs to match the config of your catalog returned by Keystone.
-export CINDER_VERSION=${CINDER_VERSION:-2}
+export CINDER_VERSION=${CINDER_VERSION:-3}
 export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION}
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
deleted file mode 100755
index fefd454..0000000
--- a/pkg/elasticsearch.sh
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash -xe
-
-# basic reference point for things like filecache
-#
-# TODO(sdague): once we have a few of these I imagine the download
-# step can probably be factored out to something nicer
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-FILES=$TOP_DIR/files
-source $TOP_DIR/stackrc
-
-# Package source and version, all pkg files are expected to have
-# something like this, as well as a way to override them.
-ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5}
-ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch}
-
-# Elastic search actual implementation
-function wget_elasticsearch {
-    local file=${1}
-
-    if [ ! -f ${FILES}/${file} ]; then
-        wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file}
-    fi
-
-    if [ ! -f ${FILES}/${file}.sha1.txt ]; then
-        wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt
-    fi
-
-    pushd ${FILES};  sha1sum ${file} > ${file}.sha1.gen;  popd
-
-    if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then
-        echo "Invalid elasticsearch download. Could not install."
-        return 1
-    fi
-    return 0
-}
-
-function download_elasticsearch {
-    if is_ubuntu; then
-        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb
-    elif is_fedora; then
-        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
-    fi
-}
-
-function configure_elasticsearch {
-    # currently a no op
-    :
-}
-
-function _check_elasticsearch_ready {
-    # poll elasticsearch to see if it's started
-    if ! wait_for_service 30 http://localhost:9200; then
-        die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
-    fi
-}
-
-function start_elasticsearch {
-    if is_ubuntu; then
-        sudo /etc/init.d/elasticsearch start
-        _check_elasticsearch_ready
-    elif is_fedora; then
-        sudo /bin/systemctl start elasticsearch.service
-        _check_elasticsearch_ready
-    else
-        echo "Unsupported architecture...can not start elasticsearch."
-    fi
-}
-
-function stop_elasticsearch {
-    if is_ubuntu; then
-        sudo /etc/init.d/elasticsearch stop
-    elif is_fedora; then
-        sudo /bin/systemctl stop elasticsearch.service
-    else
-        echo "Unsupported architecture...can not stop elasticsearch."
-    fi
-}
-
-function install_elasticsearch {
-    pip_install_gr elasticsearch
-    if is_package_installed elasticsearch; then
-        echo "Note: elasticsearch was already installed."
-        return
-    fi
-    if is_ubuntu; then
-        is_package_installed default-jre-headless || install_package default-jre-headless
-
-        sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
-        sudo update-rc.d elasticsearch defaults 95 10
-    elif is_fedora; then
-        is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
-        yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
-        sudo /bin/systemctl daemon-reload
-        sudo /bin/systemctl enable elasticsearch.service
-    else
-        echo "Unsupported install of elasticsearch on this architecture."
-    fi
-}
-
-function uninstall_elasticsearch {
-    if is_package_installed elasticsearch; then
-        if is_ubuntu; then
-            sudo apt-get purge elasticsearch
-        elif is_fedora; then
-            sudo yum remove elasticsearch
-        else
-            echo "Unsupported install of elasticsearch on this architecture."
-        fi
-    fi
-}
-
-# The PHASE dispatcher. All pkg files are expected to basically cargo
-# cult the case statement.
-PHASE=$1
-echo "Phase is $PHASE"
-
-case $PHASE in
-    download)
-        download_elasticsearch
-        ;;
-    install)
-        install_elasticsearch
-        ;;
-    configure)
-        configure_elasticsearch
-        ;;
-    start)
-        start_elasticsearch
-        ;;
-    stop)
-        stop_elasticsearch
-        ;;
-    uninstall)
-        uninstall_elasticsearch
-        ;;
-esac
diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml
index ede8382..d090638 100644
--- a/playbooks/devstack.yaml
+++ b/playbooks/devstack.yaml
@@ -1,3 +1,7 @@
 - hosts: all
+  # This is the default strategy; however, since orchestrate-devstack requires
+  # "linear", it is safer to enforce it in case this is running in an
+  # environment configured with a different default strategy.
+  strategy: linear
   roles:
-    - run-devstack
+    - orchestrate-devstack
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 6f5126f..9e66f20 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -1,4 +1,32 @@
 - hosts: all
+  become: True
+  vars:
+    devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/"
+    devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/"
+    devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}"
+  tasks:
+    # NOTE(andreaf) If the tempest service is enabled, a tempest.log is
+    # generated as part of lib/tempest, as a result of verify_tempest_config
+    - name: Check if a tempest log exists
+      stat:
+        path: "{{ devstack_conf_dir }}/tempest.log"
+      register: tempest_log
+    - name: Link post-devstack tempest.log
+      file:
+        src: "{{ devstack_conf_dir }}/tempest.log"
+        dest: "{{ stage_dir }}/verify_tempest_conf.log"
+        state: hard
+      when: tempest_log.stat.exists
   roles:
     - export-devstack-journal
-    - fetch-devstack-log-dir
+    - apache-logs-conf
+    - devstack-project-conf
+    # capture-system-logs should be the last role before stage-output
+    - capture-system-logs
+    - role: stage-output
+    # NOTE(andreaf) We need fetch-devstack-log-dir only until the base job
+    # starts pulling logs for us from {{ ansible_user_dir }}/logs.
+    # Meanwhile we already store things in ansible_user_dir and use
+    # fetch-devstack-log-dir setting devstack_base_dir
+    - role: fetch-devstack-log-dir
+      devstack_base_dir: "{{ ansible_user_dir }}"
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index 4d07960..68cb1d8 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -1,4 +1,31 @@
 - hosts: all
+  pre_tasks:
+    - name: Fix the permissions of the zuul home directory
+      # Make sure that the zuul home can be traversed,
+      # so that all users can access the sources placed there.
+      # Some distributions create it with 700 by default.
+      file:
+        path: "{{ ansible_user_dir }}"
+        mode: a+x
+    - name: Gather minimum local MTU
+      set_fact:
+        local_mtu: >
+          {% set mtus = [] -%}
+          {% for interface in ansible_interfaces -%}
+            {% set interface_variable = 'ansible_' + interface -%}
+            {% if interface_variable in hostvars[inventory_hostname] -%}
+              {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%}
+            {% endif -%}
+          {% endfor -%}
+          {{- mtus|min -}}
+    - name: Calculate external_bridge_mtu
+      # 30 bytes is overhead for vxlan (which is greater than GRE),
+      # allowing us to use either overlay option with this MTU.
+      # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay.
+      # TODO(andreaf) This should work, but it may have to be reconciled with
+      # the MTU setting used by the multinode setup roles in multinode pre.yaml
+      set_fact:
+        external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}"
   roles:
     - configure-swap
     - setup-stack-user
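Worked example: with a minimum local MTU of 1500, external_bridge_mtu comes out as 1500 - 30 - 40 = 1430, leaving headroom for a vxlan overlay carried over IPv6.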
@@ -8,15 +35,3 @@
     - setup-devstack-cache
     - start-fresh-logging
     - write-devstack-local-conf
-  # TODO(jeblair): remove when configure-mirrors is fixed  
-  tasks:
-    - name: Hack mirror_info
-      shell:
-        _raw_params: |
-          mkdir /etc/ci
-          cat << "EOF" > /etc/ci/mirror_info.sh
-          export NODEPOOL_UCA_MIRROR=http://mirror.dfw.rax.openstack.org/ubuntu-cloud-archive
-          EOF
-      args:
-        executable: /bin/bash
-      become: true
diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml
new file mode 100644
index 0000000..7f0cb19
--- /dev/null
+++ b/playbooks/tox/post.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  roles:
+    - fetch-tox-output
+    - fetch-subunit-output
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
new file mode 100644
index 0000000..d7e4670
--- /dev/null
+++ b/playbooks/tox/pre.yaml
@@ -0,0 +1,8 @@
+- hosts: all
+  roles:
+    # Run bindep and test-setup after devstack so that they won't interfere
+    - role: bindep
+      bindep_profile: test
+      bindep_dir: "{{ zuul_work_dir }}"
+    - test-setup
+    - ensure-tox
diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml
new file mode 100644
index 0000000..e4043d8
--- /dev/null
+++ b/playbooks/tox/run-both.yaml
@@ -0,0 +1,11 @@
+- hosts: all
+  roles:
+    - run-devstack
+    # Run bindep and test-setup after devstack so that they won't interfere
+    - role: bindep
+      bindep_profile: test
+      bindep_dir: "{{ zuul_work_dir }}"
+    - test-setup
+    - ensure-tox
+    - get-devstack-os-environment
+    - tox
diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml
new file mode 100644
index 0000000..0d065c6
--- /dev/null
+++ b/playbooks/tox/run.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+  roles:
+    - get-devstack-os-environment
+    - tox
diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml
new file mode 100644
index 0000000..cfa1676
--- /dev/null
+++ b/playbooks/unit-tests/pre.yaml
@@ -0,0 +1,13 @@
+- hosts: all
+
+  tasks:
+
+    - name: Install prerequisites
+      shell:
+        chdir: '{{ zuul.project.src_dir }}'
+        executable: /bin/bash
+        cmd: |
+          set -e
+          set -x
+          echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc
+          ./tools/install_prereqs.sh
diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml
new file mode 100644
index 0000000..181521f
--- /dev/null
+++ b/playbooks/unit-tests/run.yaml
@@ -0,0 +1,12 @@
+- hosts: all
+
+  tasks:
+
+    - name: Run run_tests.sh
+      shell:
+        chdir: '{{ zuul.project.src_dir }}'
+        executable: /bin/bash
+        cmd: |
+          set -e
+          set -x
+          ./run_tests.sh
diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst
new file mode 100644
index 0000000..eccee40
--- /dev/null
+++ b/roles/apache-logs-conf/README.rst
@@ -0,0 +1,12 @@
+Prepare apache configs and logs for staging
+
+Make sure apache config files and log files are available in a Linux-flavor
+independent location. Note that this relies on hard links, so the staging
+directory must be in the same partition where the logs and configs are.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml
new file mode 100644
index 0000000..1fb04fe
--- /dev/null
+++ b/roles/apache-logs-conf/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml
new file mode 100644
index 0000000..bd64574
--- /dev/null
+++ b/roles/apache-logs-conf/tasks/main.yaml
@@ -0,0 +1,89 @@
+- name: Ensure {{ stage_dir }}/apache exists
+  file:
+    path: "{{ stage_dir }}/apache"
+    state: directory
+
+- name: Link apache logs on Debian/SuSE
+  block:
+  - name: Find logs
+    find:
+      path: "/var/log/apache2"
+      file_type: any
+    register: debian_suse_apache_logs
+
+  - name: Dereference files
+    stat:
+      path: "{{ item.path }}"
+    with_items: "{{ debian_suse_apache_logs.files }}"
+    register: debian_suse_apache_deref_logs
+
+  - name: Create hard links
+    file:
+      src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+      dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}"
+      state: hard
+    with_items: "{{ debian_suse_apache_deref_logs.results }}"
+    when:
+      - item.stat.isreg or item.stat.islnk
+  when: ansible_os_family in ('Debian', 'Suse')
+  no_log: true
+
+- name: Link apache logs on RedHat
+  block:
+  - name: Find logs
+    find:
+      path: "/var/log/httpd"
+      file_type: any
+    register: redhat_apache_logs
+
+  - name: Dereference files
+    stat:
+      path: "{{ item.path }}"
+    with_items: "{{ redhat_apache_logs.files }}"
+    register: redhat_apache_deref_logs
+
+  - name: Create hard links
+    file:
+      src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+      dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}"
+      state: hard
+    with_items: "{{ redhat_apache_deref_logs.results }}"
+    when:
+      - item.stat.isreg or item.stat.islnk
+  when: ansible_os_family == 'RedHat'
+  no_log: true
+
+- name: Ensure {{ stage_dir }}/apache_config exists
+  file:
+    path: "{{ stage_dir }}/apache_config"
+    state: directory
+
+- name: Define config paths
+  set_fact:
+    apache_config_paths:
+      'Debian': '/etc/apache2/sites-enabled/'
+      'Suse': '/etc/apache2/conf.d/'
+      'RedHat': '/etc/httpd/conf.d/'
+
+- name: Discover configurations
+  find:
+    path: "{{ apache_config_paths[ansible_os_family] }}"
+    file_type: any
+  register: apache_configs
+  no_log: true
+
+- name: Dereference configurations
+  stat:
+    path: "{{ item.path }}"
+  with_items: "{{ apache_configs.files }}"
+  register: apache_configs_deref
+  no_log: true
+
+- name: Link configurations
+  file:
+    src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+    dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}"
+    state: hard
+  with_items: "{{ apache_configs_deref.results }}"
+  when: item.stat.isreg or item.stat.islnk
+  no_log: true
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
new file mode 100644
index 0000000..c284124
--- /dev/null
+++ b/roles/capture-system-logs/README.rst
@@ -0,0 +1,20 @@
+Stage a number of system type logs
+
+Stage a number of different logs / reports:
+- snapshot of iptables
+- disk space available
+- pip[2|3] freeze
+- installed packages (dpkg/rpm)
+- ceph, openvswitch, gluster
+- coredumps
+- dns resolver
+- listen53
+- unbound.log
+- deprecation messages
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/capture-system-logs/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
new file mode 100644
index 0000000..905806d
--- /dev/null
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -0,0 +1,50 @@
+# TODO(andreaf) Make this into proper Ansible
+- name: Stage various logs and reports
+  shell:
+    executable: /bin/bash
+    cmd: |
+      sudo iptables-save > {{ stage_dir }}/iptables.txt
+      df -h > {{ stage_dir }}/df.txt
+
+      for py_ver in 2 3; do
+          if [[ `which python${py_ver}` ]]; then
+              python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt
+          fi
+      done
+
+      if [ `command -v dpkg` ]; then
+          dpkg -l > {{ stage_dir }}/dpkg-l.txt
+      fi
+      if [ `command -v rpm` ]; then
+          rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
+      fi
+
+      # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
+      # failed to start due to denials from SELinux — useful for CentOS
+      # and Fedora machines.  For Ubuntu (which runs AppArmor), DevStack
+      # already captures the contents of /var/log/kern.log (via
+      # `journalctl -t kernel` redirected into syslog.txt.gz), which
+      # contains AppArmor-related messages.
+      if [ -f /var/log/audit/audit.log ] ; then
+          sudo cp /var/log/audit/audit.log {{ stage_dir }}/audit.log &&
+          sudo chmod +r {{ stage_dir }}/audit.log;
+      fi
+
+      # gzip and save any coredumps in /var/core
+      if [ -d /var/core ]; then
+          sudo gzip -r /var/core
+          sudo cp -r /var/core {{ stage_dir }}/
+      fi
+
+      sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt
+
+      # NOTE(andreaf) Service logs are already in logs/ thanks to the
+      # export-devstack-journal role. Apache logs are under apache/ thanks to
+      # the apache-logs-conf role.
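+      # Strip timestamps, dates, bracketed request ids and bare numbers so
+      # that repeated deprecation messages collapse to a single line; awk
+      # then prefixes each unique message with its occurrence count.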
+      grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \
+          sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \
+          sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \
+          sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' | \
+          sed -r 's/\[.*\]/ /g' | \
+          sed -r 's/\s[0-9]+\s/ /g' | \
+          awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log
diff --git a/roles/configure-swap/README.rst b/roles/configure-swap/README.rst
deleted file mode 100644
index eaba5cf..0000000
--- a/roles/configure-swap/README.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Configure a swap partition
-
-Creates a swap partition on the ephemeral block device (the rest of which
-will be mounted on /opt).
-
-**Role Variables**
-
-.. zuul:rolevar:: configure_swap_size
-   :default: 8192
-
-   The size of the swap partition, in MiB.
diff --git a/roles/configure-swap/defaults/main.yaml b/roles/configure-swap/defaults/main.yaml
deleted file mode 100644
index 4d62232..0000000
--- a/roles/configure-swap/defaults/main.yaml
+++ /dev/null
@@ -1 +0,0 @@
-configure_swap_size: 8192
diff --git a/roles/configure-swap/tasks/ephemeral.yaml b/roles/configure-swap/tasks/ephemeral.yaml
deleted file mode 100644
index c2316ea..0000000
--- a/roles/configure-swap/tasks/ephemeral.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# Configure attached ephemeral devices for storage and swap
-
-- assert:
-    that:
-      - "ephemeral_device is defined"
-
-- name: Set partition names
-  set_fact:
-    swap_partition: "{{ ephemeral_device}}1"
-    opt_partition: "{{ ephemeral_device}}2"
-
-- name: Ensure ephemeral device is unmounted
-  become: yes
-  mount:
-    name: "{{ ephemeral_device }}"
-    state: unmounted
-
-- name: Get existing partitions
-  become: yes
-  parted:
-    device: "{{ ephemeral_device }}"
-    unit: MiB
-  register: ephemeral_partitions
-
-- name: Remove any existing partitions
-  become: yes
-  parted:
-    device: "{{ ephemeral_device }}"
-    number: "{{ item.num }}"
-    state: absent
-  with_items:
-    - "{{ ephemeral_partitions.partitions }}"
-
-- name: Create new disk label
-  become: yes
-  parted:
-    label: msdos
-    device: "{{ ephemeral_device }}"
-
-- name: Create swap partition
-  become: yes
-  parted:
-    device: "{{ ephemeral_device }}"
-    number: 1
-    state: present
-    part_start: '0%'
-    part_end: "{{ configure_swap_size }}MiB"
-
-- name: Create opt partition
-  become: yes
-  parted:
-    device: "{{ ephemeral_device }}"
-    number: 2
-    state: present
-    part_start: "{{ configure_swap_size }}MiB"
-    part_end: "100%"
-
-- name: Make swap on partition
-  become: yes
-  command: "mkswap {{ swap_partition }}"
-
-- name: Write swap to fstab
-  become: yes
-  mount:
-    path: none
-    src: "{{ swap_partition }}"
-    fstype: swap
-    opts: sw
-    passno: 0
-    dump: 0
-    state: present
-
-# XXX: does "parted" plugin ensure the partition is available
-# before moving on?  No udev settles here ...
-
-- name: Add all swap
-  become: yes
-  command: swapon -a
-
-- name: Create /opt filesystem
-  become: yes
-  filesystem:
-    fstype: ext4
-    dev: "{{ opt_partition }}"
-
-# Rackspace at least does not have enough room for two devstack
-# installs on the primary partition.  We copy in the existing /opt to
-# the new partition on the ephemeral device, and then overmount /opt
-# to there for the test runs.
-#
-# NOTE(ianw): the existing "mount" touches fstab.  There is currently (Sep2017)
-# work in [1] to split mount & fstab into separate parts, but for now we bundle
-# it into an atomic shell command
-# [1] https://github.com/ansible/ansible/pull/27174
-- name: Copy old /opt
-  become: yes
-  shell: |
-    mount {{ opt_partition }} /mnt
-    find /opt/ -mindepth 1 -maxdepth 1 -exec mv {} /mnt/ \;
-    umount /mnt
-
-# This overmounts any existing /opt
-- name: Add opt to fstab and mount
-  become: yes
-  mount:
-    path: /opt
-    src: "{{ opt_partition }}"
-    fstype: ext4
-    opts: noatime
-    state: mounted
diff --git a/roles/configure-swap/tasks/main.yaml b/roles/configure-swap/tasks/main.yaml
deleted file mode 100644
index 8960c72..0000000
--- a/roles/configure-swap/tasks/main.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-# On RAX hosts, we have a small root partition and a large,
-# unallocated ephemeral device attached at /dev/xvde
-- name: Set ephemeral device if /dev/xvde exists
-  when: ansible_devices["xvde"] is defined
-  set_fact:
-    ephemeral_device: "/dev/xvde"
-
-# On other providers, we have a device called "ephemeral0".
-#
-# NOTE(ianw): Once [1] is in our ansible (2.4 era?), we can figure
-# this out more directly by walking the device labels in the facts
-#
-# [1] https://github.com/ansible/ansible/commit/d46dd99f47c0ee5081d15bc5b741e9096d8bfd3e
-- name: Set ephemeral device by label
-  when: ephemeral_device is undefined
-  block:
-    - name: Get ephemeral0 device node
-      command: /sbin/blkid -L ephemeral0
-      register: ephemeral0
-      # If this doesn't exist, returns !0
-      ignore_errors: yes
-      changed_when: False
-
-    - name: Set ephemeral device if LABEL exists
-      when: "ephemeral0.rc == 0"
-      set_fact:
-        ephemeral_device: "{{ ephemeral0.stdout }}"
-
-# If we have ephemeral storage and we don't appear to have setup swap,
-# we will create a swap and move /opt to a large data partition there.
-- include: ephemeral.yaml
-  static: no
-  when:
-    - ephemeral_device is defined
-    - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size
-
-# If no ephemeral device and no swap, then we will setup some swap
-# space on the root device to ensure all hosts a consistent memory
-# environment.
-- include: root.yaml
-  static: no
-  when:
-    - ephemeral_device is undefined
-    - ansible_memory_mb['swap']['total'] | int + 10 <= configure_swap_size
-
-# ensure a standard level of swappiness.  Some platforms
-# (rax+centos7) come with swappiness of 0 (presumably because the
-# vm doesn't come with swap setup ... but we just did that above),
-# which depending on the kernel version can lead to the OOM killer
-# kicking in on some processes despite swap being available;
-# particularly things like mysql which have very high ratio of
-# anonymous-memory to file-backed mappings.
-#
-# This sets swappiness low; we really don't want to be relying on
-# cloud I/O based swap during our runs if we can help it
-- name: Set swappiness
-  become: yes
-  sysctl:
-    name: vm.swappiness
-    value: 30
-    state: present
-
-- debug:  var=ephemeral_device
diff --git a/roles/configure-swap/tasks/root.yaml b/roles/configure-swap/tasks/root.yaml
deleted file mode 100644
index f22b537..0000000
--- a/roles/configure-swap/tasks/root.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-# If no ephemeral devices are available, use root filesystem
-
-- name: Calculate required swap
-  set_fact:
-    swap_required: "{{ configure_swap_size - ansible_memory_mb['swap']['total'] | int }}"
-
-- block:
-    - name: Get root filesystem
-      shell: df --output='fstype' /root | tail -1
-      register: root_fs
-
-    - name: Save root filesystem
-      set_fact:
-        root_filesystem: "{{ root_fs.stdout }}"
-
-    - debug: var=root_filesystem
-
-# Note, we don't use a sparse device to avoid wedging when disk space
-# and memory are both unavailable.
-
-# Cannot fallocate on filesystems like XFS, so use slower dd
-- name: Create swap backing file for non-EXT fs
-  when: '"ext" not in root_filesystem'
-  become: yes
-  command: dd if=/dev/zero of=/root/swapfile bs=1M count={{ swap_required }}
-  args:
-    creates: /root/swapfile
-
-- name: Create sparse swap backing file for EXT fs
-  when: '"ext" in root_filesystem'
-  become: yes
-  command: fallocate -l {{ swap_required }}M /root/swapfile
-  args:
-    creates: /root/swapfile
-
-- name: Ensure swapfile perms
-  become: yes
-  file:
-    path: /root/swapfile
-    owner: root
-    group: root
-    mode: 0600
-
-- name: Make swapfile
-  become: yes
-  command: mkswap /root/swapfile
-
-- name: Write swap to fstab
-  become: yes
-  mount:
-    path: none
-    src: /root/swapfile
-    fstype: swap
-    opts: sw
-    passno: 0
-    dump: 0
-    state: present
-
-- name: Add all swap
-  become: yes
-  command: swapon -a
-
-- debug: var=swap_required
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
new file mode 100644
index 0000000..400a8da
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -0,0 +1,16 @@
+Verify the IPv6-only deployments
+
+This role needs to be invoked from a playbook that
+runs tests. It verifies the IPv6 settings on the
+devstack side and that devstack deployed its services
+on IPv6. The role is invoked before the tests run, so
+that any missing IPv6 settings or deployments fail
+the job early.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
new file mode 100644
index 0000000..59d3b79
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Verify the ipv6-only deployments
+  become: true
+  become_user: stack
+  shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh"
diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst
new file mode 100644
index 0000000..3f2d4c9
--- /dev/null
+++ b/roles/devstack-project-conf/README.rst
@@ -0,0 +1,11 @@
+Prepare OpenStack project configurations for staging
+
+Prepare all relevant config files for staging.
+This is helpful to avoid staging the entire /etc.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml
new file mode 100644
index 0000000..f8fb8de
--- /dev/null
+++ b/roles/devstack-project-conf/defaults/main.yaml
@@ -0,0 +1 @@
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml
new file mode 100644
index 0000000..917cdbc
--- /dev/null
+++ b/roles/devstack-project-conf/tasks/main.yaml
@@ -0,0 +1,25 @@
+- name: Ensure {{ stage_dir }}/etc exists
+  file:
+    path: "{{ stage_dir }}/etc"
+    state: directory
+
+- name: Check which projects have a config folder
+  stat:
+    path: "/etc/{{ item.value.short_name }}"
+  with_dict: "{{ zuul.projects }}"
+  register: project_configs
+  no_log: true
+
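+# cp -pRL copies recursively while preserving mode, ownership and
+# timestamps and dereferencing symlinks, so the staged copies are
+# self-contained regular files.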
+- name: Copy configuration files
+  command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }}
+  when: item.stat.exists
+  with_items: "{{ project_configs.results }}"
+
+- name: Check if openstack has a config folder
+  stat:
+    path: "/etc/openstack"
+  register: openstack_configs
+
+- name: Copy openstack configuration files
+  command: cp -pRL /etc/openstack {{ stage_dir }}/etc/
+  when: openstack_configs.stat.exists
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
index 5f00592..9e3c919 100644
--- a/roles/export-devstack-journal/README.rst
+++ b/roles/export-devstack-journal/README.rst
@@ -1,15 +1,25 @@
 Export journal files from devstack services
 
-Export the systemd journal for every devstack service in native
-journal format as well as text.  Also, export a syslog-style file with
-kernal and sudo messages.
+This role performs a number of log collection tasks:
 
-Writes the output to the ``logs/`` subdirectory of
-``devstack_base_dir``.
+* Export the systemd journal in native format
+* For every devstack service, export logs to text in a file named
+  ``screen-*``, to maintain legacy compatibility from when devstack
+  services used to run in a screen session and were logged separately.
+* Export a syslog-style file with kernel and sudo messages, for legacy
+  compatibility.
+
+Writes the output to the ``logs/`` subdirectory of ``stage_dir``.
 
 **Role Variables**
 
 .. zuul:rolevar:: devstack_base_dir
    :default: /opt/stack
 
-   The devstack base directory.
+   The devstack base directory. This is used to obtain
+   ``log-start-timestamp.txt``, which is used to filter the systemd journal.
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory.
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml
index fea05c8..1fb04fe 100644
--- a/roles/export-devstack-journal/defaults/main.yaml
+++ b/roles/export-devstack-journal/defaults/main.yaml
@@ -1 +1,2 @@
 devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index b9af02a..db38b10 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -1,29 +1,54 @@
-# TODO: convert this to ansible
-- name: Export journal files
+# NOTE(andreaf) This bypasses the stage-output role
+- name: Ensure {{ stage_dir }}/logs exists
+  become: true
+  file:
+    path: "{{ stage_dir }}/logs"
+    state: directory
+    owner: "{{ ansible_user }}"
+
+- name: Export legacy stack screen log files
   become: true
   shell:
     cmd: |
       u=""
       name=""
-      for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do
+      for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do
         name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
-        journalctl -o short-precise --unit $u | tee {{ devstack_base_dir }}/logs/$name.txt > /dev/null
+        journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt
       done
 
-      # Export the journal in export format to make it downloadable
-      # for later searching. It can then be rewritten to a journal native
-      # format locally using systemd-journal-remote. This makes a class of
-      # debugging much easier. We don't do the native conversion here as
-      # some distros do not package that tooling.
-      journalctl -u 'devstack@*' -o export | \
-          xz --threads=0 - > {{ devstack_base_dir }}/logs/devstack.journal.xz
-
-      # The journal contains everything running under systemd, we'll
-      # build an old school version of the syslog with just the
-      # kernel and sudo messages.
+- name: Export legacy syslog.txt
+  become: true
+  shell:
+    # The journal contains everything running under systemd; we'll
+    # build an old-school version of the syslog with just the
+    # kernel and sudo messages.
+    cmd: |
       journalctl \
           -t kernel \
           -t sudo \
           --no-pager \
           --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
-        | tee {{ devstack_base_dir }}/logs/syslog.txt > /dev/null
+         > {{ stage_dir }}/logs/syslog.txt
+
+# TODO: convert this to ansible
+#  - make a list of the above units
+#  - iterate the list here
+- name: Export journal
+  become: true
+  shell:
+    # Export the journal in export format to make it downloadable
+    # for later searching. It can then be rewritten to a journal native
+    # format locally using systemd-journal-remote. This makes a class of
+    # debugging much easier. We don't do the native conversion here as
+    # some distros do not package that tooling.
+    cmd: |
+      journalctl -o export \
+          --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
+        | gzip > {{ stage_dir }}/logs/devstack.journal.gz
+
+- name: Save journal README
+  become: true
+  template:
+    src: devstack.journal.README.txt.j2
+    dest: '{{ stage_dir }}/logs/devstack.journal.README.txt'
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
new file mode 100644
index 0000000..30519f6
--- /dev/null
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -0,0 +1,33 @@
+Devstack systemd journal
+========================
+
+The devstack.journal file is a copy of the systemd journal during the
+devstack run.
+
+To use it, you will need to convert it so journalctl can read it
+locally.  After downloading the file:
+
+ $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal
+
+Note this binary is not in the regular path.  On Debian/Ubuntu
+platforms, you will need to have the "systemd-journal-remote" package
+installed.
+
+It should result in something like:
+
+ Finishing after writing <large number> entries
+
+You can then use journalctl to examine this file.  For example, to see
+all devstack services try:
+
+ $ journalctl --file ./output.journal -u 'devstack@*'
+
+To see just the cinder API server logs, restrict the match with
+
+ $ journalctl --file ./output.journal -u 'devstack@c-api'
+
+There may be many types of logs available in the journal; a command like
+
+ $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u
+
+can help you find interesting things to filter on.
\ No newline at end of file
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
index 5a198b2..276c4e0 100644
--- a/roles/fetch-devstack-log-dir/tasks/main.yaml
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -1,5 +1,10 @@
+# as the user in the guest may not exist on the executor
+# we do not preserve the group or owner of the copied logs.
+
 - name: Collect devstack logs
   synchronize:
     dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
     mode: pull
     src: "{{ devstack_base_dir }}/logs"
+    group: no
+    owner: no
diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst
new file mode 100644
index 0000000..68ddce8
--- /dev/null
+++ b/roles/get-devstack-os-environment/README.rst
@@ -0,0 +1,40 @@
+Reads the OS_* variables set by devstack through openrc
+for the specified user and project and merges them into
+the tox_environment fact.
+
+**WARNING**: this role is meant to be used as a porting aid
+for the non-unified python-<service>client jobs that
+already exist, as those clients do not use clouds.yaml
+the way openstackclient does.
+Once those clients and their jobs are deprecated and removed,
+or once the new code is able to read from clouds.yaml
+directly, this role should be removed as well.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: openrc_file
+   :default: {{ devstack_base_dir }}/devstack/openrc
+
+   The location of the generated openrc file.
+
+.. zuul:rolevar:: openrc_user
+   :default: admin
+
+   The user whose credentials should be retrieved.
+
+.. zuul:rolevar:: openrc_project
+   :default: admin
+
+   The project (which openrc_user is part of) whose
+   access data should be retrieved.
+
+.. zuul:rolevar:: openrc_enable_export
+   :default: false
+
+   Set it to true to enable the export into tox_environment.
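+
+   A minimal sketch of enabling the export from a playbook (the play
+   layout here is illustrative)::
+
+     - hosts: controller
+       roles:
+         - role: get-devstack-os-environment
+           vars:
+             openrc_enable_export: true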
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml
new file mode 100644
index 0000000..f68ea56
--- /dev/null
+++ b/roles/get-devstack-os-environment/defaults/main.yaml
@@ -0,0 +1,6 @@
+devstack_base_dir: "/opt/stack"
+openrc_file: "{{ devstack_base_dir }}/devstack/openrc"
+openrc_user: admin
+openrc_project: admin
+openrc_enable_export: false
+tox_environment: {}
diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml
new file mode 100644
index 0000000..b2c5e93
--- /dev/null
+++ b/roles/get-devstack-os-environment/tasks/main.yaml
@@ -0,0 +1,14 @@
+- when: openrc_enable_export
+  block:
+    - name: Extract the OS_ environment variables
+      shell:
+        cmd: |
+          source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null
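+          # Print the OS_* variables as a small YAML mapping (e.g.
+          # OS_USERNAME=admin becomes '  OS_USERNAME: "admin"') so the
+          # registered output can be parsed with from_yaml below.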
+          env | awk -F= 'BEGIN {print "---" } /^OS_/ { print "  "$1": \""$2"\""} '
+      args:
+        executable: "/bin/bash"
+      register: env_os
+
+    - name: Append the OS_ environment variables to tox_environment
+      set_fact:
+        tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}"
diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst
new file mode 100644
index 0000000..097dcea
--- /dev/null
+++ b/roles/orchestrate-devstack/README.rst
@@ -0,0 +1,25 @@
+Orchestrate a devstack
+
+Runs devstack in a multinode scenario, with one controller node
+and a group of subnodes.
+
+The reason for this role is so that jobs in other repositories may
+run devstack in their plays without having to re-implement the
+orchestration logic.
+
+The "run-devstack" role is available to run devstack with no
+orchestration.
+
+This role sets up the controller and CA first; it then pushes CA
+data to the sub-nodes and runs devstack there. The only requirements for
+this role are that the controller's inventory_hostname be "controller"
+and that all sub-nodes be defined in a group called "subnode".
+
+This role needs to be invoked from a playbook that uses a "linear" strategy.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
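+
+   A minimal sketch of a playbook applying this role (assuming the
+   controller/subnode inventory layout described above)::
+
+     - hosts: all
+       strategy: linear
+       roles:
+         - orchestrate-devstack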
diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/orchestrate-devstack/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml
new file mode 100644
index 0000000..2b8ae01
--- /dev/null
+++ b/roles/orchestrate-devstack/tasks/main.yaml
@@ -0,0 +1,49 @@
+- name: Run devstack on the controller
+  include_role:
+    name: run-devstack
+  when: inventory_hostname == 'controller'
+
+- name: Setup devstack on sub-nodes
+  block:
+
+  - name: Distribute the build sshkey for the user "stack"
+    include_role:
+      name: copy-build-sshkey
+    vars:
+      copy_sshkey_target_user: 'stack'
+
+  - name: Sync CA data to subnodes (when any)
+    # Only do this if the tls-proxy service is defined and enabled
+    include_role:
+      name: sync-devstack-data
+    when: devstack_services['tls-proxy']|default(false)
+
+  - name: Sync controller ceph.conf and key rings to subnode
+    include_role:
+      name: sync-controller-ceph-conf-and-keys
+    when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins
+
+  - name: Run devstack on the sub-nodes
+    include_role:
+      name: run-devstack
+    when: inventory_hostname in groups['subnode']
+
+  - name: Discover hosts
+    # Discovers compute nodes (subnodes) and maps them to cells. Only run
+    # on the controller node.
+    # NOTE(mriedem): We want to remove this if/when nova supports
+    # auto-registration of computes with cells, but that's not happening in
+    # Ocata.
+    # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts
+    # function in devstack gate. Since this is now in devstack, which is
+    # branched, we know that the discover_hosts tool exists.
+    become: true
+    become_user: stack
+    shell: ./tools/discover_hosts.sh
+    args:
+      chdir: "{{ devstack_base_dir }}/devstack"
+    when: inventory_hostname == 'controller'
+
+  when:
+    - '"controller" in hostvars'
+    - '"subnode" in groups'
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
new file mode 100644
index 0000000..a8447d2
--- /dev/null
+++ b/roles/process-stackviz/README.rst
@@ -0,0 +1,22 @@
+Generate a stackviz report.
+
+Generate a stackviz report from subunit and dstat data, using
+the stackviz archive embedded in test images.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: stage_dir
+   :default: "{{ ansible_user_dir }}"
+
+   The stage directory where the input data can be found and
+   the output will be produced.
+
+.. zuul:rolevar:: zuul_work_dir
+   :default: {{ devstack_base_dir }}/tempest
+
+   Directory to work in. It has to be a fully qualified path.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
new file mode 100644
index 0000000..f3bc32b
--- /dev/null
+++ b/roles/process-stackviz/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
+zuul_work_dir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
new file mode 100644
index 0000000..3ba3d9c
--- /dev/null
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -0,0 +1,73 @@
+- name: Process Stackviz
+  block:
+
+  - name: Check if the stackviz archive exists
+    stat:
+      path: "/opt/cache/files/stackviz-latest.tar.gz"
+    register: stackviz_archive
+
+  - debug:
+      msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
+    when: not stackviz_archive.stat.exists
+
+  - name: Check if subunit data exists
+    stat:
+      path: "{{ zuul_work_dir }}/testrepository.subunit"
+    register: subunit_input
+
+  - debug:
+      msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
+    when: not subunit_input.stat.exists
+
+  - name: Install stackviz
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+    block:
+      - include_role:
+          name: ensure-pip
+
+      - pip:
+          name: "file://{{ stackviz_archive.stat.path }}"
+          virtualenv: /tmp/stackviz
+          virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
+          extra_args: -U
+
+  - name: Deploy stackviz static html+js
+    command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+
+  - name: Check if dstat data exists
+    stat:
+      path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
+    register: dstat_input
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+
+  - name: Run stackviz with dstat
+    shell: |
+      cat {{ subunit_input.stat.path }} | \
+        /tmp/stackviz/bin/stackviz-export \
+          --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
+          --env --stdin \
+          {{ stage_dir }}/stackviz/data
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+      - dstat_input.stat.exists
+
+  - name: Run stackviz without dstat
+    shell: |
+      cat {{ subunit_input.stat.path }} | \
+        /tmp/stackviz/bin/stackviz-export \
+          --env --stdin \
+          {{ stage_dir }}/stackviz/data
+    when:
+      - stackviz_archive.stat.exists
+      - subunit_input.stat.exists
+      - not dstat_input.stat.exists
+
+  ignore_errors: yes
diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml
index bafebaf..f58b31d 100644
--- a/roles/run-devstack/tasks/main.yaml
+++ b/roles/run-devstack/tasks/main.yaml
@@ -1,5 +1,10 @@
 - name: Run devstack
-  command: ./stack.sh
+  shell:
+    cmd: |
+      ./stack.sh 2>&1
+      rc=$?
+      echo "*** FINISHED ***"
+      exit $rc
   args:
     chdir: "{{devstack_base_dir}}/devstack"
   become: true
diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml
index b9f38df..d8e8cfe 100644
--- a/roles/setup-devstack-log-dir/tasks/main.yaml
+++ b/roles/setup-devstack-log-dir/tasks/main.yaml
@@ -2,4 +2,7 @@
   file:
     path: '{{ devstack_base_dir }}/logs'
     state: directory
+    mode: 0755
+    owner: stack
+    group: stack
   become: yes
diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst
index 4ebf839..0aa048b 100644
--- a/roles/setup-devstack-source-dirs/README.rst
+++ b/roles/setup-devstack-source-dirs/README.rst
@@ -9,3 +9,8 @@
    :default: /opt/stack
 
    The devstack base directory.
+
+.. zuul:rolevar:: devstack_sources_branch
+   :default: None
+
+   The target branch to be set up (where available).
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml
index fea05c8..77a74d7 100644
--- a/roles/setup-devstack-source-dirs/defaults/main.yaml
+++ b/roles/setup-devstack-source-dirs/defaults/main.yaml
@@ -1 +1,9 @@
 devstack_base_dir: /opt/stack
+devstack_source_dirs:
+  - src/opendev.org/opendev
+  - src/opendev.org/openstack
+  - src/opendev.org/openstack-dev
+  - src/opendev.org/openstack-infra
+  - src/opendev.org/starlingx
+  - src/opendev.org/x
+  - src/opendev.org/zuul
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index e6bbae2..294c29c 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -1,9 +1,6 @@
-- name: Find all source repos used by this job
+- name: Find all OpenStack source repos used by this job
   find:
-    paths:
-      - src/git.openstack.org/openstack
-      - src/git.openstack.org/openstack-dev
-      - src/git.openstack.org/openstack-infra
+    paths: "{{ devstack_source_dirs }}"
     file_type: directory
   register: found_repos
 
@@ -12,6 +9,59 @@
   with_items: '{{ found_repos.files }}'
   become: yes
 
+# Github projects are github.com/username/repo (username might be a
+# top-level project too), so we have to do a two-step swizzle to just
+# get the full repo path (ansible's find module doesn't help with this
+# :/)
+- name: Find top level github projects
+  find:
+    paths:
+      - src/github.com
+    file_type: directory
+  register: found_github_projects
+
+- name: Find actual github repos
+  find:
+    paths: '{{ found_github_projects.files | map(attribute="path") | list }}'
+    file_type: directory
+  register: found_github_repos
+  when: found_github_projects.files
+
+- name: Copy github repos into devstack working directory
+  command: rsync -a {{ item.path }} {{ devstack_base_dir }}
+  with_items: '{{ found_github_repos.files }}'
+  become: yes
+  when: found_github_projects.files
+
+- name: Set up refspec for repos in the devstack working directory
+  shell:
+    # Copied almost "as-is" from devstack-gate setup-workspace function
+    # but removing the dependency on functions.sh
+    # TODO this should be rewritten as a python module.
+    cmd: |
+      cd {{ devstack_base_dir }}/{{ item.path | basename }}
+      base_branch={{ devstack_sources_branch }}
+      if git branch -a | grep "$base_branch" > /dev/null ; then
+          git checkout $base_branch
+      elif [[ "$base_branch" == stable/* ]]; then
+          # Look for an eol tag for the stable branch.
+          eol_tag=${base_branch#stable/}-eol
+          if git tag -l |grep $eol_tag >/dev/null; then
+              git checkout $eol_tag
+              git reset --hard $eol_tag
+              if ! git clean -x -f -d -q ; then
+                  sleep 1
+                  git clean -x -f -d -q
+              fi
+          fi
+      else
+          git checkout master
+      fi
+  args:
+    executable: /bin/bash
+  with_items: '{{ found_repos.files }}'
+  when: devstack_sources_branch is defined
+
 - name: Set ownership of repos
   file:
     path: '{{ devstack_base_dir }}'
diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml
index 8384515..0fc7c2d 100644
--- a/roles/setup-stack-user/tasks/main.yaml
+++ b/roles/setup-stack-user/tasks/main.yaml
@@ -21,10 +21,12 @@
     group: stack
   become: yes
 
-- name: Set stack user home directory permissions
+- name: Set stack user home directory permissions and ownership
   file:
     path: '{{ devstack_stack_home_dir }}'
     mode: 0755
+    owner: stack
+    group: stack
   become: yes
 
 - name: Copy 50_stack_sh file to /etc/sudoers.d
@@ -36,7 +38,7 @@
     group: root
   become: yes
 
-- name: Create new/.cache folder within BASE
+- name: Create .cache folder within BASE
   file:
     path: '{{ devstack_stack_home_dir }}/.cache'
     state: directory
diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst
new file mode 100644
index 0000000..e3d2bb4
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/README.rst
@@ -0,0 +1,3 @@
+Sync ceph config and keys between controller and subnodes
+
+Simply copy the contents of /etc/ceph on the controller to subnodes.
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
new file mode 100644
index 0000000..71ece57
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
@@ -0,0 +1,15 @@
+- name: Ensure /etc/ceph exists on subnode
+  become: true
+  file:
+    path: /etc/ceph
+    state: directory
+
+- name: Copy /etc/ceph from controller to subnode
+  become: true
+  synchronize:
+    owner: yes
+    group: yes
+    perms: yes
+    src: /etc/ceph/
+    dest: /etc/ceph/
+  delegate_to: controller
diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst
new file mode 100644
index 0000000..388625c
--- /dev/null
+++ b/roles/sync-devstack-data/README.rst
@@ -0,0 +1,19 @@
+Sync devstack data for multinode configurations
+
+Sync any data files which include certificates to be used if TLS is enabled.
+This role must be executed on the controller and it pushes data to all
+subnodes.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+   :default: /opt/stack
+
+   The devstack base directory.
+
+.. zuul:rolevar:: devstack_data_base_dir
+   :default: {{ devstack_base_dir }}
+
+   The devstack base directory for data/.
+   Useful for example when multiple executions of devstack (e.g. grenade)
+   share the same data directory.
diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml
new file mode 100644
index 0000000..6b5017b
--- /dev/null
+++ b/roles/sync-devstack-data/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_data_base_dir: "{{ devstack_base_dir }}"
diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml
new file mode 100644
index 0000000..a1d37c3
--- /dev/null
+++ b/roles/sync-devstack-data/tasks/main.yaml
@@ -0,0 +1,59 @@
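+# NOTE: the CA data is staged through the Zuul executor: it is pulled from
+# the controller into the executor work root first, and then pushed from
+# there to each subnode, because the synchronize module transfers files
+# between the executor and one remote node at a time.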
+- name: Ensure the data folder exists
+  become: true
+  file:
+    path: "{{ devstack_data_base_dir }}/data"
+    state: directory
+    owner: stack
+    group: stack
+    mode: 0755
+  when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Ensure the CA folder exists
+  become: true
+  file:
+    path: "{{ devstack_data_base_dir }}/data/CA"
+    state: directory
+    owner: stack
+    group: stack
+    mode: 0755
+  when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Pull the CA certificate and folder
+  become: true
+  synchronize:
+    src: "{{ item }}"
+    dest: "{{ zuul.executor.work_root }}/{{ item | basename }}"
+    mode: pull
+  with_items:
+    - "{{ devstack_data_base_dir }}/data/ca-bundle.pem"
+    - "{{ devstack_data_base_dir }}/data/CA"
+  when: inventory_hostname == 'controller'
+
+- name: Push the CA certificate
+  become: true
+  become_user: stack
+  synchronize:
+    src: "{{ zuul.executor.work_root }}/ca-bundle.pem"
+    dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem"
+    mode: push
+  when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Push the CA folder
+  become: true
+  become_user: stack
+  synchronize:
+    src: "{{ zuul.executor.work_root }}/CA/"
+    dest: "{{ devstack_data_base_dir }}/data/"
+    mode: push
+  when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Ensure the data folder and subfolders have the correct permissions
+  become: true
+  file:
+    path: "{{ devstack_data_base_dir }}/data"
+    state: directory
+    owner: stack
+    group: stack
+    mode: 0755
+    recurse: yes
+  when: 'inventory_hostname in groups["subnode"]|default([])'
diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst
index e30dfa1..d0a51e7 100644
--- a/roles/write-devstack-local-conf/README.rst
+++ b/roles/write-devstack-local-conf/README.rst
@@ -20,6 +20,15 @@
    bash shell variables, and will be ordered so that variables used by
    later entries appear first.
 
+   As a special case, the variable ``LIBS_FROM_GIT`` will be
+   constructed automatically from the projects which appear in the
+   ``required-projects`` list defined by the job plus the project of
+   the change under test.  To instruct devstack to install a library
+   from source rather than pypi, simply add that library to the job's
+   ``required-projects`` list.  To override the
+   automatically-generated value, set ``LIBS_FROM_GIT`` in
+   ``devstack_localrc`` to the desired value.
+
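+   For example, to have devstack install a library such as oslo.log from
+   source (the project name here is only illustrative), the job would
+   declare::
+
+     required-projects:
+       - openstack/oslo.log
+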
 .. zuul:rolevar:: devstack_local_conf
    :type: dict
 
@@ -47,13 +56,27 @@
             This is a dictionary of key-value pairs which comprise
             this section of the INI file.
 
+.. zuul:rolevar:: devstack_base_services
+   :type: list
+   :default: {{ base_services | default(omit) }}
+
+   A list of base services which are enabled. Services can be added or removed
+   from this list via the ``devstack_services`` variable. This is ignored if
+   ``base`` is set to ``False`` in ``devstack_services``.
+
 .. zuul:rolevar:: devstack_services
    :type: dict
 
    A dictionary mapping service names to boolean values.  If the
    boolean value is ``false``, a ``disable_service`` line will be
    emitted for the service name.  If it is ``true``, then
-   ``enable_service`` will be emitted.  All other values are ignored.
+   ``enable_service`` will be emitted. All other values are ignored.
+
+   The special key ``base`` can be used to enable or disable the base set of
+   services enabled by default. If ``base`` is found, it will be processed
+   before all other keys. If its value is ``False``, a ``disable_all_services``
+   will be emitted; if its value is ``True``, services from
+   ``devstack_base_services`` will be emitted via ``ENABLED_SERVICES``.
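+
+   For example, to start from an empty service set and enable only a few
+   services (the service names here are illustrative)::
+
+     devstack_services:
+       base: false
+       mysql: true
+       rabbit: true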
 
 .. zuul:rolevar:: devstack_plugins
    :type: dict
@@ -61,3 +84,16 @@
    A dictionary mapping a plugin name to a git repo location.  If the
    location is a non-empty string, then an ``enable_plugin`` line will
   be emitted for the plugin name.
+
+   If a plugin declares a dependency on another plugin (via
+   ``plugin_requires`` in the plugin's settings file), this role will
+   automatically emit ``enable_plugin`` lines in the correct order.
+
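+   For example, with a hypothetical ``foo`` plugin whose settings file
+   declares ``plugin_requires foo bar``, the plugins may be listed in any
+   order and ``bar`` will still be enabled first::
+
+     devstack_plugins:
+       foo: https://opendev.org/openstack/foo-plugin
+       bar: https://opendev.org/openstack/bar-plugin
+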
+.. zuul:rolevar:: tempest_plugins
+   :type: list
+
+   A list of tempest plugins which are installed alongside tempest.
+
+   The list of values will be combined with the base devstack directory
+   and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable
+   already exists, its value is *not* changed.
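+
+   For example (the plugin name is only illustrative)::
+
+     tempest_plugins:
+       - neutron-tempest-plugin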
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml
index 491fa0f..7bc1dec 100644
--- a/roles/write-devstack-local-conf/defaults/main.yaml
+++ b/roles/write-devstack-local-conf/defaults/main.yaml
@@ -1,2 +1,3 @@
 devstack_base_dir: /opt/stack
 devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf"
+devstack_base_services: "{{ enabled_services | default(omit) }}"
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py
index 4134beb..2f97d0e 100644
--- a/roles/write-devstack-local-conf/library/devstack_local_conf.py
+++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py
@@ -14,16 +14,69 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 import re
 
 
-class VarGraph(object):
+class DependencyGraph(object):
     # This is based on the JobGraph from Zuul.
 
+    def __init__(self):
+        self._names = set()
+        self._dependencies = {}  # dependent_name -> set(parent_names)
+
+    def add(self, name, dependencies):
+        # Append the dependency information
+        self._dependencies.setdefault(name, set())
+        try:
+            for dependency in dependencies:
+                # Make sure a circular dependency is never created
+                ancestors = self._getParentNamesRecursively(
+                    dependency, soft=True)
+                ancestors.add(dependency)
+                if name in ancestors:
+                    raise Exception("Dependency cycle detected in {}".
+                                    format(name))
+                self._dependencies[name].add(dependency)
+        except Exception:
+            del self._dependencies[name]
+            raise
+
+    def getDependenciesRecursively(self, parent):
+        dependencies = []
+
+        current_dependencies = self._dependencies[parent]
+        for current in current_dependencies:
+            if current not in dependencies:
+                dependencies.append(current)
+            for dep in self.getDependenciesRecursively(current):
+                if dep not in dependencies:
+                    dependencies.append(dep)
+        return dependencies
+
+    def _getParentNamesRecursively(self, dependent, soft=False):
+        all_parent_items = set()
+        items_to_iterate = set([dependent])
+        while len(items_to_iterate) > 0:
+            current_item = items_to_iterate.pop()
+            current_parent_items = self._dependencies.get(current_item)
+            if current_parent_items is None:
+                if soft:
+                    current_parent_items = set()
+                else:
+                    raise Exception("Dependent item {} not found: ".format(
+                                    dependent))
+            new_parent_items = current_parent_items - all_parent_items
+            items_to_iterate |= new_parent_items
+            all_parent_items |= new_parent_items
+        return all_parent_items
+
+
+class VarGraph(DependencyGraph):
     def __init__(self, vars):
+        super(VarGraph, self).__init__()
         self.vars = {}
         self._varnames = set()
-        self._dependencies = {}  # dependent_var_name -> set(parent_var_names)
         for k, v in vars.items():
             self._varnames.add(k)
         for k, v in vars.items():
@@ -38,28 +91,21 @@
             raise Exception("Variable {} already added".format(key))
         self.vars[key] = value
         # Append the dependency information
-        self._dependencies.setdefault(key, set())
+        dependencies = set()
+        for dependency in self.getDependencies(value):
+            if dependency == key:
+                # A variable is allowed to reference itself; no
+                # dependency link needed in that case.
+                continue
+            if dependency not in self._varnames:
+                # It's not necessary to create a link for an
+                # external variable.
+                continue
+            dependencies.add(dependency)
         try:
-            for dependency in self.getDependencies(value):
-                if dependency == key:
-                    # A variable is allowed to reference itself; no
-                    # dependency link needed in that case.
-                    continue
-                if dependency not in self._varnames:
-                    # It's not necessary to create a link for an
-                    # external variable.
-                    continue
-                # Make sure a circular dependency is never created
-                ancestor_vars = self._getParentVarNamesRecursively(
-                    dependency, soft=True)
-                ancestor_vars.add(dependency)
-                if any((key == anc_var) for anc_var in ancestor_vars):
-                    raise Exception("Dependency cycle detected in var {}".
-                                    format(key))
-                self._dependencies[key].add(dependency)
+            self.add(key, dependencies)
         except Exception:
             del self.vars[key]
-            del self._dependencies[key]
             raise
 
     def getVars(self):
@@ -67,63 +113,133 @@
         keys = sorted(self.vars.keys())
         seen = set()
         for key in keys:
-            dependencies = self.getDependentVarsRecursively(key)
+            dependencies = self.getDependenciesRecursively(key)
             for var in dependencies + [key]:
                 if var not in seen:
                     ret.append((var, self.vars[var]))
                     seen.add(var)
         return ret
 
-    def getDependentVarsRecursively(self, parent_var):
-        dependent_vars = []
 
-        current_dependent_vars = self._dependencies[parent_var]
-        for current_var in current_dependent_vars:
-            if current_var not in dependent_vars:
-                dependent_vars.append(current_var)
-            for dep in self.getDependentVarsRecursively(current_var):
-                if dep not in dependent_vars:
-                    dependent_vars.append(dep)
-        return dependent_vars
+class PluginGraph(DependencyGraph):
+    def __init__(self, base_dir, plugins):
+        super(PluginGraph, self).__init__()
+        # The dependency trees expressed by all the plugins we found
+        # (which may be more than those the job is using).
+        self._plugin_dependencies = {}
+        self.loadPluginNames(base_dir)
 
-    def _getParentVarNamesRecursively(self, dependent_var, soft=False):
-        all_parent_vars = set()
-        vars_to_iterate = set([dependent_var])
-        while len(vars_to_iterate) > 0:
-            current_var = vars_to_iterate.pop()
-            current_parent_vars = self._dependencies.get(current_var)
-            if current_parent_vars is None:
-                if soft:
-                    current_parent_vars = set()
-                else:
-                    raise Exception("Dependent var {} not found: ".format(
-                                    dependent_var))
-            new_parent_vars = current_parent_vars - all_parent_vars
-            vars_to_iterate |= new_parent_vars
-            all_parent_vars |= new_parent_vars
-        return all_parent_vars
+        self.plugins = {}
+        self._pluginnames = set()
+        for k, v in plugins.items():
+            self._pluginnames.add(k)
+        for k, v in plugins.items():
+            self._addPlugin(k, str(v))
+
+    def loadPluginNames(self, base_dir):
+        if base_dir is None:
+            return
+        git_roots = []
+        for root, dirs, files in os.walk(base_dir):
+            if '.git' not in dirs:
+                continue
+            # Don't go deeper than git roots
+            dirs[:] = []
+            git_roots.append(root)
+        for root in git_roots:
+            devstack = os.path.join(root, 'devstack')
+            if not (os.path.exists(devstack) and os.path.isdir(devstack)):
+                continue
+            settings = os.path.join(devstack, 'settings')
+            if not (os.path.exists(settings) and os.path.isfile(settings)):
+                continue
+            self.loadDevstackPluginInfo(settings)
+
+    define_re = re.compile(r'^define_plugin\s+(\S+).*')
+    require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*')
+
+    def loadDevstackPluginInfo(self, fn):
+        name = None
+        reqs = set()
+        with open(fn) as f:
+            for line in f:
+                m = self.define_re.match(line)
+                if m:
+                    name = m.group(1)
+                m = self.require_re.match(line)
+                if m:
+                    if name == m.group(1):
+                        reqs.add(m.group(2))
+        if name and reqs:
+            self._plugin_dependencies[name] = reqs
+
+    def getDependencies(self, value):
+        return self._plugin_dependencies.get(value, [])
+
+    def _addPlugin(self, key, value):
+        if key in self.plugins:
+            raise Exception("Plugin {} already added".format(key))
+        self.plugins[key] = value
+        # Append the dependency information
+        dependencies = set()
+        for dependency in self.getDependencies(key):
+            if dependency == key:
+                continue
+            dependencies.add(dependency)
+        try:
+            self.add(key, dependencies)
+        except Exception:
+            del self.plugins[key]
+            raise
+
+    def getPlugins(self):
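+        # Emit each plugin's (recursive) dependencies before the plugin
+        # itself, so the enable_plugin lines come out in dependency order.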
+        ret = []
+        keys = sorted(self.plugins.keys())
+        seen = set()
+        for key in keys:
+            dependencies = self.getDependenciesRecursively(key)
+            for plugin in dependencies + [key]:
+                if plugin not in seen:
+                    ret.append((plugin, self.plugins[plugin]))
+                    seen.add(plugin)
+        return ret
 
 
 class LocalConf(object):
 
-    def __init__(self, localrc, localconf, services, plugins):
+    def __init__(self, localrc, localconf, base_services, services, plugins,
+                 base_dir, projects, project, tempest_plugins):
         self.localrc = []
+        self.warnings = []
         self.meta_sections = {}
+        self.plugin_deps = {}
+        self.base_dir = base_dir
+        self.projects = projects
+        self.project = project
+        self.tempest_plugins = tempest_plugins
+        if services or base_services:
+            self.handle_services(base_services, services or {})
+        self.handle_localrc(localrc)
+        # Plugins must be the last items in localrc, otherwise
+        # the configuration lines which follow them in the file are
+        # not applied to the plugins (for example, the value of DEST).
         if plugins:
             self.handle_plugins(plugins)
-        if services:
-            self.handle_services(services)
-        if localrc:
-            self.handle_localrc(localrc)
         if localconf:
             self.handle_localconf(localconf)
 
     def handle_plugins(self, plugins):
-        for k, v in plugins.items():
+        pg = PluginGraph(self.base_dir, plugins)
+        for k, v in pg.getPlugins():
             if v:
                 self.localrc.append('enable_plugin {} {}'.format(k, v))
 
-    def handle_services(self, services):
+    def handle_services(self, base_services, services):
+        enable_base_services = services.pop('base', True)
+        if enable_base_services and base_services:
+            self.localrc.append('ENABLED_SERVICES={}'.format(
+                ",".join(base_services)))
+        else:
+            self.localrc.append('disable_all_services')
         for k, v in services.items():
             if v is False:
                 self.localrc.append('disable_service {}'.format(k))
@@ -131,9 +247,46 @@
                 self.localrc.append('enable_service {}'.format(k))
 
     def handle_localrc(self, localrc):
-        vg = VarGraph(localrc)
-        for k, v in vg.getVars():
-            self.localrc.append('{}={}'.format(k, v))
+        lfg = False
+        tp = False
+        if localrc:
+            vg = VarGraph(localrc)
+            for k, v in vg.getVars():
+                # Avoid double quoting
+                if len(v) and v[0] == '"':
+                    self.localrc.append('{}={}'.format(k, v))
+                else:
+                    self.localrc.append('{}="{}"'.format(k, v))
+                if k == 'LIBS_FROM_GIT':
+                    lfg = True
+                elif k == 'TEMPEST_PLUGINS':
+                    tp = v
+
+        if not lfg and (self.projects or self.project):
+            required_projects = []
+            if self.projects:
+                for project_name, project_info in self.projects.items():
+                    if project_info.get('required'):
+                        required_projects.append(project_info['short_name'])
+            if self.project:
+                if self.project['short_name'] not in required_projects:
+                    required_projects.append(self.project['short_name'])
+            if required_projects:
+                self.localrc.append('LIBS_FROM_GIT={}'.format(
+                    ','.join(required_projects)))
+
+        if self.tempest_plugins:
+            if not tp:
+                tp_dirs = []
+                for tempest_plugin in self.tempest_plugins:
+                    tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
+                self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
+                        ' '.join(tp_dirs)))
+            else:
+                self.warnings.append('TEMPEST_PLUGINS already defined ({}), '
+                                     'requested value {} ignored'.format(
+                                         tp, self.tempest_plugins))
+
 
     def handle_localconf(self, localconf):
         for phase, phase_data in localconf.items():
@@ -161,25 +314,38 @@
     module = AnsibleModule(
         argument_spec=dict(
             plugins=dict(type='dict'),
+            base_services=dict(type='list'),
             services=dict(type='dict'),
             localrc=dict(type='dict'),
             local_conf=dict(type='dict'),
+            base_dir=dict(type='path'),
             path=dict(type='str'),
+            projects=dict(type='dict'),
+            project=dict(type='dict'),
+            tempest_plugins=dict(type='list'),
         )
     )
 
     p = module.params
     lc = LocalConf(p.get('localrc'),
                    p.get('local_conf'),
+                   p.get('base_services'),
                    p.get('services'),
-                   p.get('plugins'))
+                   p.get('plugins'),
+                   p.get('base_dir'),
+                   p.get('projects'),
+                   p.get('project'),
+                   p.get('tempest_plugins'))
     lc.write(p['path'])
 
-    module.exit_json()
+    module.exit_json(warnings=lc.warnings)
 
 
-from ansible.module_utils.basic import *  # noqa
-from ansible.module_utils.basic import AnsibleModule
+try:
+    from ansible.module_utils.basic import *  # noqa
+    from ansible.module_utils.basic import AnsibleModule
+except ImportError:
+    pass
 
 if __name__ == '__main__':
     main()
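
The quoting logic in handle_localrc above is the subtle part of this change, so here is a minimal standalone sketch of the same rule (plain Python, outside the Ansible module; the function name is illustrative only):

    def render_localrc_line(key, value):
        # Values that already start with a double quote are passed through
        # verbatim to avoid double quoting; everything else gets quoted.
        if value and value[0] == '"':
            return '{}={}'.format(key, value)
        return '{}="{}"'.format(key, value)

    # Matches the expectations of test_avoid_double_quote and
    # test_overridelibs_from_git in the tests below.
    assert render_localrc_line('TESTVAR', '"quoted value"') == 'TESTVAR="quoted value"'
    assert render_localrc_line('LIBS_FROM_GIT', 'oslo.db') == 'LIBS_FROM_GIT="oslo.db"'
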
diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py
new file mode 100644
index 0000000..7c526b3
--- /dev/null
+++ b/roles/write-devstack-local-conf/library/test.py
@@ -0,0 +1,291 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from devstack_local_conf import LocalConf
+from collections import OrderedDict
+
+class TestDevstackLocalConf(unittest.TestCase):
+
+    @staticmethod
+    def _init_localconf(p):
+        lc = LocalConf(p.get('localrc'),
+                       p.get('local_conf'),
+                       p.get('base_services'),
+                       p.get('services'),
+                       p.get('plugins'),
+                       p.get('base_dir'),
+                       p.get('projects'),
+                       p.get('project'),
+                       p.get('tempest_plugins'))
+        return lc
+
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+
+    def tearDown(self):
+        shutil.rmtree(self.tmpdir)
+
+    def test_plugins(self):
+        "Test that plugins without dependencies work"
+        localrc = {'test_localrc': '1'}
+        local_conf = {'install':
+                      {'nova.conf':
+                       {'main':
+                        {'test_conf': '2'}}}}
+        services = {'cinder': True}
+        # We use ordereddict here to make sure the plugins are in the
+        # *wrong* order for testing.
+        plugins = OrderedDict([
+            ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
+            ('baz', 'https://git.openstack.org/openstack/baz-plugin'),
+            ])
+        p = dict(localrc=localrc,
+                 local_conf=local_conf,
+                 base_services=[],
+                 services=services,
+                 plugins=plugins,
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'))
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        plugins = []
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('enable_plugin'):
+                    plugins.append(line.split()[1])
+        self.assertEqual(['bar', 'baz', 'foo'], plugins)
+
+
+    def test_plugin_deps(self):
+        "Test that plugins with dependencies work"
+        os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
+        os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
+        os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
+        os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
+        with open(os.path.join(
+                self.tmpdir,
+                'foo-plugin', 'devstack', 'settings'), 'w') as f:
+            f.write('define_plugin foo-plugin\n')
+        with open(os.path.join(
+                self.tmpdir,
+                'bar-plugin', 'devstack', 'settings'), 'w') as f:
+            f.write('define_plugin bar-plugin\n')
+            f.write('plugin_requires bar-plugin foo-plugin\n')
+
+        localrc = {'test_localrc': '1'}
+        local_conf = {'install':
+                      {'nova.conf':
+                       {'main':
+                        {'test_conf': '2'}}}}
+        services = {'cinder': True}
+        # We use ordereddict here to make sure the plugins are in the
+        # *wrong* order for testing.
+        plugins = OrderedDict([
+            ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'),
+            ])
+        p = dict(localrc=localrc,
+                 local_conf=local_conf,
+                 base_services=[],
+                 services=services,
+                 plugins=plugins,
+                 base_dir=self.tmpdir,
+                 path=os.path.join(self.tmpdir, 'test.local.conf'))
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        plugins = []
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('enable_plugin'):
+                    plugins.append(line.split()[1])
+        self.assertEqual(['foo-plugin', 'bar-plugin'], plugins)
+
+    def test_libs_from_git(self):
+        "Test that LIBS_FROM_GIT is auto-generated"
+        projects = {
+            'git.openstack.org/openstack/nova': {
+                'required': True,
+                'short_name': 'nova',
+            },
+            'git.openstack.org/openstack/oslo.messaging': {
+                'required': True,
+                'short_name': 'oslo.messaging',
+            },
+            'git.openstack.org/openstack/devstack-plugin': {
+                'required': False,
+                'short_name': 'devstack-plugin',
+            },
+        }
+        project = {
+            'short_name': 'glance',
+        }
+        p = dict(base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 projects=projects,
+                 project=project)
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        lfg = None
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('LIBS_FROM_GIT'):
+                    lfg = line.strip().split('=')[1]
+        self.assertEqual('nova,oslo.messaging,glance', lfg)
+
+    def test_overridelibs_from_git(self):
+        "Test that LIBS_FROM_GIT can be overridden"
+        localrc = {'LIBS_FROM_GIT': 'oslo.db'}
+        projects = {
+            'git.openstack.org/openstack/nova': {
+                'required': True,
+                'short_name': 'nova',
+            },
+            'git.openstack.org/openstack/oslo.messaging': {
+                'required': True,
+                'short_name': 'oslo.messaging',
+            },
+            'git.openstack.org/openstack/devstack-plugin': {
+                'required': False,
+                'short_name': 'devstack-plugin',
+            },
+        }
+        p = dict(localrc=localrc,
+                 base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 projects=projects)
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        lfg = None
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('LIBS_FROM_GIT'):
+                    lfg = line.strip().split('=')[1]
+        self.assertEqual('"oslo.db"', lfg)
+
+    def test_avoid_double_quote(self):
+        "Test that there a no duplicated quotes"
+        localrc = {'TESTVAR': '"quoted value"'}
+        p = dict(localrc=localrc,
+                 base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 projects={})
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        testvar = None
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('TESTVAR'):
+                    testvar = line.strip().split('=')[1]
+        self.assertEqual('"quoted value"', testvar)
+
+    def test_plugin_circular_deps(self):
+        "Test that plugins with circular dependencies fail"
+        os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
+        os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
+        os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
+        os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
+        with open(os.path.join(
+                self.tmpdir,
+                'foo-plugin', 'devstack', 'settings'), 'w') as f:
+            f.write('define_plugin foo\n')
+            f.write('plugin_requires foo bar\n')
+        with open(os.path.join(
+                self.tmpdir,
+                'bar-plugin', 'devstack', 'settings'), 'w') as f:
+            f.write('define_plugin bar\n')
+            f.write('plugin_requires bar foo\n')
+
+        localrc = {'test_localrc': '1'}
+        local_conf = {'install':
+                      {'nova.conf':
+                       {'main':
+                        {'test_conf': '2'}}}}
+        services = {'cinder': True}
+        # We use ordereddict here to make sure the plugins are in the
+        # *wrong* order for testing.
+        plugins = OrderedDict([
+            ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
+            ])
+        p = dict(localrc=localrc,
+                 local_conf=local_conf,
+                 base_services=[],
+                 services=services,
+                 plugins=plugins,
+                 base_dir=self.tmpdir,
+                 path=os.path.join(self.tmpdir, 'test.local.conf'))
+        with self.assertRaises(Exception):
+            lc = self._init_localconf(p)
+            lc.write(p['path'])
+
+    def _find_tempest_plugins_value(self, file_path):
+        tp = None
+        with open(file_path) as f:
+            for line in f:
+                if line.startswith('TEMPEST_PLUGINS'):
+                    found = line.strip().split('=')[1]
+                    self.assertIsNone(tp,
+                        "TEMPEST_PLUGIN ({}) found again ({})".format(
+                            tp, found))
+                    tp = found
+        return tp
+
+    def test_tempest_plugins(self):
+        "Test that TEMPEST_PLUGINS is correctly populated."
+        p = dict(base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        tp = self._find_tempest_plugins_value(p['path'])
+        self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp)
+        self.assertEqual(len(lc.warnings), 0)
+
+    def test_tempest_plugins_not_overridden(self):
+        """Test that the existing value of TEMPEST_PLUGINS is not overridden
+        by the user-provided value, but a warning is emitted."""
+        localrc = {'TEMPEST_PLUGINS': 'someplugin'}
+        p = dict(localrc=localrc,
+                 base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        tp = self._find_tempest_plugins_value(p['path'])
+        self.assertEqual('"someplugin"', tp)
+        self.assertEqual(len(lc.warnings), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()
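
These tests import devstack_local_conf directly, so they can be run outside of Ansible; a minimal runner (a suggested invocation, not part of the change) might look like:

    import unittest

    # Assumes the current working directory is
    # roles/write-devstack-local-conf/library so that devstack_local_conf
    # is importable by test.py.
    suite = unittest.defaultTestLoader.discover('.', pattern='test.py')
    unittest.TextTestRunner(verbosity=2).run(suite)
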
diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml
index 1d67616..bfd0860 100644
--- a/roles/write-devstack-local-conf/tasks/main.yaml
+++ b/roles/write-devstack-local-conf/tasks/main.yaml
@@ -4,6 +4,11 @@
   devstack_local_conf:
     path: "{{ devstack_local_conf_path }}"
     plugins: "{{ devstack_plugins|default(omit) }}"
+    base_services: "{{ devstack_base_services|default(omit) }}"
     services: "{{ devstack_services|default(omit) }}"
     localrc: "{{ devstack_localrc|default(omit) }}"
     local_conf: "{{ devstack_local_conf|default(omit) }}"
+    base_dir: "{{ devstack_base_dir|default(omit) }}"
+    projects: "{{ zuul.projects }}"
+    project: "{{ zuul.project }}"
+    tempest_plugins: "{{ tempest_plugins|default(omit) }}"
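
The projects/project values wired in here come from Zuul's inventory; their expected shape, mirrored from the fixtures in test.py above, is roughly:

    # Only entries with required=True contribute to LIBS_FROM_GIT; the
    # project under test (zuul.project) is always appended as well.
    projects = {
        'git.openstack.org/openstack/nova': {
            'required': True,
            'short_name': 'nova',
        },
    }
    project = {'short_name': 'glance'}
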
diff --git a/samples/local.sh b/samples/local.sh
index 9cd0bdc..a1c5c81 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -41,6 +41,13 @@
         fi
     done
 
+    # Update default security group
+    # -----------------------------
+
+    # Add tcp/22 and icmp to default security group
+    default=$(openstack security group list -f value -c ID)
+    openstack security group rule create $default --protocol tcp --dst-port 22
+    openstack security group rule create $default --protocol icmp
 
     # Create A Flavor
     # ---------------
@@ -57,12 +64,4 @@
         openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1
     fi
 
-
-    # Other Uses
-    # ----------
-
-    # Add tcp/22 and icmp to default security group
-    openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22
-    openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp
-
 fi
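
For scripted variants of local.sh, the same two rules can be added with openstacksdk instead of the CLI; a rough sketch, assuming (like the shell version above) that the credentials in use see exactly one default security group:

    import openstack

    conn = openstack.connect()
    # Take the single visible (default) security group, mirroring
    # `openstack security group list -f value -c ID` above.
    default = next(iter(conn.network.security_groups()))
    conn.network.create_security_group_rule(
        security_group_id=default.id, direction='ingress',
        protocol='tcp', port_range_min=22, port_range_max=22)
    conn.network.create_security_group_rule(
        security_group_id=default.id, direction='ingress', protocol='icmp')
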
diff --git a/setup.cfg b/setup.cfg
index fcd2b13..a4e621f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,24 +1,12 @@
 [metadata]
 name = DevStack
 summary = OpenStack DevStack
-description-file =
+description_file =
     README.rst
 author = OpenStack
-author-email = openstack-dev@lists.openstack.org
-home-page = https://docs.openstack.org/devstack/latest
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://docs.openstack.org/devstack/latest
 classifier =
     Intended Audience :: Developers
     License :: OSI Approved :: Apache Software License
     Operating System :: POSIX :: Linux
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-warning-is-error = 1
-
-[pbr]
-warnerrors = True
-
-[wheel]
-universal = 1
diff --git a/stack.sh b/stack.sh
index c545c56..b5ad81b 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
 # a multi-node developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
-# (16.04 Xenial or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
 # (7 or newer) machine. (It may work on other platforms but support for those
 # platforms is left to those who added them to DevStack.) It should work in
 # a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -32,7 +32,7 @@
 # Devstack is written in bash, and many functions used throughout
 # devstack process text coming off a command (like the ip command)
 # and do transforms using grep, sed, cut, awk on the strings that are
-# returned. Many of these programs are interationalized, which is
+# returned. Many of these programs are internationalized, which is
 # great for end users, but means that the strings that devstack
 # functions depend upon might not be there in other locales. We thus
 # need to pin the world to an english basis during the runs.
@@ -60,6 +60,9 @@
 LC_ALL=en_US.utf8
 export LC_ALL
 
+# Clear all OpenStack related envvars
+unset `env | grep -E '^OS_' | cut -d = -f 1`
+
 # Make sure umask is sane
 umask 022
 
@@ -93,19 +96,25 @@
 # templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    die $LINENO "missing devstack/files"
+    set +o xtrace
+    echo "missing devstack/files"
+    exit 1
 fi
 
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/inc`` directory is present
 if [ ! -d $TOP_DIR/inc ]; then
-    die $LINENO "missing devstack/inc"
+    set +o xtrace
+    echo "missing devstack/inc"
+    exit 1
 fi
 
 # ``stack.sh`` keeps project libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    die $LINENO "missing devstack/lib"
+    set +o xtrace
+    echo "missing devstack/lib"
+    exit 1
 fi
 
 # Check if run in POSIX shell
@@ -164,9 +173,6 @@
 # Import common functions
 source $TOP_DIR/functions
 
-# Import config functions
-source $TOP_DIR/inc/meta-config
-
 # Import 'public' stack.sh functions
 source $TOP_DIR/lib/stack
 
@@ -221,7 +227,9 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|f26|opensuse-42.2|opensuse-42.3|rhel7|kvmibm1) ]]; then
+SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8"
+
+if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -244,7 +252,7 @@
 # --------------
 
 # We're not as **root** so make sure ``sudo`` is available
-is_package_installed sudo || install_package sudo
+is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo
 
 # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
 sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
@@ -282,47 +290,25 @@
 # Some distros need to add repos beyond the defaults provided by the vendor
 # to pick up required packages.
 
-function _install_epel_and_rdo {
-    # NOTE: We always remove and install latest -- some environments
-    # use snapshot images, and if EPEL version updates they break
-    # unless we update them to latest version.
-    if sudo yum repolist enabled epel | grep -q 'epel'; then
-        uninstall_package epel-release || true
+function _install_epel {
+    # epel-release is in extras repo which is enabled by default
+    install_package epel-release
+
+    # RDO repos are not tested with epel and may have incompatibilities, so
+    # let's limit the packages fetched from epel to the ones not in RDO repos.
+    sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel
+}
+
+function _install_rdo {
+    if [[ "$TARGET_BRANCH" == "master" ]]; then
+        # rdo-release.el8.rpm points to latest RDO release, use that for master
+        sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+    else
+        # For stable branches use corresponding release rpm
+        rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
+        sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
     fi
-
-    # This trick installs the latest epel-release from a bootstrap
-    # repo, then removes itself (as epel-release installed the
-    # "real" repo).
-    #
-    # You would think that rather than this, you could use
-    # $releasever directly in .repo file we create below.  However
-    # RHEL gives a $releasever of "6Server" which breaks the path;
-    # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
-    cat <<EOF | sudo tee /etc/yum.repos.d/epel-bootstrap.repo
-[epel-bootstrap]
-name=Bootstrap EPEL
-mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=\$basearch
-failovermethod=priority
-enabled=0
-gpgcheck=0
-EOF
-    # Enable a bootstrap repo.  It is removed after finishing
-    # the epel-release installation.
-    is_package_installed yum-utils || install_package yum-utils
-    sudo yum-config-manager --enable epel-bootstrap
-    yum_install epel-release || \
-        die $LINENO "Error installing EPEL repo, cannot continue"
-    sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
-
-    # ... and also optional to be enabled
-    sudo yum-config-manager --enable rhel-7-server-optional-rpms
-
-    # install the lastest RDO
-    is_package_installed rdo-release || yum_install https://rdoproject.org/repos/rdo-release.rpm
-
-    if is_oraclelinux; then
-        sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
-    fi
+    sudo dnf -y update
 }
 
 
@@ -334,9 +320,12 @@
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
-sudo mkdir -p $DEST
-safe_chown -R $STACK_USER $DEST
-safe_chmod 0755 $DEST
+# If directory exists do not modify the permissions.
+if [[ ! -d $DEST ]]; then
+    sudo mkdir -p $DEST
+    safe_chown -R $STACK_USER $DEST
+    safe_chmod 0755 $DEST
+fi
 
 # Destination path for devstack logs
 if [[ -n ${LOGDIR:-} ]]; then
@@ -345,15 +334,20 @@
 
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
-safe_chmod 0755 $DATA_DIR
+if [[ ! -d $DATA_DIR ]]; then
+    sudo mkdir -p $DATA_DIR
+    safe_chown -R $STACK_USER $DATA_DIR
+    safe_chmod 0755 $DATA_DIR
+fi
+
+# Create and/or clean the async state directory
+async_init
 
 # Configure proper hostname
 # Certain services such as rabbitmq require that the local hostname resolves
 # correctly.  Make sure it exists in /etc/hosts so that is always true.
 LOCAL_HOSTNAME=`hostname -s`
-if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then
+if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
@@ -362,25 +356,40 @@
 # to speed things up
 SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
 
-# If we have /etc/nodepool/provider assume we're on a OpenStack CI
-# node, where EPEL is already pointing at our internal mirror and RDO
-# is pre-installed.
-if [[ -f /etc/nodepool/provider ]]; then
-    SKIP_EPEL_INSTALL=True
-    if is_fedora; then
-        # However, EPEL is not enabled by default.
-        sudo yum-config-manager --enable epel
+if [[ $DISTRO == "rhel8" ]]; then
+    # If we have /etc/ci/mirror_info.sh assume we're on an OpenStack CI
+    # node, where EPEL is installed (but disabled) and already
+    # pointing at our internal mirror
+    if [[ -f /etc/ci/mirror_info.sh ]]; then
+        SKIP_EPEL_INSTALL=True
+        sudo dnf config-manager --set-enabled epel
     fi
-fi
 
-if is_fedora && [[ $DISTRO == "rhel7" ]] && \
-        [[ ${SKIP_EPEL_INSTALL} != True ]]; then
-    _install_epel_and_rdo
+    # The PowerTools repo provides libyaml-devel, required by devstack
+    # itself, and EPEL packages assume that the PowerTools repository is
+    # enabled.
+    sudo dnf config-manager --set-enabled PowerTools
+
+    # CentOS 8.3 changed the repository name to lower case.
+    sudo dnf config-manager --set-enabled powertools
+
+    if [[ ${SKIP_EPEL_INSTALL} != True ]]; then
+        _install_epel
+    fi
+    # Along with EPEL, CentOS (and its derivatives) requires some packages
+    # that are only available in RDO repositories (e.g. OVS, or later
+    # versions of kvm) to run.
+    _install_rdo
+
+    # NOTE(cgoncalves): workaround RHBZ#1154272
+    # dnf fails for non-privileged users when expired_repos.json doesn't exist.
+    # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
+    # Patch: https://github.com/rpm-software-management/dnf/pull/1448
+    echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
 fi
 
 # Ensure python is installed
 # --------------------------
-is_package_installed python || install_package python
+install_python
 
 
 # Configure Logging
@@ -388,6 +397,7 @@
 
 # Set up logging level
 VERBOSE=$(trueorfalse True VERBOSE)
+VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE)
 
 # Draw a spinner so the user knows something is happening
 function spinner {
@@ -453,15 +463,19 @@
     # stdout later.
     exec 3>&1
     if [[ "$VERBOSE" == "True" ]]; then
+        _of_args="-v"
+        if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then
+            _of_args="$_of_args --no-timestamp"
+        fi
         # Set fd 1 and 2 to write the log file
-        exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1
+        exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
         # Set fd 6 to summary log file
-        exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
+        exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
     else
         # Set fd 1 and 2 to primary logfile
-        exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
+        exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
         # Set fd 6 to summary logfile and stdout
-        exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
+        exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
     fi
 
     echo_summary "stack.sh log $LOGFILE"
@@ -478,7 +492,7 @@
         exec 1>/dev/null 2>&1
     fi
     # Always send summary fd to original stdout
-    exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
+    exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 )
 fi
 
 # Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
@@ -514,9 +528,9 @@
             generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
         fi
         if [[ -z $LOGDIR ]]; then
-            $TOP_DIR/tools/worlddump.py
+            ${PYTHON} $TOP_DIR/tools/worlddump.py
         else
-            $TOP_DIR/tools/worlddump.py -d $LOGDIR
+            ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR
         fi
     else
         # If we error before we've installed os-testr, this will fail.
@@ -585,7 +599,9 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/tcpdump
 source $TOP_DIR/lib/etcd3
+source $TOP_DIR/lib/os-vif
 
 # Extras Source
 # --------------
@@ -662,7 +678,14 @@
 # The available database backends are listed in ``DATABASE_BACKENDS`` after
 # ``lib/database`` is sourced. ``mysql`` is the default.
 
-initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
+if initialize_database_backends; then
+    echo "Using $DATABASE_TYPE database backend"
+    # Last chance for the database password. This must be handled here
+    # because read_password is not a library function.
+    read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
+else
+    echo "No database enabled"
+fi
 
 
 # Queue Configuration
@@ -733,37 +756,34 @@
 echo_summary "Installing package prerequisites"
 source $TOP_DIR/tools/install_prereqs.sh
 
-# Configure an appropriate Python environment
+# Configure an appropriate Python environment.
+#
+# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip
+# is very active and changes are not generally reflected in the LTS
+# distros.  This often involves important things like dependency or
+# conflict resolution, and has often been required because the
+# complicated constraints etc. used by openstack have tickled bugs in
+# distro versions of pip.  We want to find these problems as they
+# happen, rather than years later when we try to update our LTS
+# distro.  Whilst it is clear that global installations of upstream
+# pip are less and less common, with virtualenv's being the general
+# approach now; there are a lot of devstack plugins that assume a
+# global install environment.
 if [[ "$OFFLINE" != "True" ]]; then
     PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
 fi
 
+# Do the ugly hacks for broken packages and distros
+source $TOP_DIR/tools/fixup_stuff.sh
+fixup_all
+
 # Install subunit for the subunit output stream
 pip_install -U os-testr
 
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
-
-# Install Python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]]; then
-    echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
-    pip_install -U virtualenv
-
-    rm -rf $DEST/.venv
-    virtualenv --system-site-packages $DEST/.venv
-    source $DEST/.venv/bin/activate
-    $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
-fi
-
-# Do the ugly hacks for broken packages and distros
-source $TOP_DIR/tools/fixup_stuff.sh
-
-if [[ "$USE_SYSTEMD" == "True" ]]; then
-    pip_install_gr systemd-python
-    # the default rate limit of 1000 messages / 30 seconds is not
-    # sufficient given how verbose our logging is.
-    iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
-    sudo systemctl restart systemd-journald
-fi
+# the default rate limit of 1000 messages / 30 seconds is not
+# sufficient given how verbose our logging is.
+iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
+sudo systemctl restart systemd-journald
 
 # Virtual Environment
 # -------------------
@@ -771,6 +791,18 @@
 # Install required infra support libraries
 install_infra
 
+# Install bindep
+$VIRTUALENV_CMD $DEST/bindep-venv
+# TODO(ianw) : optionally install from zuul checkout?
+$DEST/bindep-venv/bin/pip install bindep
+export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep
+
+# Install packages as defined in plugin bindep.txt files
+pkgs="$( _get_plugin_bindep_packages )"
+if [[ -n "${pkgs}" ]]; then
+    install_package ${pkgs}
+fi
+
 # Extras Pre-install
 # ------------------
 # Phase: pre-install
@@ -797,6 +829,25 @@
     install_etcd3
 fi
 
+# Setup TLS certs
+# ---------------
+
+# Do this early, before any webservers are set up to ensure
+# we don't run into problems with missing certs when apache
+# is restarted.
+if is_service_enabled tls-proxy; then
+    configure_CA
+    init_CA
+    init_cert
+fi
+
+# Dstat
+# -----
+
+# Install dstat services prerequisites
+install_dstat
+
+
 # Check Out and Install Source
 # ----------------------------
 
@@ -821,18 +872,11 @@
     install_neutronclient
 fi
 
-# Setup TLS certs
-if is_service_enabled tls-proxy; then
-    configure_CA
-    init_CA
-    init_cert
-fi
-
 # Install middleware
 install_keystonemiddleware
 
 if is_service_enabled keystone; then
-    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+    if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
         stack_install_service keystone
         configure_keystone
     fi
@@ -845,12 +889,10 @@
     stack_install_service swift
     configure_swift
 
-    # swift3 middleware to provide S3 emulation to Swift
-    if is_service_enabled swift3; then
+    # s3api middleware to provide S3 emulation to Swift
+    if is_service_enabled s3api; then
         # Replace the nova-objectstore port by the swift port
         S3_SERVICE_PORT=8080
-        git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
-        setup_develop $SWIFT3_DIR
     fi
 fi
 
@@ -894,17 +936,12 @@
 fi
 
 if is_service_enabled horizon; then
-    # django openstack_auth
-    install_django_openstack_auth
     # dashboard
     stack_install_service horizon
 fi
 
 if is_service_enabled tls-proxy; then
     fix_system_ca_bundle_path
-    if python3_enabled ; then
-        fix_system_ca_bundle_path python3
-    fi
 fi
 
 # Extras Install
@@ -925,34 +962,21 @@
 # osc commands. Alias dies with stack.sh.
 install_oscwrap
 
-if [[ $TRACK_DEPENDS = True ]]; then
-    $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
-    if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
-        echo "Detect some changes for installed packages of pip, in depend tracking mode"
-        cat $DEST/requires.diff
-    fi
-    echo "Ran stack.sh in depend tracking mode, bailing out now"
-    exit 0
-fi
-
-
 # Syslog
 # ------
 
 if [[ $SYSLOG != "False" ]]; then
     if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
         # Configure the master host to receive
-        cat <<EOF >/tmp/90-stack-m.conf
+        cat <<EOF | sudo tee /etc/rsyslog.d/90-stack-m.conf >/dev/null
 \$ModLoad imrelp
 \$InputRELPServerRun $SYSLOG_PORT
 EOF
-        sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
     else
         # Set rsyslog to send to remote host
-        cat <<EOF >/tmp/90-stack-s.conf
+        cat <<EOF | sudo tee /etc/rsyslog.d/90-stack-s.conf >/dev/null
 *.*		:omrelp:$SYSLOG_HOST:$SYSLOG_PORT
 EOF
-        sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
     fi
 
     RSYSLOGCONF="/etc/rsyslog.conf"
@@ -1006,7 +1030,7 @@
 # be memory bound not cpu bound so enable KSM by default but allow people
 # to opt out if the CPU time is more important to them.
 
-if [[ "ENABLE_KSM" == "True" ]] ; then
+if [[ $ENABLE_KSM == "True" ]] ; then
     if [[ -f /sys/kernel/mm/ksm/run ]] ; then
         sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run"
     fi
@@ -1022,6 +1046,12 @@
 # A better kind of sysstat, with the top process per time slice
 start_dstat
 
+# Run a background tcpdump for debugging
+# Note: TCPDUMP_ARGS must be set when this service is enabled
+if is_service_enabled tcpdump; then
+    start_tcpdump
+fi
+
 # Etcd
 # -----
 
@@ -1043,7 +1073,7 @@
 
 # Set up password auth credentials now that Keystone is bootstrapped
 export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
 export OS_USERNAME=admin
 export OS_USER_DOMAIN_ID=default
 export OS_PASSWORD=$ADMIN_PASSWORD
@@ -1060,10 +1090,13 @@
 
 source $TOP_DIR/userrc_early
 
+# Write a clouds.yaml file
+write_clouds_yaml
+
 if is_service_enabled keystone; then
     echo_summary "Starting Keystone"
 
-    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+    if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
         init_keystone
         start_keystone
         bootstrap_keystone
@@ -1071,41 +1104,41 @@
 
     create_keystone_accounts
     if is_service_enabled nova; then
-        create_nova_accounts
+        async_runfunc create_nova_accounts
     fi
     if is_service_enabled glance; then
-        create_glance_accounts
+        async_runfunc create_glance_accounts
     fi
     if is_service_enabled cinder; then
-        create_cinder_accounts
+        async_runfunc create_cinder_accounts
     fi
     if is_service_enabled neutron; then
-        create_neutron_accounts
+        async_runfunc create_neutron_accounts
     fi
     if is_service_enabled swift; then
-        create_swift_accounts
+        async_runfunc create_swift_accounts
     fi
 
 fi
 
-# Write a clouds.yaml file
-write_clouds_yaml
-
 # Horizon
 # -------
 
 if is_service_enabled horizon; then
     echo_summary "Configuring Horizon"
-    configure_horizon
+    async_runfunc configure_horizon
 fi
 
+async_wait create_nova_accounts create_glance_accounts create_cinder_accounts
+async_wait create_neutron_accounts create_swift_accounts configure_horizon
 
 # Glance
 # ------
 
-if is_service_enabled g-reg; then
+# NOTE(yoctozepto): limited to the node hosting the database, which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
     echo_summary "Configuring Glance"
-    init_glance
+    async_runfunc init_glance
 fi
 
 
@@ -1116,16 +1149,18 @@
     echo_summary "Configuring Neutron"
 
     configure_neutron
+
     # Run init_neutron only on the node hosting the Neutron API server
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
-        init_neutron
+        async_runfunc init_neutron
     fi
 fi
 
+
 # Nova
 # ----
 
-if is_service_enabled n-net q-dhcp; then
+if is_service_enabled q-dhcp; then
     # Delete traces of nova networks from prior runs
     # Do not kill any dnsmasq instance spawned by NetworkManager
     netman_pid=$(pidof NetworkManager || true)
@@ -1137,23 +1172,22 @@
 
     clean_iptables
 
-    if is_service_enabled n-net; then
-        rm -rf ${NOVA_STATE_PATH}/networks
-        sudo mkdir -p ${NOVA_STATE_PATH}/networks
-        safe_chown -R ${STACK_USER} ${NOVA_STATE_PATH}/networks
-    fi
-
     # Force IP forwarding on, just in case
     sudo sysctl -w net.ipv4.ip_forward=1
 fi
 
+# os-vif
+# ------
+if is_service_enabled nova neutron; then
+    configure_os_vif
+fi
 
 # Storage Service
 # ---------------
 
 if is_service_enabled swift; then
     echo_summary "Configuring Swift"
-    init_swift
+    async_runfunc init_swift
 fi
 
 
@@ -1162,9 +1196,23 @@
 
 if is_service_enabled cinder; then
     echo_summary "Configuring Cinder"
-    init_cinder
+    async_runfunc init_cinder
 fi
 
+# Placement Service
+# -----------------
+
+if is_service_enabled placement; then
+    echo_summary "Configuring placement"
+    async_runfunc init_placement
+fi
+
+# Wait for neutron and placement before starting nova
+async_wait init_neutron
+async_wait init_placement
+async_wait init_glance
+async_wait init_swift
+async_wait init_cinder
 
 # Compute Service
 # ---------------
@@ -1174,18 +1222,11 @@
     init_nova
 
     # Additional Nova configuration that is dependent on other services
+    # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
+    # not, remove the if here
     if is_service_enabled neutron; then
-        configure_neutron_nova
-    elif is_service_enabled n-net; then
-        create_nova_conf_nova_network
+        async_runfunc configure_neutron_nova
     fi
-
-    init_nova_cells
-fi
-
-if is_service_enabled placement; then
-    echo_summary "Configuring placement"
-    init_placement
 fi
 
 
@@ -1215,42 +1256,23 @@
     start_swift
 fi
 
-# Launch the Glance services
-if is_service_enabled glance; then
-    echo_summary "Starting Glance"
-    start_glance
+# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack
+# deployments.  This ensures the keys match between nova and cinder across all
+# hosts.
+FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec}
+if is_service_enabled cinder; then
+    iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
 fi
 
+async_wait configure_neutron_nova
 
-# Install Images
-# ==============
-
-# Upload an image to Glance.
-#
-# The default image is CirrOS, a small testing image which lets you login as **root**
-# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
-
-if is_service_enabled g-reg; then
-
-    echo_summary "Uploading images"
-
-    for image_url in ${IMAGE_URLS//,/ }; do
-        upload_image $image_url
-    done
-fi
-
-# Create a randomized default value for the key manager's fixed_key
-# NOTE(lyarwood): This is currently set to 36 as a workaround to the following
-# libvirt bug that incorrectly pads passphrases that are a multiple of 16 bytes
-# in length.
-# Unable to use LUKS passphrase that is exactly 16 bytes long
-# https://bugzilla.redhat.com/show_bug.cgi?id=1447297
+# NOTE(clarkb): This must come after async_wait configure_neutron_nova because
+# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If
+# we don't wait then these two ini updates race each other and can result
+# in unexpected configs.
 if is_service_enabled nova; then
-    key=$(generate_hex_string 36)
-    iniset $NOVA_CONF key_manager fixed_key "$key"
-    iniset $NOVA_CPU_CONF key_manager fixed_key "$key"
+    iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
+    iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY"
 fi
 
 # Launch the nova-api and wait for it to answer before continuing
@@ -1259,6 +1281,11 @@
     start_nova_api
 fi
 
+if is_service_enabled ovn-controller ovn-controller-vtep; then
+    echo_summary "Starting OVN services"
+    start_ovn_services
+fi
+
 if is_service_enabled neutron-api; then
     echo_summary "Starting Neutron"
     start_neutron_api
@@ -1266,20 +1293,6 @@
     echo_summary "Starting Neutron"
     configure_neutron_after_post_config
     start_neutron_service_and_check
-elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
-    NM_CONF=${NOVA_CONF}
-    if is_service_enabled n-cell; then
-        NM_CONF=${NOVA_CELLS_CONF}
-    fi
-
-    # Create a small network
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
-
-    # Create some floating ips
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
-
-    # Create a second pool
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
 # Start placement before any of the service that are likely to want
@@ -1295,13 +1308,20 @@
 # Once neutron agents are started setup initial network elements
 if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
     echo_summary "Creating initial neutron network elements"
-    create_neutron_initial_network
+    # Here's where plugins can wire up their own networks instead
+    # of the code in lib/neutron_plugins/services/l3
+    if type -p neutron_plugin_create_initial_networks > /dev/null; then
+        neutron_plugin_create_initial_networks
+    else
+        create_neutron_initial_network
+    fi
+
 fi
 
 if is_service_enabled nova; then
     echo_summary "Starting Nova"
     start_nova
-    create_flavors
+    async_runfunc create_flavors
 fi
 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"
@@ -1309,6 +1329,41 @@
     create_volume_types
 fi
 
+# This sleep is required for the cinder volume service to become active and
+# publish its capabilities to the cinder scheduler before creating the image-volume
+if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+    sleep 30
+fi
+
+# Launch the Glance services
+# NOTE (abhishekk): We need to start the glance-api service only after the
+# cinder service has started, because on startup glance-api queries cinder
+# to validate the volume_type configured for the cinder store of glance.
+if is_service_enabled glance; then
+    echo_summary "Starting Glance"
+    start_glance
+fi
+
+# Install Images
+# ==============
+
+# Upload an image to Glance.
+#
+# The default image is CirrOS, a small testing image which lets you login as **root**
+# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
+
+# NOTE(yoctozepto): limited to the node hosting the database, which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
+    echo_summary "Uploading images"
+
+    for image_url in ${IMAGE_URLS//,/ }; do
+        upload_image $image_url
+    done
+fi
+
+async_wait create_flavors
 
 if is_service_enabled horizon; then
     echo_summary "Starting Horizon"
@@ -1365,15 +1420,6 @@
 merge_config_group $TOP_DIR/local.conf post-extra
 
 
-# Run local script
-# ----------------
-
-# Run ``local.sh`` if it exists to perform user-managed tasks
-if [[ -x $TOP_DIR/local.sh ]]; then
-    echo "Running user script $TOP_DIR/local.sh"
-    $TOP_DIR/local.sh
-fi
-
 # Sanity checks
 # =============
 
@@ -1387,11 +1433,6 @@
 # Check the status of running services
 service_check
 
-# ensure that all the libraries we think we installed from git,
-# actually were.
-check_libs_from_git
-
-
 # Configure nova cellsv2
 # ----------------------
 
@@ -1406,13 +1447,31 @@
         # environment is up.
         echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment."
     fi
+    # Run the nova-status upgrade check command which can also be used
+    # to verify the base install. Note that this is good enough in a
+    # single node deployment, but in a multi-node setup it won't verify
+    # any subnodes - that would have to be driven from whatever tooling
+    # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job.
+    $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check
+fi
+
+# Run local script
+# ----------------
+
+# Run ``local.sh`` if it exists to perform user-managed tasks
+if [[ -x $TOP_DIR/local.sh ]]; then
+    echo "Running user script $TOP_DIR/local.sh"
+    $TOP_DIR/local.sh
 fi
 
 # Bash completion
 # ===============
 
 # Prepare bash completion for OSC
-openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+# Note we use "command" to avoid the timing wrapper
+# which isn't relevant here and floods logs
+command openstack complete \
+    | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
 
 # If cinder is configured, set global_filter for PV devices
 if is_service_enabled cinder; then
@@ -1448,8 +1507,12 @@
     exec 1>&3
 fi
 
+# Make sure we didn't leak any background tasks
+async_cleanup
+
 # Dump out the time totals
 time_totals
+async_print_timing
 
 # Using the cloud
 # ===============
@@ -1482,14 +1545,11 @@
     echo
 fi
 
-# If USE_SYSTEMD is enabled, tell the user about using it.
-if [[ "$USE_SYSTEMD" == "True" ]]; then
-    echo
-    echo "Services are running under systemd unit files."
-    echo "For more information see: "
-    echo "https://docs.openstack.org/devstack/latest/systemd.html"
-    echo
-fi
+echo
+echo "Services are running under systemd unit files."
+echo "For more information see: "
+echo "https://docs.openstack.org/devstack/latest/systemd.html"
+echo
 
 # Useful info on current state
 cat /etc/devstack-version
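
The async_runfunc/async_wait calls sprinkled through stack.sh above implement a simple run-in-background-then-join-by-name pattern; a toy Python sketch of the idea (not devstack's bash implementation) for readers unfamiliar with it:

    import threading

    _running = {}

    def async_runfunc(fn, *args):
        # Start fn in the background and remember it by name.
        t = threading.Thread(target=fn, args=args)
        t.start()
        _running[fn.__name__] = t

    def async_wait(*names):
        # Block until each named background function has finished.
        for name in names:
            _running.pop(name).join()

    def create_flavors():
        print('creating flavors')

    async_runfunc(create_flavors)
    # ... unrelated serial setup continues here ...
    async_wait('create_flavors')
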
diff --git a/stackrc b/stackrc
old mode 100644
new mode 100755
index ffe4050..ebe472c
--- a/stackrc
+++ b/stackrc
@@ -13,6 +13,18 @@
 # Source required DevStack functions and globals
 source $RC_DIR/functions
 
+# Set the target branch. This is used so that stable branching
+# does not need to update each repo below.
+TARGET_BRANCH=master
+
+# Cycle trailing projects need to branch later than the others.
+TRAILING_TARGET_BRANCH=master
+
+# And some repos do not create stable branches, so this is used
+# to make that explicit and avoid accidentally setting them to a
+# stable branch.
+BRANCHLESS_TARGET_BRANCH=master
+
 # Destination path for installation
 DEST=/opt/stack
 
@@ -53,15 +65,17 @@
     # Keystone - nothing works without keystone
     ENABLED_SERVICES=key
     # Nova - services to support libvirt based openstack clouds
-    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth,n-api-meta
+    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta
     # Placement service needed for Nova
     ENABLED_SERVICES+=,placement-api,placement-client
     # Glance services needed for Nova
-    ENABLED_SERVICES+=,g-api,g-reg
+    ENABLED_SERVICES+=,g-api
     # Cinder
     ENABLED_SERVICES+=,c-sch,c-api,c-vol
+    # OVN
+    ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server
     # Neutron
-    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+    ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent
     # Dashboard
     ENABLED_SERVICES+=,horizon
     # Additional services
@@ -77,6 +91,15 @@
 # Set the default Nova APIs to enable
 NOVA_ENABLED_APIS=osapi_compute,metadata
 
+# allow local overrides of env variables, including repo config
+if [[ -f $RC_DIR/localrc ]]; then
+    # Old-style user-supplied config
+    source $RC_DIR/localrc
+elif [[ -f $RC_DIR/.localrc.auto ]]; then
+    # New-style user-supplied config extracted from local.conf
+    source $RC_DIR/.localrc.auto
+fi
+
 # CELLSV2_SETUP - how we should configure services with cells v2
 #
 # - superconductor - this is one conductor for the api services, and
@@ -88,9 +111,7 @@
 # Set the root URL for Horizon
 HORIZON_APACHE_ROOT="/dashboard"
 
-# Whether to use SYSTEMD to manage services, we only do this from
-# Queens forward.
-USE_SYSTEMD="True"
+# Whether to use user specific units for running services or global ones.
 USER_UNITS=$(trueorfalse False USER_UNITS)
 if [[ "$USER_UNITS" == "True" ]]; then
     SYSTEMD_DIR="$HOME/.local/share/systemd/user"
@@ -115,36 +136,17 @@
 fi
 
 # Control whether Python 3 should be used at all.
-export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
+# TODO(frickler): Drop this when all consumers are fixed
+export USE_PYTHON3=True
 
-# Control whether Python 3 is enabled for specific services by the
-# base name of the directory from which they are installed. See
-# enable_python3_package to edit this variable and use_python3_for to
-# test membership.
-export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient"
-
-# Explicitly list services not to run under Python 3. See
-# disable_python3_package to edit this variable.
-export DISABLED_PYTHON3_PACKAGES=""
-
-# When Python 3 is supported by an application, adding the specific
-# version of Python 3 to this variable will install the app using that
-# version of the interpreter instead of 2.7.
+# Adding a specific version of Python 3 to this variable will install
+# the app using that version of the interpreter instead of the
+# unversioned "python3".
 _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
-export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}}
 
-# Just to be more explicit on the Python 2 version to use.
-_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
-export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
-
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
-    # Old-style user-supplied config
-    source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
-    # New-style user-supplied config extracted from local.conf
-    source $RC_DIR/.localrc.auto
-fi
+# Command used to create a virtualenv.
+# Use the built-in venv module to avoid extra dependencies.
+export VIRTUALENV_CMD="python3 -m venv"
 
 # Default for log coloring is based on interactive-or-not.
 # Baseline assumption is that non-interactive invocations are for CI,
@@ -224,11 +226,10 @@
 # ------------
 
 # Base GIT Repo URL
-# Another option is https://git.openstack.org
-GIT_BASE=${GIT_BASE:-git://git.openstack.org}
+GIT_BASE=${GIT_BASE:-https://opendev.org}
 
 # The location of REQUIREMENTS once cloned
-REQUIREMENTS_DIR=$DEST/requirements
+REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements}
 
 # Which libraries should we install from git instead of using released
 # versions on pypi?
@@ -246,7 +247,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="queens"
+DEVSTACK_SERIES="yoga"
 
 ##############
 #
@@ -256,35 +257,35 @@
 
 # block storage service
 CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
-CINDER_BRANCH=${CINDER_BRANCH:-master}
+CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH}
 
 # image catalog service
 GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
-GLANCE_BRANCH=${GLANCE_BRANCH:-master}
+GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH}
 
 # django powered web control panel for openstack
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
-HORIZON_BRANCH=${HORIZON_BRANCH:-master}
+HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH}
 
 # unified auth system (manages accounts/tokens)
 KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
-KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
+KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH}
 
 # neutron service
 NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
-NEUTRON_BRANCH=${NEUTRON_BRANCH:-master}
-
-# neutron fwaas service
-NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
-NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
+NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH}
 
 # compute service
 NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
-NOVA_BRANCH=${NOVA_BRANCH:-master}
+NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH}
 
 # object storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
-SWIFT_BRANCH=${SWIFT_BRANCH:-master}
+SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH}
+
+# placement service
+PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git}
+PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH}
 
 ##############
 #
@@ -294,11 +295,12 @@
 
 # consolidated openstack requirements
 REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
-REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
+REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH}
 
 # Tempest test suite
 TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
-TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
+TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master}
 
 
 ##############
@@ -310,56 +312,56 @@
 
 # volume client
 GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
-GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master}
+GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
 
 # os-brick client for local volume attachment
 GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git}
-GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master}
+GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
 
 # python barbican client library
 GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git}
-GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-master}
+GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH}
 GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient
 
 # python glance client library
 GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
-GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master}
+GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH}
 
 # ironic client
 GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
-GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
+GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH}
 # ironic plugin is out of tree, but nova uses it. set GITDIR here.
 GITDIR["python-ironicclient"]=$DEST/python-ironicclient
 
 # the base authentication plugins that clients use to authenticate
 GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
-GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master}
+GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH}
 
 # python keystone client library to nova that horizon uses
 GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
-GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master}
+GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH}
 
 # neutron client
 GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
-GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-master}
+GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH}
 
 # python client library to nova that horizon (and others) use
 GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
-GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master}
+GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH}
 
 # python swift client library
 GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
-GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
+GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH}
 
 # consolidated openstack python client
 GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
-GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master}
+GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH}
 # this doesn't exist in a lib file, so set it here
 GITDIR["python-openstackclient"]=$DEST/python-openstackclient
 
 # placement-api CLI
 GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git}
-GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-master}
+GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH}
 
 
 ###################
@@ -371,119 +373,119 @@
 
 # castellan key manager interface
 GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git}
-GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-master}
+GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH}
 
 # cliff command line framework
 GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
-GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
+GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH}
 
 # async framework/helpers
 GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git}
-GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master}
+GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH}
 
 # debtcollector deprecation framework/helpers
 GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
-GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master}
+GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH}
 
 # helpful state machines
 GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git}
-GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master}
+GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH}
 
 # oslo.cache
 GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git}
-GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master}
+GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH}
 
 # oslo.concurrency
 GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
-GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
+GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH}
 
 # oslo.config
 GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
-GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-master}
+GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH}
 
 # oslo.context
 GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git}
-GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-master}
+GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH}
 
 # oslo.db
 GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git}
-GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-master}
+GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH}
 
 # oslo.i18n
 GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
-GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-master}
+GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH}
 
 # oslo.log
 GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
-GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-master}
+GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH}
 
 # oslo.messaging
 GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
-GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-master}
+GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH}
 
 # oslo.middleware
 GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
-GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master}
+GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH}
 
 # oslo.policy
 GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
-GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH}
 
 # oslo.privsep
 GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
-GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master}
+GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH}
 
 # oslo.reports
 GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
-GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
+GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH}
 
 # oslo.rootwrap
 GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
-GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
+GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH}
 
 # oslo.serialization
 GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
-GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master}
+GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH}
 
 # oslo.service
 GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git}
-GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master}
+GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH}
 
 # oslo.utils
 GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
-GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master}
+GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH}
 
 # oslo.versionedobjects
 GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git}
-GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master}
+GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH}
 
 # oslo.vmware
 GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
-GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master}
+GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH}
 
 # osprofiler
 GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git}
-GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-master}
+GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH}
 
 # pycadf auditing library
 GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
-GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master}
+GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH}
 
 # stevedore plugin manager
 GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
-GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-master}
+GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH}
 
 # taskflow plugin manager
 GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
-GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-master}
+GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH}
 
 # tooz plugin manager
 GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git}
-GITBRANCH["tooz"]=${TOOZ_BRANCH:-master}
+GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH}
 
 # pbr drives the setuptools configs
-GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
-GITBRANCH["pbr"]=${PBR_BRANCH:-master}
+GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git}
+GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 
 
 ##################
@@ -494,69 +496,75 @@
 
 # cursive library
 GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git}
-GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master}
+GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH}
 
 # glance store library
 GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
-GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master}
-
-# django openstack_auth library
-GITREPO["django_openstack_auth"]=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git}
-GITBRANCH["django_openstack_auth"]=${HORIZONAUTH_BRANCH:-master}
+GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH}
 
 # keystone middleware
 GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git}
-GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master}
-
-# s3 support for swift
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
-SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
+GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
 
 # ceilometer middleware
 GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
-GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master}
+GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
 GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
 
+# openstacksdk OpenStack Python SDK
+GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git}
+GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH}
+
 # os-brick library to manage local volume attaches
 GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
-GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master}
+GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH}
 
 # os-client-config to manage clouds.yaml and friends
 GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git}
-GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-master}
+GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH}
 GITDIR["os-client-config"]=$DEST/os-client-config
 
 # os-vif library to communicate between Neutron to Nova
 GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git}
-GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master}
+GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH}
 
 # osc-lib OpenStackClient common lib
 GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git}
-GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master}
-
-# python-openstacksdk OpenStack Python SDK
-GITREPO["python-openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/python-openstacksdk.git}
-GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-master}
+GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH}
 
 # ironic common lib
 GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
-GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master}
+GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH}
 # this doesn't exist in a lib file, so set it here
 GITDIR["ironic-lib"]=$DEST/ironic-lib
 
 # diskimage-builder tool
 GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-master}
+GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 GITDIR["diskimage-builder"]=$DEST/diskimage-builder
 
 # neutron-lib library containing neutron stable non-REST interfaces
 GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git}
-GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master}
+GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH}
 GITDIR["neutron-lib"]=$DEST/neutron-lib
 
+# os-resource-classes library containing a list of standardized resource classes for OpenStack
+GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git}
+GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH}
+
 # os-traits library for resource provider traits in the placement service
 GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git}
-GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-master}
+GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH}
+
+# ovsdbapp used by neutron
+GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git}
+GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH}
+GITDIR["ovsdbapp"]=$DEST/ovsdbapp
+
+# os-ken used by neutron
+GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git}
+GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
+GITDIR["os-ken"]=$DEST/os-ken
 
 ##################
 #
@@ -566,19 +574,19 @@
 
 # run-parts script required by os-refresh-config
 DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
-DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-master}
+DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 
 # os-apply-config configuration template tool
 OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
-OAC_BRANCH=${OAC_BRANCH:-master}
+OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
 
 # os-collect-config configuration agent
 OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
-OCC_BRANCH=${OCC_BRANCH:-master}
+OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
 
 # os-refresh-config configuration run-parts tool
 ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
-ORC_BRANCH=${ORC_BRANCH:-master}
+ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
 
 
 #################
@@ -591,27 +599,32 @@
 
 # ironic python agent
 IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
-IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
+IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
 
 # a websockets/html5 or flash powered SPICE console for vm instances
 SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
-SPICE_BRANCH=${SPICE_BRANCH:-master}
+SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 
+# Global flag used to configure Tempest and potentially other services if
+# volume multiattach is supported. In Queens, only the libvirt compute driver
+# and lvm volume driver support multiattach, and qemu must be less than 2.10
+# or libvirt must be greater than or equal to 3.10.
+ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH)
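+# For example (illustrative), a local.conf opting in would set:
+#   [[local|localrc]]
+#   ENABLE_VOLUME_MULTIATTACH=True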
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
-# also install an **LXC**, **OpenVZ** or **XenAPI** based system.  If xenserver-core
-# is installed, the default will be XenAPI
+# also install an **LXC** or **OpenVZ** based system.
 DEFAULT_VIRT_DRIVER=libvirt
-is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
 VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
 case "$VIRT_DRIVER" in
     ironic|libvirt)
         LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+        LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom}
+        LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem}
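+        # Both settings can be overridden in local.conf; for example, a
+        # newer host might use LIBVIRT_CPU_MODE=host-model and leave
+        # LIBVIRT_CPU_MODEL unset (illustrative; host-model exposes a
+        # model close to the host CPU).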
         if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
             # The groups change with newer libvirt. Older Ubuntu used
             # 'libvirtd', but now uses libvirt like Debian. Do a quick check
@@ -631,21 +644,10 @@
     fake)
         NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
         ;;
-    xenserver)
-        # Xen config common to nova and neutron
-        XENAPI_USER=${XENAPI_USER:-"root"}
-        # This user will be used for dom0 - domU communication
-        #   should be able to log in to dom0 without a password
-        #   will be used to install the plugins
-        DOMZERO_USER=${DOMZERO_USER:-"domzero"}
-        ;;
     *)
         ;;
 esac
 
-# By default, devstack will use Ubuntu Cloud Archive.
-ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE)
-
 # Images
 # ------
 
@@ -668,7 +670,7 @@
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
-CIRROS_VERSION=${CIRROS_VERSION:-"0.3.5"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
 CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -696,11 +698,6 @@
             DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
             DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME}
             IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";;
-        xenserver)
-            DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk}
-            DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
-            IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
-            IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
         fake)
             # Use the same as the default for libvirt
             DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
@@ -719,12 +716,12 @@
 EXTRA_CACHE_URLS=""
 
 # etcd3 defaults
-ETCD_VERSION=${ETCD_VERSION:-v3.1.10}
-ETCD_SHA256_AMD64="2d335f298619c6fb02b1124773a56966e448ad9952b26fea52909da4fe80d2be"
-# NOTE(sdague): etcd v3.1.10 doesn't have anything for these architectures, though 3.2.x does.
-ETCD_SHA256_ARM64=""
-ETCD_SHA256_PPC64=""
-ETCD_SHA256_S390X=""
+ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
+# This etcd release doesn't have anything for s390x
+ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
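+# To pin a different etcd release, override the version and the matching
+# checksum together, e.g. in local.conf (checksum is a placeholder):
+#   ETCD_VERSION=v3.4.0
+#   ETCD_SHA256_AMD64=<sha256 of etcd-v3.4.0-linux-amd64.tar.gz>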
 # Make sure etcd3 downloads the correct architecture
 if is_arch "x86_64"; then
     ETCD_ARCH="amd64"
@@ -748,13 +745,19 @@
 else
     exit_distro_not_supported "invalid hardware type - $ETCD_ARCH"
 fi
-ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
+ETCD_PORT=${ETCD_PORT:-2379}
+ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380}
+ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download}
 ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
 ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
 ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
 # etcd is always required, so place it into the list of pre-cached downloads
 EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION"
 
+# Cache settings
+CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"}
+MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"}
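+# For example, services on a subnode could share the controller's
+# memcached with MEMCACHE_SERVERS=192.0.2.10:11211 (address is
+# illustrative).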
+
 # Detect duplicate values in IMAGE_URLS
 for image_url in ${IMAGE_URLS//,/ }; do
     if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then
@@ -762,8 +765,8 @@
     fi
 done
 
-# 10Gb default volume backing file size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
+# 30GB default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G}
 
 # Prefixes for volume and instance names
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
@@ -785,28 +788,20 @@
 # the memory used where there are a large number of CPUs present
 # (the default number of workers for many services is the number of CPUs)
 # Also sets the minimum number of workers to 2.
-if [[ "$VIRT_DRIVER" = 'fake' ]]; then
-    # we need more workers for the large ops job
-    API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))}
-else
-    API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))}
-fi
+API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))}
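+# For example, nproc=4 hits the floor of 2 workers, while nproc=32
+# yields 32/4 = 8 workers per service.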
 
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
+# Timeout for compute node registration in Nova
+NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT}
+
 # Service graceful shutdown timeout
 SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
 
 # Service worker timeout
 WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
 
-# Support alternative yum -- in future Fedora 'dnf' will become the
-# only supported installer, but for now 'yum' and 'dnf' are both
-# available in parallel with compatible CLIs.  Allow manual switching
-# till we get to the point we need to handle this automatically
-YUM=${YUM:-yum}
-
 # Common Configuration
 # --------------------
 
@@ -830,7 +825,6 @@
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
 IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
 FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
-FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
 HOST_IPV6=${HOST_IPV6:-}
@@ -871,10 +865,10 @@
 
     DEF_SERVICE_HOST=[$HOST_IPV6]
     DEF_SERVICE_LOCAL_HOST=::1
-    DEF_SERVICE_LISTEN_ADDRESS=::
+    DEF_SERVICE_LISTEN_ADDRESS="[::]"
 fi
 
-# This is either 0.0.0.0 for IPv4 or :: for IPv6
+# This is either 0.0.0.0 for IPv4 or [::] for IPv6
 SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}}
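+# For example, with IPv6 selected a service ends up binding to [::]
+# (all interfaces), analogous to 0.0.0.0 on IPv4; port values are
+# whatever each service configures.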
 
 # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for
@@ -926,7 +920,6 @@
 fi
 
 # ``LOGDIR`` is always set at this point so it is not useful as an 'enable' for service logs
-# ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks
 
 # System-wide ulimit file descriptors override
 ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048}
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index adf20cd..08143d2 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -272,7 +272,7 @@
 
     export_proxy_variables
     expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
-    results=$(env | egrep '(http(s)?|no)_proxy=')
+    results=$(env | egrep '(http(s)?|no)_proxy=' | sort)
     if [[ $expected = $results ]]; then
         passed "OK: Proxy variables are exported when proxy variables are set"
     else
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index a5e1107..6ed1647 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -44,6 +44,9 @@
 multi = foo1
 multi = foo2
 
+[key_with_spaces]
+rgw special key = something
+
 # inidelete(a)
 [del_separate_options]
 a=b
@@ -82,8 +85,9 @@
 
 # test iniget_sections
 VAL=$(iniget_sections "${TEST_INI}")
-assert_equal "$VAL" "default aaa bbb ccc ddd eee del_separate_options \
-del_same_option del_missing_option del_missing_option_multi del_no_options"
+assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \
+del_separate_options del_same_option del_missing_option \
+del_missing_option_multi del_no_options"
 
 # Test with missing arguments
 BEFORE=$(cat ${TEST_INI})
@@ -121,14 +125,14 @@
 assert_equal "$VAL" "33,44" "inset at EOF"
 
 # test empty option
-if ini_has_option ${TEST_INI} ddd empty; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
     passed "ini_has_option: ddd.empty present"
 else
     failed "ini_has_option failed: ddd.empty not found"
 fi
 
 # test non-empty option
-if ini_has_option ${TEST_INI} bbb handlers; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} bbb handlers; then
     passed "ini_has_option: bbb.handlers present"
 else
     failed "ini_has_option failed: bbb.handlers not found"
@@ -209,6 +213,20 @@
 VAL=$(iniget ${INI_TMP_ETC_DIR}/test.new.ini test foo)
 assert_equal "$VAL" "bar" "iniset created file"
 
+# test creation of keys with spaces
+iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key" somethingelse
+VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key")
+assert_equal "$VAL" "somethingelse" "iniset created a key with spaces"
+
+# test update of keys with spaces
+iniset ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw special key" newvalue
+VAL=$(iniget ${TEST_INI} key_with_spaces "rgw special key")
+assert_equal "$VAL" "newvalue" "iniset updated a key with spaces"
+
+inidelete ${SUDO_ARG} ${TEST_INI} key_with_spaces "rgw another key"
+VAL=$(iniget ${TEST_INI} key_with_spaces "rgw another key")
+assert_empty VAL "inidelete removed a key with spaces"
+
 $SUDO rm -rf ${INI_TMP_DIR}
 
 report_results
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 0bd8d49..ce1b344 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -35,16 +35,16 @@
 ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore"
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
-ALL_LIBS+=" oslo.serialization django_openstack_auth"
+ALL_LIBS+=" oslo.serialization"
 ALL_LIBS+=" python-openstackclient osc-lib osc-placement"
 ALL_LIBS+=" os-client-config oslo.rootwrap"
-ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient"
+ALL_LIBS+=" oslo.i18n oslo.utils openstacksdk python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"
 ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
 ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
-ALL_LIBS+=" castellan python-barbicanclient"
+ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tests/test_python.sh b/tests/test_python.sh
deleted file mode 100755
index 8652798..0000000
--- a/tests/test_python.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Tests for DevStack INI functions
-
-TOP=$(cd $(dirname "$0")/.. && pwd)
-
-source $TOP/functions-common
-source $TOP/inc/python
-
-source $TOP/tests/unittest.sh
-
-echo "Testing Python 3 functions"
-
-# Initialize variables manipulated by functions under test.
-export ENABLED_PYTHON3_PACKAGES=""
-export DISABLED_PYTHON3_PACKAGES=""
-
-assert_false "should not be enabled yet" python3_enabled_for testpackage1
-
-enable_python3_package testpackage1
-assert_equal "$ENABLED_PYTHON3_PACKAGES" "testpackage1"  "unexpected result"
-assert_true "should be enabled" python3_enabled_for testpackage1
-
-assert_false "should not be disabled yet" python3_disabled_for testpackage2
-
-disable_python3_package testpackage2
-assert_equal "$DISABLED_PYTHON3_PACKAGES" "testpackage2"  "unexpected result"
-assert_true "should be disabled" python3_disabled_for testpackage2
-
-report_results
diff --git a/tests/test_refs.sh b/tests/test_refs.sh
index 65848cd..0f9aa4a 100755
--- a/tests/test_refs.sh
+++ b/tests/test_refs.sh
@@ -15,10 +15,10 @@
 
 echo "Ensuring we don't have crazy refs"
 
-REFS=`grep BRANCH stackrc | grep -v -- '-master' | grep -v 'NOVNC_BRANCH'`
+REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'`
 rc=$?
 if [[ $rc -eq 0 ]]; then
-    echo "Branch defaults must be master. Found:"
+    echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:"
     echo $REFS
     exit 1
 fi
diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh
index f407d40..9196525 100755
--- a/tests/test_worlddump.sh
+++ b/tests/test_worlddump.sh
@@ -8,7 +8,7 @@
 
 OUT_DIR=$(mktemp -d)
 
-$TOP/tools/worlddump.py -d $OUT_DIR
+${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR
 
 if [[ $? -ne 0 ]]; then
     fail "worlddump failed"
diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh
new file mode 100755
index 0000000..71d8d51
--- /dev/null
+++ b/tests/test_write_devstack_local_conf_role.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+source $TOP/tests/unittest.sh
+
+${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 3703ece..fced2ab 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -17,6 +17,8 @@
 PASS=0
 FAILED_FUNCS=""
 
+export PYTHON=$(which python3 2>/dev/null)
+
 # pass a test, printing out MSG
 #  usage: passed message
 function passed {
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
deleted file mode 100644
index c280267..0000000
--- a/tools/cap-pip.txt
+++ /dev/null
@@ -1 +0,0 @@
-pip!=8
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index c0b7ac7..919cacb 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -32,7 +32,7 @@
 source $TOP_DIR/stackrc
 
 # Give the non-root user the ability to run as **root** via ``sudo``
-is_package_installed sudo || install_package sudo
+is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo
 
 [[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting."
 
diff --git a/tools/debug_function.sh b/tools/debug_function.sh
new file mode 100755
index 0000000..68bd85d
--- /dev/null
+++ b/tools/debug_function.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# This is a small helper to speed up development and debugging with devstack.
+# It is intended to help you run a single function in a project module
+# without having to re-stack.
+#
+# For example, to run just the start_glance function, do this:
+#
+#   ./tools/debug_function.sh glance start_glance
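+#
+# Functions can be chained too; for example, to bounce the service
+# (assuming lib/glance defines both functions):
+#
+#   ./tools/debug_function.sh glance stop_glance start_glance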
+
+if [ ! -f "lib/$1" ]; then
+    echo "Usage: $0 [project] [function] [function...]"
+    # Bail out rather than sourcing a nonexistent module below.
+    exit 1
+fi
+
+source stackrc
+source lib/$1
+shift
+set -x
+while [ "$1" ]; do
+    echo ==== Running $1 ====
+    $1
+    echo ==== Done with $1 ====
+    shift
+done
diff --git a/tools/dstat.sh b/tools/dstat.sh
index 01c6d9b..e6cbb0f 100755
--- a/tools/dstat.sh
+++ b/tools/dstat.sh
@@ -12,8 +12,17 @@
 # Retrieve log directory as argument from calling script.
 LOGDIR=$1
 
+DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem"
+if dstat --version | grep -q 'pcp-dstat' ; then
+    # dstat is unmaintained and is moving to a plugin of Performance
+    # Co-Pilot.  Fedora 29, for example, has rolled this out.  It's
+    # mostly compatible, except for a few options which are not
+    # implemented (yet?)
+    DSTAT_TOP_OPTS=""
+fi
+
 # Command line arguments for primary DStat process.
-DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap --tcp"
+DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp"
 
 # Command-line arguments for secondary background DStat process.
 DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log"
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index efe0125..fe5dafa 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -5,16 +5,6 @@
 # fixup_stuff.sh
 #
 # All distro and package specific hacks go in here
-#
-# - prettytable 0.7.2 permissions are 600 in the package and
-#   pip 1.4 doesn't fix it (1.3 did)
-#
-# - httplib2 0.8 permissions are 600 in the package and
-#   pip 1.4 doesn't fix it (1.3 did)
-#
-# - Fedora:
-#   - set selinux not enforcing
-#   - uninstall firewalld (f20 only)
 
 
 # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
@@ -36,106 +26,13 @@
     FILES=$TOP_DIR/files
 fi
 
-# Keystone Port Reservation
-# -------------------------
-# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
-# being used as ephemeral ports by the system. The default(s) are 35357 and
-# 35358 which are in the Linux defined ephemeral port range (in disagreement
-# with the IANA ephemeral port range). This is a workaround for bug #1253482
-# where Keystone will try and bind to the port and the port will already be
-# in use as an ephemeral port by another process. This places an explicit
-# exception into the Kernel for the Keystone AUTH ports.
-keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-
-# Only do the reserved ports when available, on some system (like containers)
-# where it's not exposed we are almost pretty sure these ports would be
-# exclusive for our DevStack.
-if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
-    # Get any currently reserved ports, strip off leading whitespace
-    reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
-
-    if [[ -z "${reserved_ports}" ]]; then
-        # If there are no currently reserved ports, reserve the keystone ports
-        sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
-    else
-        # If there are currently reserved ports, keep those and also reserve the
-        # Keystone specific ports. Duplicate reservations are merged into a single
-        # reservation (or range) automatically by the kernel.
-        sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
-    fi
-else
-    echo_summary "WARNING: unable to reserve keystone ports"
-fi
-
-# Ubuntu Cloud Archive
-#---------------------
-# We've found that Libvirt on Xenial is flaky and crashes enough to be
-# a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to
-# get newer Libvirt.
-# Make it possible to switch this based on an environment variable as
-# libvirt 2.5.0 doesn't handle nested virtualization quite well and this
-# is required for the trove development environment.
-if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; then
-    # This pulls in apt-add-repository
-    install_package "software-properties-common"
-    # Use UCA for newer libvirt. Should give us libvirt 2.5.0.
-    if [[ -f /etc/ci/mirror_info.sh ]] ; then
-        # If we are on a nodepool provided host and it has told us about where
-        # we can find local mirrors then use that mirror.
-        source /etc/ci/mirror_info.sh
-
-        sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main"
-    else
-        # Otherwise use upstream UCA
-        sudo add-apt-repository -y cloud-archive:pike
-    fi
-
-    # Disable use of libvirt wheel since a cached wheel build might be
-    # against older libvirt binary.  Particularly a problem if using
-    # the openstack wheel mirrors, but can hit locally too.
-    # TODO(clarkb) figure out how to use upstream wheel again.
-    iniset -sudo /etc/pip.conf "global" "no-binary" "libvirt-python"
-
-    # Force update our APT repos, since we added UCA above.
-    REPOS_UPDATED=False
-    apt_get_update
-fi
-
-
 # Python Packages
 # ---------------
 
-# get_package_path python-package    # in import notation
-function get_package_path {
-    local package=$1
-    echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])")
-}
-
-
-# Pre-install affected packages so we can fix the permissions
-# These can go away once we are confident that pip 1.4.1+ is available everywhere
-
-# Fix prettytable 0.7.2 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install 'prettytable>=0.7'
-PACKAGE_DIR=$(get_package_path prettytable)
-# Only fix version 0.7.2
-dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
-if [[ -d $dir ]]; then
-    sudo chmod +r $dir/*
-fi
-
-# Fix httplib2 0.8 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install httplib2
-PACKAGE_DIR=$(get_package_path httplib2)
-# Only fix version 0.8
-dir=$(echo $PACKAGE_DIR-0.8*)
-if [[ -d $dir ]]; then
-    sudo chmod +r $dir/*
-fi
-
-if is_fedora; then
+function fixup_fedora {
+    if ! is_fedora; then
+        return
+    fi
     # Disable selinux to avoid configuring to allow Apache access
     # to Horizon files (LP#1175444)
     if selinuxenabled; then
@@ -169,55 +66,90 @@
         fi
     fi
 
-    if  [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then
-        # requests ships vendored version of chardet/urllib3, but on
-        # fedora these are symlinked back to the primary versions to
-        # avoid duplication of code on disk.  This is fine when
-        # maintainers keep things in sync, but since devstack takes
-        # over and installs later versions via pip we can end up with
-        # incompatible versions.
-        #
-        # The rpm package is not removed to preserve the dependent
-        # packages like cloud-init; rather we remove the symlinks and
-        # force a re-install of requests so the vendored versions it
-        # wants are present.
-        #
-        # Realted issues:
-        # https://bugs.launchpad.net/glance/+bug/1476770
-        # https://bugzilla.redhat.com/show_bug.cgi?id=1253823
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency
+    sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
 
-        base_path=$(get_package_path requests)/packages
-        if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
-            sudo rm -f $base_path/{chardet,urllib3}
-            # install requests with the bundled urllib3 to avoid conflicts
-            pip_install --upgrade --force-reinstall requests
-        fi
+    # After updating setuptools based on the requirements, the files from the
+    # python3-setuptools RPM are deleted, which breaks some tools such as
+    # semanage (used in diskimage-builder) that use the -s flag of the python
+    # interpreter, enforcing the use of the packages from /usr/lib.
+    # Importing setuptools/pkg_resources in such an environment fails.
+    # Force the package re-installation to fix those applications.
+    if is_package_installed python3-setuptools; then
+        sudo dnf reinstall -y python3-setuptools
     fi
-fi
+}
 
-# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
-# connection issues under proxy so re-install the latest version using
-# pip. To avoid having pip's virtualenv overwritten by the distro's
-# package (e.g. due to installing a distro package with a dependency
-# on python-virtualenv), first install the distro python-virtualenv
-# to satisfy any dependencies then use pip to overwrite it.
+function fixup_suse {
+    if ! is_suse; then
+        return
+    fi
 
-# ... but, for infra builds, the pip-and-virtualenv [1] element has
-# already done this to ensure the latest pip, virtualenv and
-# setuptools on the base image for all platforms.  It has also added
-# the packages to the yum/dnf ignore list to prevent them being
-# overwritten with old versions.  F26 and dnf 2.0 has changed
-# behaviour that means re-installing python-virtualenv fails [2].
-# Thus we do a quick check if we're in the infra environment by
-# looking for the mirror config script before doing this, and just
-# skip it if so.
+    # Deactivate and disable apparmor profiles in openSUSE and SLE
+    # distros to avoid issues with haproxy and dnsmasq.  In newer
+    # releases, systemctl stop apparmor is actually a no-op, so we
+    # have to use aa-teardown to make sure we've deactivated the
+    # profiles:
+    #
+    # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343
+    # https://gitlab.com/apparmor/apparmor/merge_requests/81
+    # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1
+    if sudo systemctl is-active -q apparmor; then
+        sudo systemctl stop apparmor
+    fi
+    if [ -x /usr/sbin/aa-teardown ]; then
+        sudo /usr/sbin/aa-teardown
+    fi
+    if sudo systemctl is-enabled -q apparmor; then
+        sudo systemctl disable apparmor
+    fi
 
-# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \
-#        diskimage_builder/elements/pip-and-virtualenv/ \
-#            install.d/pip-and-virtualenv-source-install/04-install-pip
-# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency
+    sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
+    sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info
 
-if [[ ! -f /etc/ci/mirror_info.sh ]]; then
-    install_package python-virtualenv
-    pip_install -U --force-reinstall virtualenv
-fi
+    # Ensure trusted CA certificates are up to date
+    # See https://bugzilla.suse.com/show_bug.cgi?id=1154871
+    # May be removed once a new opensuse-15 image is available in nodepool
+    sudo zypper up -y p11-kit ca-certificates-mozilla
+}
+
+function fixup_ovn_centos {
+    if [[ $os_VENDOR != "CentOS" ]]; then
+        return
+    fi
+    # OVN packages are part of this release for CentOS
+    yum_install centos-release-openstack-victoria
+}
+
+function fixup_ubuntu {
+    if ! is_ubuntu; then
+        return
+    fi
+
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency
+    sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
+    sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+    sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
+}
+
+function fixup_all {
+    fixup_ubuntu
+    fixup_fedora
+    fixup_suse
+}
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 56f12e7..1cacd06 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 
 # Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
 #
@@ -19,17 +19,21 @@
 #
 # In order to function correctly, the environment in which the
 # script runs must have
-#   * network access to the review.openstack.org Gerrit API
+#   * network access to the review.opendev.org Gerrit API
 #     working directory
-#   * network access to https://git.openstack.org/cgit
+#   * network access to https://opendev.org/
 
+import functools
 import logging
 import json
 import requests
 
+from requests.adapters import HTTPAdapter
+from requests.packages.urllib3.util.retry import Retry
+
 logging.basicConfig(level=logging.DEBUG)
 
-url = 'https://review.openstack.org/projects/'
+url = 'https://review.opendev.org/projects/'
 
 # This is what a project looks like
 '''
@@ -39,26 +43,41 @@
   },
 '''
 
-def is_in_openstack_namespace(proj):
-    # only interested in openstack namespace (e.g. not retired
-    # stackforge, etc)
-    return proj.startswith('openstack/')
+def is_in_wanted_namespace(proj):
+    # only interested in openstack or x namespace (e.g. not retired
+    # stackforge, etc).
+    #
+    # openstack/openstack is a "super-repo" that tracks openstack
+    # projects as submodules; it can cause gitea to time out with a
+    # 500 and thus stop this script.  Skip it.
+    if proj.startswith('stackforge/') or \
+       proj.startswith('stackforge-attic/') or \
+       proj == "openstack/openstack":
+        return False
+    else:
+        return True
 
 # Check if this project has a plugin file
-def has_devstack_plugin(proj):
+def has_devstack_plugin(session, proj):
     # Don't link in the deb packaging repos
     if "openstack/deb-" in proj:
         return False
-    r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
+    r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj)
     return r.status_code == 200
 
 logging.debug("Getting project list from %s" % url)
 r = requests.get(url)
-projects = sorted(filter(is_in_openstack_namespace, json.loads(r.text[4:])))
+projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))
 logging.debug("Found %d projects" % len(projects))
 
-found_plugins = filter(has_devstack_plugin, projects)
+s = requests.Session()
+# sometimes gitea gives us a 500 error; retry sanely
+#  https://stackoverflow.com/a/35636367
+retries = Retry(total=3, backoff_factor=1,
+                status_forcelist=[ 500 ])
+s.mount('https://', HTTPAdapter(max_retries=retries))
+
+found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
 
 for project in found_plugins:
-    # strip of openstack/
-    print(project[10:])
+    print(project)
diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh
index 95f1331..3307943 100755
--- a/tools/generate-devstack-plugins-list.sh
+++ b/tools/generate-devstack-plugins-list.sh
@@ -28,9 +28,9 @@
 #   * the environment variable git_dir pointing to the location
 #   * of said git repositories
 #   ) OR (
-#   * network access to the review.openstack.org Gerrit API
+#   * network access to the review.opendev.org Gerrit API
 #     working directory
-#   * network access to https://git.openstack.org/cgit
+#   * network access to https://opendev.org
 #   ))
 #
 # If a file named data/devstack-plugins-registry.header or
@@ -50,13 +50,11 @@
 }
 
 (
-declare -A plugins
-
 if [[ -r data/devstack-plugins-registry.header ]]; then
     cat data/devstack-plugins-registry.header
 fi
 
-sorted_plugins=$(python tools/generate-devstack-plugins-list.py)
+sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py)
 
 # find the length of the name column & pad
 name_col_len=$(echo "${sorted_plugins}" | wc -L)
@@ -65,7 +63,7 @@
 # ====================== ===
 # Plugin Name            URL
 # ====================== ===
-# foobar                 `git://... <http://...>`__
+# foobar                 `https://... <https://...>`__
 # ...
 
 printf "\n\n"
@@ -74,8 +72,8 @@
 title_underline ${name_col_len}
 
 for plugin in ${sorted_plugins}; do
-    giturl="git://git.openstack.org/openstack/${plugin}"
-    gitlink="https://git.openstack.org/cgit/openstack/${plugin}"
+    giturl="https://opendev.org/${plugin}"
+    gitlink="https://opendev.org/${plugin}"
     printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__"
 done
 
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 3a27c4a..81231be 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -22,7 +22,7 @@
 
 # Possible virt drivers, if we have more, add them here. Always keep
 # dummy in the end position to trigger the fall through case.
-DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+DRIVERS="openvz ironic libvirt vsphere dummy"
 
 # Extra variables to trigger getting additional images.
 export ENABLED_SERVICES="h-api,tr-api"
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index dbe5278..c72dc89 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -5,7 +5,7 @@
 # Update pip and friends to a known common version
 
 # Assumptions:
-# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed
+# - PYTHON3_VERSION refers to a version already installed
 
 set -o errexit
 
@@ -35,7 +35,7 @@
 # done by openstack-infra diskimage-builder elements as part of image
 # preparation [1].  This prevents any network access, which can be
 # unreliable in CI situations.
-# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip
+# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip
 
 PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
 LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
@@ -46,7 +46,7 @@
 function get_versions {
     # FIXME(dhellmann): Deal with multiple python versions here? This
     # is just used for reporting, so maybe not?
-    PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true)
+    PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true)
     if [[ -n $PIP ]]; then
         PIP_VERSION=$($PIP --version | awk '{ print $2}')
         echo "pip: $PIP_VERSION"
@@ -89,10 +89,7 @@
             die $LINENO "Download of get-pip.py failed"
         touch $LOCAL_PIP.downloaded
     fi
-    sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
-    if python3_enabled; then
-        sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
-    fi
+    sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
 }
 
 
@@ -114,35 +111,28 @@
 
 }
 
-# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time
-# pkg_resources inspects the list of installed Python packages if there are
-# non-compliant version numbers in the egg-info (for example, from distro
-# system packaged Python libraries). This is off by default after 8.2 but can
-# be enabled by uncommenting the lines below.
-#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources
-#export PYTHONWARNINGS
-
 # Show starting versions
 get_versions
 
-# Do pip
-
-# Eradicate any and all system packages
-
-# Python in fedora depends on the python-pip package so removing it
-# results in a nonfunctional system. pip on fedora installs to /usr so pip
-# can safely override the system pip for all versions of fedora
-if ! is_fedora ; then
-    uninstall_package python-pip
-    uninstall_package python3-pip
-fi
-
-install_get_pip
-
 if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
     configure_pypi_alternative_url
 fi
 
+if is_fedora && [[ ${DISTRO} == f* ]]; then
+    # get-pip.py will not install over the python3-pip package in
+    # Fedora 34 any more.
+    #  https://bugzilla.redhat.com/show_bug.cgi?id=1988935
+    #  https://github.com/pypa/pip/issues/9904
+    # You can still install using get-pip.py if python3-pip is *not*
+    # installed; this *should* remain separate under /usr/local and not break
+    # if python3-pip is later installed.
+    # For general sanity, we just use the packaged pip.  It should be
+    # recent enough anyway.  This is included via rpms/general
+    : # Simply fall through
+else
+    install_get_pip
+fi
+
 set -x
 
 # Note setuptools is part of requirements.txt and we want to make sure
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 6189085..a7c03d2 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -81,21 +81,6 @@
     fi
 fi
 
-if python3_enabled; then
-    install_python3
-    export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
-else
-    export PYTHON=$(which python 2>/dev/null)
-fi
-
-if is_suse; then
-    # now reinstall cryptography from source, in order to rebuilt it against the
-    # system libssl rather than the bundled openSSL 1.1, which segfaults when combined
-    # with a system provided openSSL 1.0
-    # see https://github.com/pyca/cryptography/issues/3804 and followup issues
-    sudo pip install cryptography --no-binary :all:
-fi
-
 
 # Mark end of run
 # ---------------
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index e91464f..0212d00 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -27,7 +27,7 @@
 }
 
 CN=$1
-if [ -z "$CN" ]]; then
+if [ -z "$CN" ]; then
     usage
 fi
 ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME}
@@ -52,5 +52,5 @@
 make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME
 
 # Create a cert bundle
-cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
-
+cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
+    $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 63f25ca..6c36534 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,7 @@
 
 set -o errexit
 
-PYTHON=${PYTHON:-python}
+PYTHON=${PYTHON:-python3}
 
 # time to sleep between checks
 SLEEP_TIME=20
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
old mode 100755
new mode 100644
index 07716b0..1b081bb
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 # This tool lists processes that lock memory pages from swapping to disk.
 
 import re
@@ -26,17 +24,19 @@
         # iterate over the /proc/%pid/status files manually
         try:
             s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
-        except EnvironmentError:
+            with s:
+                for line in s:
+                    result = LCK_SUMMARY_REGEX.search(line)
+                    if result:
+                        locked = int(result.group('locked'))
+                        if locked:
+                            mlock_users.append({'name': proc.name(),
+                                                'pid': proc.pid,
+                                                'locked': locked})
+        except OSError:
+            # pids can disappear, we're ok with that
             continue
-        with s:
-            for line in s:
-                result = LCK_SUMMARY_REGEX.search(line)
-                if result:
-                    locked = int(result.group('locked'))
-                    if locked:
-                        mlock_users.append({'name': proc.name(),
-                                            'pid': proc.pid,
-                                            'locked': locked})
+
 
     # produce a single line log message with per process mlock stats
     if mlock_users:
diff --git a/tools/outfilter.py b/tools/outfilter.py
old mode 100755
new mode 100644
index f82939b..e910f79
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#
+#!/usr/bin/env python3
+
 # Copyright 2014 Hewlett-Packard Development Company, L.P.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -36,6 +36,13 @@
     parser.add_argument('-o', '--outfile',
                         help='Output file for content',
                         default=None)
+    # NOTE(ianw): This is intended for the case where your stdout is
+    # being captured by something like ansible which independently
+    # logs timestamps on the lines it receives.  Note that if using an
+    # output file, those log lines are still timestamped.
+    parser.add_argument('-b', '--no-timestamp', action='store_true',
+                        help='Do not prefix stdout with timestamp (bare)',
+                        default=False)
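+    # Illustrative pipeline: keep ansible-captured stdout bare while the
+    # logfile stays timestamped, e.g.
+    #   some_command 2>&1 | outfilter.py -v -b -o /opt/stack/logs/stack.log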
     parser.add_argument('-v', '--verbose', action='store_true',
                         default=False)
     return parser.parse_args()
@@ -50,33 +57,45 @@
     opts = get_options()
     outfile = None
     if opts.outfile:
-        outfile = open(opts.outfile, 'a', 0)
+        # note, binary mode so we can do unbuffered output.
+        outfile = open(opts.outfile, 'ab', 0)
 
     # Otherwise fileinput reprocess args as files
     sys.argv = []
-    while True:
-        line = sys.stdin.readline()
-        if not line:
-            return 0
 
+    for line in iter(sys.stdin.readline, ''):
         # put skip lines here
         if skip_line(line):
             continue
 
-        # This prevents us from nesting date lines, because
-        # we'd like to pull this in directly in Grenade and not double
-        # up on DevStack lines
+        # This prevents us from nesting date lines, because we'd like
+        # to pull this in directly in Grenade and not double up on
+        # DevStack lines.
+        # NOTE(ianw): we could actually strip the extra ts in "bare"
+        # mode (which came after this)? ... as we get more experience
+        # with zuulv3 native jobs and ansible capture it may become
+        # clearer what to do
         if HAS_DATE.search(line) is None:
             now = datetime.datetime.utcnow()
-            line = ("%s | %s" % (
+            ts_line = ("%s | %s" % (
                 now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
                 line))
+        else:
+            ts_line = line
 
         if opts.verbose:
-            sys.stdout.write(line)
+            sys.stdout.write(line if opts.no_timestamp else ts_line)
             sys.stdout.flush()
+
         if outfile:
-            outfile.write(line)
+            # We've opened outfile as a binary file to get the
+            # non-buffered behaviour.  On Python 3, sys.stdin is opened
+            # with the system encoding and yields decoded str lines, so
+            # encode them back to utf-8 bytes for the logfile.
+            if sys.version_info < (3,):
+                outfile.write(ts_line)
+            else:
+                outfile.write(ts_line.encode('utf-8'))
             outfile.flush()
 
 
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
deleted file mode 100644
index 1d994a6..0000000
--- a/tools/uec/meta.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import BaseHTTPServer
-import SimpleHTTPServer
-import sys
-
-
-def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
-         ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
-    """simple http server that listens on a give address:port."""
-
-    server_address = (host, port)
-
-    HandlerClass.protocol_version = protocol
-    httpd = ServerClass(server_address, HandlerClass)
-
-    sa = httpd.socket.getsockname()
-    print("Serving HTTP on", sa[0], "port", sa[1], "...")
-    httpd.serve_forever()
-
-if __name__ == '__main__':
-    if sys.argv[1:]:
-        address = sys.argv[1]
-    else:
-        address = '0.0.0.0'
-    if ':' in address:
-        host, port = address.split(':')
-    else:
-        host = address
-        port = 8080
-
-    main(host, int(port))
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index eb7265f..74dcdb2 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -41,12 +41,19 @@
                 'auth_url': args.os_auth_url,
                 'username': args.os_username,
                 'password': args.os_password,
-                'project_name': args.os_project_name,
             },
         }
-        if args.os_identity_api_version == '3':
+        if args.os_project_name and args.os_system_scope:
+            print(
+                "WARNING: os_project_name and os_system_scope were both"
+                " given. os_system_scope will take priority.")
+        if args.os_project_name and not args.os_system_scope:
+            self._cloud_data['auth']['project_name'] = args.os_project_name
+        if args.os_identity_api_version == '3' and not args.os_system_scope:
             self._cloud_data['auth']['user_domain_id'] = 'default'
             self._cloud_data['auth']['project_domain_id'] = 'default'
+        if args.os_system_scope:
+            self._cloud_data['auth']['system_scope'] = args.os_system_scope
         if args.os_cacert:
             self._cloud_data['cacert'] = args.os_cacert
 
@@ -58,7 +65,7 @@
     def _read_clouds(self):
         try:
             with open(self._clouds_path) as clouds_file:
-                self._clouds = yaml.load(clouds_file)
+                self._clouds = yaml.safe_load(clouds_file)
         except IOError:
             # The user doesn't have a clouds.yaml file.
             print("The user clouds.yaml file didn't exist.")
@@ -83,12 +90,13 @@
     parser.add_argument('--os-cloud', required=True)
     parser.add_argument('--os-region-name', default='RegionOne')
     parser.add_argument('--os-identity-api-version', default='3')
-    parser.add_argument('--os-volume-api-version', default='2')
+    parser.add_argument('--os-volume-api-version', default='3')
     parser.add_argument('--os-cacert')
     parser.add_argument('--os-auth-url', required=True)
     parser.add_argument('--os-username', required=True)
     parser.add_argument('--os-password', required=True)
-    parser.add_argument('--os-project-name', required=True)
+    parser.add_argument('--os-project-name')
+    parser.add_argument('--os-system-scope')
 
     args = parser.parse_args()
 
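The scope-priority rule added above is worth spelling out: a system scope, when supplied, wins over a project name, and the default-domain settings apply only to project-scoped auth. A small sketch of just that branching, with illustrative values (this is not the tool itself, only the rule it implements):

    # Sketch of the scope-priority rule: system scope beats project
    # name. All values below are illustrative assumptions.
    auth = {'auth_url': 'http://203.0.113.10/identity',
            'username': 'admin', 'password': 'secret'}
    project_name, system_scope = 'demo', 'all'  # both supplied

    if project_name and system_scope:
        print("WARNING: os_project_name and os_system_scope were both"
              " given. os_system_scope will take priority.")
    if project_name and not system_scope:
        auth['project_name'] = project_name
    if system_scope:
        auth['system_scope'] = system_scope

    assert 'project_name' not in auth and auth['system_scope'] == 'all'
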
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
new file mode 100755
index 0000000..2596395
--- /dev/null
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+#
+#
+# NOTE(gmann): This script is used in the 'devstack-tempest-ipv6' zuul job to verify that
+# services are deployed on IPv6 properly. It will catch any devstack or devstack
+# plugin that is missing the required settings to listen on an IPv6 address. It is run in the
+# run phase of the zuul job, before the tests run. A child job of 'devstack-tempest-ipv6'
+# can extend the IPv6 verification for its own project by defining a new post-run script,
+# which will run along with this base script.
+# If more verifications common to IPv6 are needed, we can always extend this script.
+
+# Keep track of the DevStack directory
+TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
+source $TOP_DIR/stackrc
+source $TOP_DIR/openrc admin admin
+
+function verify_devstack_ipv6_setting {
+    local _service_host=''
+    _service_host=$(echo $SERVICE_HOST | tr -d [])
+    local _host_ipv6=''
+    _host_ipv6=$(echo $HOST_IPV6 | tr -d [])
+    local _service_listen_address=''
+    _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
+    local _service_local_host=''
+    _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
+    if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
+        echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6, which is required for devstack to deploy services with IPv6 addresses."
+        exit 1
+    fi
+    is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
+    if [[ "$is_service_host_ipv6" != "True" ]]; then
+        echo $SERVICE_HOST "SERVICE_HOST is not an IPv6 address, which means devstack cannot deploy services on IPv6."
+        exit 1
+    fi
+    is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
+    if [[ "$is_host_ipv6" != "True" ]]; then
+        echo $HOST_IPV6 "HOST_IPV6 is not an IPv6 address, which means devstack cannot deploy services on IPv6."
+        exit 1
+    fi
+    is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
+    if [[ "$is_service_listen_address" != "True" ]]; then
+        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not an IPv6 address, which means devstack cannot deploy services on IPv6."
+        exit 1
+    fi
+    is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
+    if [[ "$is_service_local_host" != "True" ]]; then
+        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not an IPv6 address, which means devstack cannot deploy services on IPv6."
+        exit 1
+    fi
+    echo "Devstack is properly configured with IPv6"
+    echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+}
+
+function sanity_check_system_ipv6_enabled {
+    system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())')
+    if [[ $system_ipv6_enabled != "True" ]]; then
+        echo "IPv6 is disabled in system"
+        exit 1
+    fi
+    echo "IPv6 is enabled in system"
+}
+
+function verify_service_listen_address_is_ipv6 {
+    local endpoints_verified=False
+    local all_ipv6=True
+    endpoints=$(openstack endpoint list -f value -c URL)
+    for endpoint in ${endpoints}; do
+        local endpoint_address=''
+        endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
+        endpoint_address=$(echo $endpoint_address | tr -d [])
+        local is_endpoint_ipv6=''
+        is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
+        if [[ "$is_endpoint_ipv6" != "True" ]]; then
+            all_ipv6=False
+            echo $endpoint ": This is not an IPv6 endpoint, which means the corresponding service is not listening on an IPv6 address."
+            continue
+        fi
+        endpoints_verified=True
+    done
+    if [[ "$all_ipv6" == "False"  ]] || [[ "$endpoints_verified" == "False" ]]; then
+        exit 1
+    fi
+    echo "All services deployed by devstack is on IPv6 endpoints"
+    echo $endpoints
+}
+
+# First, verify whether IPv6 is enabled on the system
+sanity_check_system_ipv6_enabled
+# Verify whether devstack is properly configured with the IPv6 settings
+verify_devstack_ipv6_setting
+# Get all endpoints registered by devstack in keystone and verify that each endpoint's address is IPv6.
+verify_service_listen_address_is_ipv6
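All of the address checks in this new script shell out to `oslo_utils.netutils.is_valid_ipv6` via `python3 -c`. The same checks can be exercised directly in Python, as the sketch below shows (it assumes oslo.utils is installed; the sample addresses are illustrative):

    # Sketch of the validity checks the script invokes inline.
    import oslo_utils.netutils as nutils

    print(nutils.is_valid_ipv6('2001:db8::1'))    # True
    print(nutils.is_valid_ipv6('203.0.113.10'))   # False: IPv4
    print(nutils.is_ipv6_enabled())               # host-level sanity check
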
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 6fff149..e292173 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Copyright 2014 Hewlett-Packard Development Company, L.P.
 #
@@ -17,14 +17,13 @@
 
 """Dump the state of the world for post mortem."""
 
-from __future__ import print_function
-
 import argparse
 import datetime
 from distutils import spawn
 import fnmatch
+import io
 import os
-import os.path
+import shutil
 import subprocess
 import sys
 
@@ -108,9 +107,10 @@
 # This method gets max version searching 'OpenFlow versions 0x1:0x'.
 # And return a version value converted to an integer type.
 def _get_ofp_version():
-    process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE)
+    process = subprocess.Popen(['ovs-ofctl', '--version'],
+                               stdout=subprocess.PIPE)
     stdout, _ = process.communicate()
-    find_str = 'OpenFlow versions 0x1:0x'
+    find_str = b'OpenFlow versions 0x1:0x'
     offset = stdout.find(find_str)
     return int(stdout[offset + len(find_str):-1]) - 1
 
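On Python 3, `subprocess` pipes yield bytes, so the needle searched for must be a bytes literal as well. The sketch below replays the parsing above against a canned version string; the `2.13.0` and `0x6` values are illustrative assumptions:

    # Sketch of the bytes-based OpenFlow version parsing.
    stdout = b'ovs-ofctl (Open vSwitch) 2.13.0\nOpenFlow versions 0x1:0x6\n'
    find_str = b'OpenFlow versions 0x1:0x'
    offset = stdout.find(find_str)
    # Slice out the hex digit after the prefix, drop the trailing newline.
    print(int(stdout[offset + len(find_str):-1]) - 1)  # -> 5
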
@@ -134,7 +134,7 @@
 
 
 def ebtables_dump():
-    tables = ['filter', 'nat', 'broute']
+    tables = ['filter', 'nat']
     _header("EB Tables Dump")
     if not _find_cmd('ebtables'):
         return
@@ -163,14 +163,14 @@
 def network_dump():
     _header("Network Dump")
 
-    _dump_cmd("brctl show")
-    _dump_cmd("arp -n")
-    ip_cmds = ["addr", "link", "route"]
+    _dump_cmd("bridge link")
+    _dump_cmd("ip link show type bridge")
+    ip_cmds = ["neigh", "addr", "route", "-6 route"]
     for cmd in ip_cmds + ['netns']:
         _dump_cmd("ip %s" % cmd)
     for netns_ in _netns_list():
         for cmd in ip_cmds:
-            args = {'netns': netns_, 'cmd': cmd}
+            args = {'netns': bytes.decode(netns_), 'cmd': cmd}
             _dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args)
 
 
@@ -191,7 +191,7 @@
     _dump_cmd("sudo ovs-vsctl show")
     for ofctl_cmd in ofctl_cmds:
         for bridge in bridges:
-            args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge}
+            args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)}
             _dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args)
 
 
@@ -203,7 +203,7 @@
 
 def compute_consoles():
     _header("Compute consoles")
-    for root, dirnames, filenames in os.walk('/opt/stack'):
+    for root, _, filenames in os.walk('/opt/stack'):
         for filename in fnmatch.filter(filenames, 'console.log'):
             fullpath = os.path.join(root, filename)
             _dump_cmd("sudo cat %s" % fullpath)
@@ -231,12 +231,22 @@
         # tools out there that can do that sort of thing though.
         _dump_cmd("ls -ltrah /var/core")
 
+
+def disable_stdio_buffering():
+    # re-open STDOUT as binary, then wrap it in a
+    # TextIOWrapper, and write through everything.
+    binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0)
+    sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True)
+
+
 def main():
     opts = get_options()
     fname = filename(opts.dir, opts.name)
     print("World dumping... see %s for details" % fname)
-    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
-    with open(fname, 'w') as f:
+
+    disable_stdio_buffering()
+
+    with io.open(fname, 'w') as f:
         os.dup2(f.fileno(), sys.stdout.fileno())
         disk_space()
         process_list()
@@ -247,6 +257,14 @@
         compute_consoles()
         guru_meditation_reports()
         var_core()
+    # Singular name for ease of log retrieval
+    copyname = os.path.join(opts.dir, 'worlddump')
+    if opts.name:
+        copyname += '-' + opts.name
+    copyname += '-latest.txt'
+    # We make a full copy rather than a symlink because jobs may or
+    # may not gzip their logs, which would break a symlink.
+    shutil.copyfile(fname, copyname)
 
 
 if __name__ == '__main__':
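The `disable_stdio_buffering` helper above replaces the old `os.fdopen(sys.stdout.fileno(), 'w', 0)` trick, which Python 3 rejects because unbuffered mode is only allowed for binary files. A minimal sketch of the same write-through pattern on Python 3:

    # Sketch of an unbuffered, write-through stdout on Python 3.
    import io
    import sys

    binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0)
    sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True)

    print("this reaches the terminal immediately, with no buffering")
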
diff --git a/tools/xen/README.md b/tools/xen/README.md
deleted file mode 100644
index 9559e77..0000000
--- a/tools/xen/README.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# Getting Started With XenServer and Devstack
-
-The purpose of the code in this directory is to help developers bootstrap a
-XenServer 6.2 (older versions may also work) + OpenStack development
-environment. This file gives some pointers on how to get started.
-
-XenServer is a Type 1 hypervisor, so it is best installed on bare metal.  The
-OpenStack services are configured to run within a virtual machine (called OS
-domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with
-the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`).
-
-The provided localrc helps to build a basic environment.
-
-## Introduction
-
-### Requirements
-
- - An internet-enabled network with a DHCP server on it
- - XenServer box plugged in to the same network
-This network will be used as the OpenStack management network. The VM Network
-and the Public Network will not be connected to any physical interfaces; only
-new virtual networks will be created by the `install_os_domU.sh` script.
-
-### Steps to follow
-
- - Install XenServer
- - Download Devstack to XenServer
- - Customise `localrc`
- - Start `install_os_domU.sh` script
-
-### Brief explanation
-
-The `install_os_domU.sh` script will:
- - Setup XenAPI plugins
- - Create the named networks, if they don't exist
- - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse
-   it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network
-   interface:
-   - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to
-     `MGT_BRIDGE_OR_NET_NAME`
- - After the Ubuntu install process finishes, the network configuration is
- modified to:
-   - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi
-     must be accessible through this network.
-   - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`
-   - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`
- - Start devstack inside the created OpenStack VM
-
-## Step 1: Install Xenserver
-Install XenServer on a clean box. You can download the latest XenServer for
-free from: http://www.xenserver.org/
-
-The XenServer IP configuration depends on your local network setup. If you are
-using dhcp, make a reservation for XenServer, so its IP address won't change
-over time. Make a note of the XenServer's IP address, as it has to be specified
-in `localrc`. The other option is to manually specify the IP setup for the
-XenServer box. Please make sure that a gateway and a nameserver are configured,
-as `install_os_domU.sh` will connect to github.com to get source-code snapshots.
-
-## Step 2: Download devstack
-On your XenServer host, run the following commands as root:
-
-    wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
-    unzip -o master -d ./devstack
-    cd devstack/*/
-
-## Step 3: Configure your localrc inside the devstack directory
-Devstack uses a localrc for user-specific configuration.  Note that
-the `XENAPI_PASSWORD` must be your dom0 root password.
-Of course, use real passwords if this machine is exposed.
-
-    cat > ./localrc <<EOF
-    # At the moment, we depend on github's snapshot function.
-    GIT_BASE="http://github.com"
-
-    # Passwords
-    # NOTE: these need to be specified, otherwise devstack will try
-    # to prompt for these passwords, blocking the install process.
-
-    DATABASE_PASSWORD=my_super_secret
-    ADMIN_PASSWORD=my_super_secret
-    SERVICE_PASSWORD=my_super_secret
-    RABBIT_PASSWORD=my_super_secret
-    SWIFT_HASH="66a3d6b56c1f479c8b4e70ab5c2000f5"
-    # This will be the password for the OpenStack VM (both stack and root users)
-    GUEST_PASSWORD=my_super_secret
-
-    # XenAPI parameters
-    # NOTE: The following must be set to your XenServer root password!
-
-    XENAPI_PASSWORD=my_xenserver_root_password
-
-    XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
-    VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
-
-    # Explicitly set virt driver
-    VIRT_DRIVER=xenserver
-
-    # Explicitly enable multi-host for nova-network HA
-    MULTI_HOST=1
-
-    # Give extra time for boot
-    ACTIVE_TIMEOUT=45
-
-    EOF
-
-## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory
-
-    cd tools/xen
-    ./install_os_domU.sh
-
-Once this script finishes executing, log into the VM (openstack domU) that it
-installed and tail the run.sh.log file. You will need to wait until run.sh
-has finished executing.
-
-# Appendix
-
-This section contains useful information for running devstack in CI
-environments / using ubuntu network mirrors.
-
-## Use a specific Ubuntu mirror for installation
-
-To speed up the Ubuntu installation, you can use a specific mirror. To specify
-a mirror explicitly, include the following settings in your `localrc` file:
-
-    UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
-    UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
-
-These variables set the `mirror/http/hostname` and `mirror/http/directory`
-settings in the ubuntu preseed file. The minimal ubuntu VM will use the
-specified parameters.
-
-## Use an http proxy to speed up Ubuntu installation
-
-To further speed up the Ubuntu VM and package installation, an internal http
-proxy could be used. `squid-deb-proxy` has proven to be stable. To use an http
-proxy, specify:
-
-    UBUNTU_INST_HTTP_PROXY="http://ubuntu-proxy.somedomain.com:8000"
-
-in your `localrc` file.
-
-## Reuse the Ubuntu VM
-
-Performing a minimal ubuntu installation could take a lot of time, depending on
-your mirror/network speed. If you run the `install_os_domU.sh` script on a clean
-hypervisor, you can speed up the installation by re-using the ubuntu vm from
-a previous installation.
-
-### Export the Ubuntu VM to an XVA
-
-Given you have an nfs export `TEMPLATE_NFS_DIR`:
-
-    TEMPLATE_FILENAME=devstack-jeos.xva
-    TEMPLATE_NAME=jeos_template_for_devstack
-    mountdir=$(mktemp -d)
-    mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
-    VM="$(xe template-list name-label="$TEMPLATE_NAME" --minimal)"
-    xe template-export template-uuid=$VM filename="$mountdir/$TEMPLATE_FILENAME"
-    umount "$mountdir"
-    rm -rf "$mountdir"
-
-### Import the Ubuntu VM
-
-Given you have an nfs export `TEMPLATE_NFS_DIR` where you exported the Ubuntu
-VM as `TEMPLATE_FILENAME`:
-
-    mountdir=$(mktemp -d)
-    mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
-    xe vm-import filename="$mountdir/$TEMPLATE_FILENAME"
-    umount "$mountdir"
-    rm -rf "$mountdir"
-
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
deleted file mode 100755
index 34ef719..0000000
--- a/tools/xen/build_xva.sh
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/bin/bash
-
-# This script is run by install_os_domU.sh
-#
-# It modifies the ubuntu image created by install_os_domU.sh
-# and previously modified by prepare_guest_template.sh
-#
-# This script is responsible for:
-# - pushing in the DevStack code
-# - creating run.sh, to run the code on boot
-# It does this by mounting the disk image of the VM.
-#
-# The resultant image is then templated and started
-# by install_os_domU.sh
-
-# Exit on errors
-set -o errexit
-# Echo commands
-set -o xtrace
-
-# This directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $TOP_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $TOP_DIR/functions
-
-# Source params - override xenrc params in your localrc to suit your taste
-source xenrc
-
-#
-# Parameters
-#
-GUEST_NAME="$1"
-
-function _print_interface_config {
-    local device_nr
-    local ip_address
-    local netmask
-
-    device_nr="$1"
-    ip_address="$2"
-    netmask="$3"
-
-    local device
-
-    device="eth${device_nr}"
-
-    echo "auto $device"
-    if [ $ip_address == "dhcp" ]; then
-        echo "iface $device inet dhcp"
-    else
-        echo "iface $device inet static"
-        echo "  address $ip_address"
-        echo "  netmask $netmask"
-    fi
-
-    # Turn off tx checksumming for better performance
-    echo "  post-up ethtool -K $device tx off"
-}
-
-function print_interfaces_config {
-    echo "auto lo"
-    echo "iface lo inet loopback"
-
-    _print_interface_config $PUB_DEV_NR $PUB_IP $PUB_NETMASK
-    _print_interface_config $VM_DEV_NR $VM_IP $VM_NETMASK
-    _print_interface_config $MGT_DEV_NR $MGT_IP $MGT_NETMASK
-}
-
-#
-# Mount the VDI
-#
-STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
-add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
-
-# Make sure we have a stage
-if [ ! -d $STAGING_DIR/etc ]; then
-    echo "Stage is not properly set up!"
-    exit 1
-fi
-
-# Only support DHCP for now - we don't handle how different versions of Ubuntu manage resolv.conf
-if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then
-    echo "Configuration without DHCP not supported"
-    exit 1
-fi
-
-# Copy over devstack
-rm -f /tmp/devstack.tar
-cd $TOP_DIR/../../
-tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar .
-mkdir -p $STAGING_DIR/opt/stack/devstack
-tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack
-cd $TOP_DIR
-
-# Create a systemd service for devstack
-cat >$STAGING_DIR/etc/systemd/system/devstack.service << EOF
-[Unit]
-Description=Install OpenStack by DevStack
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStartPre=/bin/rm -f /opt/stack/runsh.succeeded
-ExecStart=/bin/su -c "/opt/stack/run.sh" stack
-StandardOutput=tty
-StandardError=tty
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-
-# enable this service
-ln -s $STAGING_DIR/etc/systemd/system/devstack.service $STAGING_DIR/etc/systemd/system/multi-user.target.wants/devstack.service
-
-# Configure the hostname
-echo $GUEST_NAME > $STAGING_DIR/etc/hostname
-
-# Hostname must resolve for rabbit
-HOSTS_FILE_IP=$PUB_IP
-if [ $MGT_IP != "dhcp" ]; then
-    HOSTS_FILE_IP=$MGT_IP
-fi
-cat <<EOF >$STAGING_DIR/etc/hosts
-$HOSTS_FILE_IP $GUEST_NAME
-127.0.0.1 localhost localhost.localdomain
-EOF
-
-# Configure the network
-print_interfaces_config > $STAGING_DIR/etc/network/interfaces
-
-# Gracefully cp only if source file/dir exists
-function cp_it {
-    if [ -e $1 ] || [ -d $1 ]; then
-        cp -pRL $1 $2
-    fi
-}
-
-# Copy over your ssh keys and env if desired
-COPYENV=${COPYENV:-1}
-if [ "$COPYENV" = "1" ]; then
-    cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh
-    cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys
-    cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig
-    cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc
-    cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc
-fi
-
-# Configure run.sh
-cat <<EOF >$STAGING_DIR/opt/stack/run.sh
-#!/bin/bash
-set -eux
-(
-  flock -n 9 || exit 1
-
-  sudo chown -R stack /opt/stack
-
-  [ -e /opt/stack/runsh.succeeded ] && rm /opt/stack/runsh.succeeded
-  echo \$\$ >> /opt/stack/run_sh.pid
-
-  cd /opt/stack/devstack
-  ./unstack.sh || true
-  ./stack.sh
-
-  # Got to the end - success
-  touch /opt/stack/runsh.succeeded
-
-  # Update /etc/issue
-  (
-      echo "OpenStack VM - Installed by DevStack"
-      IPADDR=$(ip -4 address show eth0 | sed -n 's/.*inet \([0-9\.]\+\).*/\1/p')
-      echo "  Management IP:   $IPADDR"
-      echo -n "  Devstack run:    "
-      if [ -e /opt/stack/runsh.succeeded ]; then
-          echo "SUCCEEDED"
-      else
-          echo "FAILED"
-      fi
-      echo ""
-  ) > /opt/stack/issue
-  sudo cp /opt/stack/issue /etc/issue
-
-  rm /opt/stack/run_sh.pid
-) 9> /opt/stack/.runsh_lock
-EOF
-
-chmod 755 $STAGING_DIR/opt/stack/run.sh
diff --git a/tools/xen/devstackubuntu_latecommand.sh b/tools/xen/devstackubuntu_latecommand.sh
deleted file mode 100644
index 2afbe2c..0000000
--- a/tools/xen/devstackubuntu_latecommand.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -eux
-
-# Need to set barrier=0 to avoid a Xen bug
-# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089
-sed -i -e 's/errors=/barrier=0,errors=/' /etc/fstab
-
-# Allow root to login with a password
-sed -i -e 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config
-
-# Install the XenServer tools so IP addresses are reported
-wget --no-proxy @XS_TOOLS_URL@ -O /root/tools.deb
-dpkg -i /root/tools.deb
-rm /root/tools.deb
diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg
deleted file mode 100644
index 80f334b..0000000
--- a/tools/xen/devstackubuntupreseed.cfg
+++ /dev/null
@@ -1,471 +0,0 @@
-### Contents of the preconfiguration file (for squeeze)
-### Localization
-# Preseeding only locale sets language, country and locale.
-d-i debian-installer/locale string en_US
-
-# The values can also be preseeded individually for greater flexibility.
-#d-i debian-installer/language string en
-#d-i debian-installer/country string NL
-#d-i debian-installer/locale string en_GB.UTF-8
-# Optionally specify additional locales to be generated.
-#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8
-
-# Keyboard selection.
-# Disable automatic (interactive) keymap detection.
-d-i console-setup/ask_detect boolean false
-#d-i keyboard-configuration/modelcode string pc105
-d-i keyboard-configuration/layoutcode string us
-# To select a variant of the selected layout (if you leave this out, the
-# basic form of the layout will be used):
-#d-i keyboard-configuration/variantcode string dvorak
-
-### Network configuration
-# Disable network configuration entirely. This is useful for cdrom
-# installations on non-networked devices where the network questions,
-# warning and long timeouts are a nuisance.
-#d-i netcfg/enable boolean false
-
-# netcfg will choose an interface that has link if possible. This makes it
-# skip displaying a list if there is more than one interface.
-d-i netcfg/choose_interface select auto
-
-# To pick a particular interface instead:
-#d-i netcfg/choose_interface select eth1
-
-# If you have a slow dhcp server and the installer times out waiting for
-# it, this might be useful.
-d-i netcfg/dhcp_timeout string 120
-
-# If you prefer to configure the network manually, uncomment this line and
-# the static network configuration below.
-#d-i netcfg/disable_autoconfig boolean true
-
-# If you want the preconfiguration file to work on systems both with and
-# without a dhcp server, uncomment these lines and the static network
-# configuration below.
-#d-i netcfg/dhcp_failed note
-#d-i netcfg/dhcp_options select Configure network manually
-
-# Static network configuration.
-#d-i netcfg/get_nameservers string 192.168.1.1
-#d-i netcfg/get_ipaddress string 192.168.1.42
-#d-i netcfg/get_netmask string 255.255.255.0
-#d-i netcfg/get_gateway string 192.168.1.1
-#d-i netcfg/confirm_static boolean true
-
-# Any hostname and domain names assigned from dhcp take precedence over
-# values set here. However, setting the values still prevents the questions
-# from being shown, even if values come from dhcp.
-d-i netcfg/get_hostname string stack
-d-i netcfg/get_domain string stackpass
-
-# Disable that annoying WEP key dialog.
-d-i netcfg/wireless_wep string
-# The wacky dhcp hostname that some ISPs use as a password of sorts.
-#d-i netcfg/dhcp_hostname string radish
-
-# If non-free firmware is needed for the network or other hardware, you can
-# configure the installer to always try to load it, without prompting. Or
-# change to false to disable asking.
-#d-i hw-detect/load_firmware boolean true
-
-### Network console
-# Use the following settings if you wish to make use of the network-console
-# component for remote installation over SSH. This only makes sense if you
-# intend to perform the remainder of the installation manually.
-#d-i anna/choose_modules string network-console
-#d-i network-console/password password r00tme
-#d-i network-console/password-again password r00tme
-
-### Mirror settings
-# If you select ftp, the mirror/country string does not need to be set.
-#d-i mirror/protocol string ftp
-d-i mirror/country string manual
-d-i mirror/http/hostname string archive.ubuntu.com
-d-i mirror/http/directory string /ubuntu
-d-i mirror/http/proxy string
-
-# Alternatively: by default, the installer uses CC.archive.ubuntu.com where
-# CC is the ISO-3166-2 code for the selected country. You can preseed this
-# so that it does so without asking.
-#d-i mirror/http/mirror select CC.archive.ubuntu.com
-
-# Suite to install.
-#d-i mirror/suite string squeeze
-# Suite to use for loading installer components (optional).
-#d-i mirror/udeb/suite string squeeze
-# Components to use for loading installer components (optional).
-#d-i mirror/udeb/components multiselect main, restricted
-
-### Clock and time zone setup
-# Controls whether or not the hardware clock is set to UTC.
-d-i clock-setup/utc boolean true
-
-# You may set this to any valid setting for $TZ; see the contents of
-# /usr/share/zoneinfo/ for valid values.
-d-i time/zone string US/Pacific
-
-# Controls whether to use NTP to set the clock during the install
-d-i clock-setup/ntp boolean true
-# NTP server to use. The default is almost always fine here.
-d-i clock-setup/ntp-server string 0.us.pool.ntp.org
-
-### Partitioning
-## Partitioning example
-# If the system has free space you can choose to only partition that space.
-# This is only honoured if partman-auto/method (below) is not set.
-# Alternatives: custom, some_device, some_device_crypto, some_device_lvm.
-#d-i partman-auto/init_automatically_partition select biggest_free
-
-# Alternatively, you may specify a disk to partition. If the system has only
-# one disk the installer will default to using that, but otherwise the device
-# name must be given in traditional, non-devfs format (so e.g. /dev/hda or
-# /dev/sda, and not e.g. /dev/discs/disc0/disc).
-# For example, to use the first SCSI/SATA hard disk:
-#d-i partman-auto/disk string /dev/sda
-# In addition, you'll need to specify the method to use.
-# The presently available methods are:
-# - regular: use the usual partition types for your architecture
-# - lvm:     use LVM to partition the disk
-# - crypto:  use LVM within an encrypted partition
-d-i partman-auto/method string regular
-
-# If one of the disks that are going to be automatically partitioned
-# contains an old LVM configuration, the user will normally receive a
-# warning. This can be preseeded away...
-d-i partman-lvm/device_remove_lvm boolean true
-# The same applies to pre-existing software RAID array:
-d-i partman-md/device_remove_md boolean true
-# And the same goes for the confirmation to write the lvm partitions.
-d-i partman-lvm/confirm boolean true
-
-# For LVM partitioning, you can select how much of the volume group to use
-# for logical volumes.
-#d-i partman-auto-lvm/guided_size string max
-#d-i partman-auto-lvm/guided_size string 10GB
-#d-i partman-auto-lvm/guided_size string 50%
-
-# You can choose one of the three predefined partitioning recipes:
-# - atomic: all files in one partition
-# - home:   separate /home partition
-# - multi:  separate /home, /usr, /var, and /tmp partitions
-d-i partman-auto/choose_recipe select atomic
-
-# Or provide a recipe of your own...
-# If you have a way to get a recipe file into the d-i environment, you can
-# just point at it.
-#d-i partman-auto/expert_recipe_file string /hd-media/recipe
-
-# If not, you can put an entire recipe into the preconfiguration file in one
-# (logical) line. This example creates a small /boot partition, suitable
-# swap, and uses the rest of the space for the root partition:
-#d-i partman-auto/expert_recipe string                         \
-#      boot-root ::                                            \
-#              40 50 100 ext3                                  \
-#                      $primary{ } $bootable{ }                \
-#                      method{ format } format{ }              \
-#                      use_filesystem{ } filesystem{ ext3 }    \
-#                      mountpoint{ /boot }                     \
-#              .                                               \
-#              500 10000 1000000000 ext3                       \
-#                      method{ format } format{ }              \
-#                      use_filesystem{ } filesystem{ ext3 }    \
-#                      mountpoint{ / }                         \
-#              .                                               \
-#              64 512 300% linux-swap                          \
-#                      method{ swap } format{ }                \
-#              .
-
-# If you just want to change the default filesystem from ext3 to something
-# else, you can do that without providing a full recipe.
-d-i partman/default_filesystem string ext3
-
-# The full recipe format is documented in the file partman-auto-recipe.txt
-# included in the 'debian-installer' package or available from D-I source
-# repository. This also documents how to specify settings such as file
-# system labels, volume group names and which physical devices to include
-# in a volume group.
-
-# This makes partman automatically partition without confirmation, provided
-# that you told it what to do using one of the methods above.
-d-i partman-partitioning/confirm_write_new_label boolean true
-d-i partman/choose_partition select finish
-d-i partman/confirm boolean true
-d-i partman/confirm_nooverwrite boolean true
-
-## Partitioning using RAID
-# The method should be set to "raid".
-#d-i partman-auto/method string raid
-# Specify the disks to be partitioned. They will all get the same layout,
-# so this will only work if the disks are the same size.
-#d-i partman-auto/disk string /dev/sda /dev/sdb
-
-# Next you need to specify the physical partitions that will be used. 
-#d-i partman-auto/expert_recipe string \
-#      multiraid ::                                         \
-#              1000 5000 4000 raid                          \
-#                      $primary{ } method{ raid }           \
-#              .                                            \
-#              64 512 300% raid                             \
-#                      method{ raid }                       \
-#              .                                            \
-#              500 10000 1000000000 raid                    \
-#                      method{ raid }                       \
-#              .
-
-# Last you need to specify how the previously defined partitions will be
-# used in the RAID setup. Remember to use the correct partition numbers
-# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
-# devices are separated using "#".
-# Parameters are:
-# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
-#          <devices> <sparedevices>
-
-#d-i partman-auto-raid/recipe string \
-#    1 2 0 ext3 /                    \
-#          /dev/sda1#/dev/sdb1       \
-#    .                               \
-#    1 2 0 swap -                    \
-#          /dev/sda5#/dev/sdb5       \
-#    .                               \
-#    0 2 0 ext3 /home                \
-#          /dev/sda6#/dev/sdb6       \
-#    .
-
-# For additional information see the file partman-auto-raid-recipe.txt
-# included in the 'debian-installer' package or available from D-I source
-# repository.
-
-# This makes partman automatically partition without confirmation.
-d-i partman-md/confirm boolean true
-d-i partman-partitioning/confirm_write_new_label boolean true
-d-i partman/choose_partition select finish
-d-i partman/confirm boolean true
-d-i partman/confirm_nooverwrite boolean true
-
-## Controlling how partitions are mounted
-# The default is to mount by UUID, but you can also choose "traditional" to
-# use traditional device names, or "label" to try filesystem labels before
-# falling back to UUIDs.
-#d-i partman/mount_style select uuid
-
-### Base system installation
-# Configure APT to not install recommended packages by default. Use of this
-# option can result in an incomplete system and should only be used by very
-# experienced users.
-#d-i base-installer/install-recommends boolean false
-
-# The kernel image (meta) package to be installed; "none" can be used if no
-# kernel is to be installed.
-d-i base-installer/kernel/image string linux-virtual
-
-### Account setup
-# Skip creation of a root account (normal user account will be able to
-# use sudo). The default is false; preseed this to true if you want to set
-# a root password.
-d-i passwd/root-login boolean true
-# Alternatively, to skip creation of a normal user account.
-d-i passwd/make-user boolean false
-
-# Root password, either in clear text
-d-i passwd/root-password password stackpass
-d-i passwd/root-password-again password stackpass
-# or encrypted using an MD5 hash.
-#d-i passwd/root-password-crypted password [MD5 hash]
-
-# To create a normal user account.
-#d-i passwd/user-fullname string Ubuntu User
-#d-i passwd/username string ubuntu
-# Normal user's password, either in clear text
-#d-i passwd/user-password password insecure
-#d-i passwd/user-password-again password insecure
-# or encrypted using an MD5 hash.
-#d-i passwd/user-password-crypted password [MD5 hash]
-# Create the first user with the specified UID instead of the default.
-#d-i passwd/user-uid string 1010
-# The installer will warn about weak passwords. If you are sure you know
-# what you're doing and want to override it, uncomment this.
-d-i user-setup/allow-password-weak boolean true
-
-# The user account will be added to some standard initial groups. To
-# override that, use this.
-#d-i passwd/user-default-groups string audio cdrom video
-
-# Set to true if you want to encrypt the first user's home directory.
-d-i user-setup/encrypt-home boolean false
-
-### Apt setup
-# You can choose to install restricted and universe software, or to install
-# software from the backports repository.
-d-i apt-setup/restricted boolean true
-d-i apt-setup/universe boolean true
-d-i apt-setup/backports boolean true
-# Uncomment this if you don't want to use a network mirror.
-#d-i apt-setup/use_mirror boolean false
-# Select which update services to use; define the mirrors to be used.
-# Values shown below are the normal defaults.
-#d-i apt-setup/services-select multiselect security
-#d-i apt-setup/security_host string security.ubuntu.com
-#d-i apt-setup/security_path string /ubuntu
-
-# Additional repositories, local[0-9] available
-#d-i apt-setup/local0/repository string \
-#       http://local.server/ubuntu squeeze main
-#d-i apt-setup/local0/comment string local server
-# Enable deb-src lines
-#d-i apt-setup/local0/source boolean true
-# URL to the public key of the local repository; you must provide a key or
-# apt will complain about the unauthenticated repository and so the
-# sources.list line will be left commented out
-#d-i apt-setup/local0/key string http://local.server/key
-
-# By default the installer requires that repositories be authenticated
-# using a known gpg key. This setting can be used to disable that
-# authentication. Warning: Insecure, not recommended.
-#d-i debian-installer/allow_unauthenticated boolean true
-
-### Package selection
-#tasksel tasksel/first multiselect ubuntu-desktop
-#tasksel tasksel/first multiselect lamp-server, print-server
-#tasksel tasksel/first multiselect kubuntu-desktop
-tasksel tasksel/first multiselect openssh-server
-
-# Individual additional packages to install
-d-i pkgsel/include string cracklib-runtime curl wget ssh openssh-server tcpdump ethtool git sudo python-netaddr coreutils
-
-# Whether to upgrade packages after debootstrap.
-# Allowed values: none, safe-upgrade, full-upgrade
-d-i pkgsel/upgrade select safe-upgrade
-
-# Language pack selection
-#d-i pkgsel/language-packs multiselect de, en, zh
-
-# Policy for applying updates. May be "none" (no automatic updates),
-# "unattended-upgrades" (install security updates automatically), or
-# "landscape" (manage system with Landscape).
-d-i pkgsel/update-policy select unattended-upgrades
-
-# Some versions of the installer can report back on what software you have
-# installed, and what software you use. The default is not to report back,
-# but sending reports helps the project determine what software is most
-# popular and include it on CDs.
-#popularity-contest popularity-contest/participate boolean false
-
-# By default, the system's locate database will be updated after the
-# installer has finished installing most packages. This may take a while, so
-# if you don't want it, you can set this to "false" to turn it off.
-d-i pkgsel/updatedb boolean false
-
-### Boot loader installation
-# Grub is the default boot loader (for x86). If you want lilo installed
-# instead, uncomment this:
-#d-i grub-installer/skip boolean true
-# To also skip installing lilo, and install no bootloader, uncomment this
-# too:
-#d-i lilo-installer/skip boolean true
-
-# With a few exceptions for unusual partitioning setups, GRUB 2 is now the
-# default. If you need GRUB Legacy for some particular reason, then
-# uncomment this:
-d-i grub-installer/grub2_instead_of_grub_legacy boolean false
-
-# This is fairly safe to set, it makes grub install automatically to the MBR
-# if no other operating system is detected on the machine.
-d-i grub-installer/only_debian boolean true
-
-# This one makes grub-installer install to the MBR if it also finds some other
-# OS, which is less safe as it might not be able to boot that other OS.
-d-i grub-installer/with_other_os boolean true
-
-# Alternatively, if you want to install to a location other than the mbr,
-# uncomment and edit these lines:
-#d-i grub-installer/only_debian boolean false
-#d-i grub-installer/with_other_os boolean false
-#d-i grub-installer/bootdev  string (hd0,0)
-# To install grub to multiple disks:
-#d-i grub-installer/bootdev  string (hd0,0) (hd1,0) (hd2,0)
-
-# Optional password for grub, either in clear text
-#d-i grub-installer/password password r00tme
-#d-i grub-installer/password-again password r00tme
-# or encrypted using an MD5 hash, see grub-md5-crypt(8).
-#d-i grub-installer/password-crypted password [MD5 hash]
-
-# Use the following option to add additional boot parameters for the
-# installed system (if supported by the bootloader installer).
-# Note: options passed to the installer will be added automatically.
-#d-i debian-installer/add-kernel-opts string nousb
-
-### Finishing up the installation
-# During installations from serial console, the regular virtual consoles
-# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
-# line to prevent this.
-d-i finish-install/keep-consoles boolean true
-
-# Avoid that last message about the install being complete.
-d-i finish-install/reboot_in_progress note
-
-# This will prevent the installer from ejecting the CD during the reboot,
-# which is useful in some situations.
-#d-i cdrom-detect/eject boolean false
-
-# This is how to make the installer shutdown when finished, but not
-# reboot into the installed system.
-#d-i debian-installer/exit/halt boolean true
-# This will power off the machine instead of just halting it.
-#d-i debian-installer/exit/poweroff boolean true
-
-### X configuration
-# X can detect the right driver for some cards, but if you're preseeding,
-# you override whatever it chooses. Still, vesa will work most places.
-#xserver-xorg xserver-xorg/config/device/driver select vesa
-
-# A caveat with mouse autodetection is that if it fails, X will retry it
-# over and over. So if it's preseeded to be done, there is a possibility of
-# an infinite loop if the mouse is not autodetected.
-#xserver-xorg xserver-xorg/autodetect_mouse boolean true
-
-# Monitor autodetection is recommended.
-xserver-xorg xserver-xorg/autodetect_monitor boolean true
-# Uncomment if you have an LCD display.
-#xserver-xorg xserver-xorg/config/monitor/lcd boolean true
-# X has three configuration paths for the monitor. Here's how to preseed
-# the "medium" path, which is always available. The "simple" path may not
-# be available, and the "advanced" path asks too many questions.
-xserver-xorg xserver-xorg/config/monitor/selection-method \
-       select medium
-xserver-xorg xserver-xorg/config/monitor/mode-list \
-       select 1024x768 @ 60 Hz
-
-### Preseeding other packages
-# Depending on what software you choose to install, or if things go wrong
-# during the installation process, it's possible that other questions may
-# be asked. You can preseed those too, of course. To get a list of every
-# possible question that could be asked during an install, do an
-# installation, and then run these commands:
-#   debconf-get-selections --installer > file
-#   debconf-get-selections >> file
-
-
-#### Advanced options
-### Running custom commands during the installation
-# d-i preseeding is inherently not secure. Nothing in the installer checks
-# for attempts at buffer overflows or other exploits of the values of a
-# preconfiguration file like this one. Only use preconfiguration files from
-# trusted locations! To drive that home, and because it's generally useful,
-# here's a way to run any shell command you'd like inside the installer,
-# automatically.
-
-# This first command is run as early as possible, just after
-# preseeding is read.
-#d-i preseed/early_command string anna-install some-udeb
-# This command is run immediately before the partitioner starts. It may be
-# useful to apply dynamic partitioner preseeding that depends on the state
-# of the disks (which may not be visible when preseed/early_command runs).
-#d-i partman/early_command \
-#       string debconf-set partman-auto/disk "$(list-devices disk | head -n1)"
-# This command is run just before the install finishes, but when there is
-# still a usable /target directory. You can chroot to /target and use it
-# directly, or use the apt-install and in-target commands to easily install
-# packages and run commands in the target system.
-d-i preseed/late_command string
diff --git a/tools/xen/functions b/tools/xen/functions
deleted file mode 100644
index bc0c515..0000000
--- a/tools/xen/functions
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/bin/bash
-
-function die_with_error {
-    local err_msg
-
-    err_msg="$1"
-
-    echo "$err_msg" >&2
-    exit 1
-}
-
-function xapi_plugin_location {
-    for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins" "/usr/lib64/xapi/plugins"; do
-        if [ -d $PLUGIN_DIR ]; then
-            echo $PLUGIN_DIR
-            return 0
-        fi
-    done
-    return 1
-}
-
-function create_directory_for_kernels {
-    if [ -d "/boot/guest" ]; then
-        echo "INFO: /boot/guest directory already exists, using that" >&2
-    else
-        local local_path
-        local_path="$(get_local_sr_path)/os-guest-kernels"
-        mkdir -p $local_path
-        ln -s $local_path /boot/guest
-    fi
-}
-
-function create_directory_for_images {
-    if [ -d "/images" ]; then
-        echo "INFO: /images directory already exists, using that" >&2
-    else
-        local local_path
-        local_path="$(get_local_sr_path)/os-images"
-        mkdir -p $local_path
-        ln -s $local_path /images
-    fi
-}
-
-function get_local_sr {
-    xe pool-list params=default-SR minimal=true
-}
-
-function get_local_sr_path {
-    pbd_path="/var/run/sr-mount/$(get_local_sr)"
-    pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "`
-    if [ -n "$pbd_device_config_path" ]; then
-        pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true`
-        pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""`
-    fi
-    echo $pbd_path
-}
-
-function find_ip_by_name {
-    local guest_name="$1"
-    local interface="$2"
-
-    local period=10
-    local max_tries=10
-    local i=0
-
-    while true; do
-        if [ $i -ge $max_tries ]; then
-            echo "Timeout: ip address for interface $interface of $guest_name"
-            exit 11
-        fi
-
-        ipaddress=$(xe vm-list --minimal \
-                    name-label=$guest_name \
-                    params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p")
-
-        if [ -z "$ipaddress" ]; then
-            sleep $period
-            i=$((i+1))
-        else
-            echo $ipaddress
-            break
-        fi
-    done
-}
-
-function _vm_uuid {
-    local vm_name_label
-
-    vm_name_label="$1"
-
-    xe vm-list name-label="$vm_name_label" --minimal
-}
-
-function _create_new_network {
-    local name_label
-    name_label=$1
-
-    xe network-create name-label="$name_label"
-}
-
-function _multiple_networks_with_name {
-    local name_label
-    name_label=$1
-
-    # A comma indicates multiple matches
-    xe network-list name-label="$name_label" --minimal | grep -q ","
-}
-
-function _network_exists {
-    local name_label
-    name_label=$1
-
-    ! [ -z "$(xe network-list name-label="$name_label" --minimal)" ]
-}
-
-function _bridge_exists {
-    local bridge
-    bridge=$1
-
-    ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ]
-}
-
-function _network_uuid {
-    local bridge_or_net_name
-    bridge_or_net_name=$1
-
-    if _bridge_exists "$bridge_or_net_name"; then
-        xe network-list bridge="$bridge_or_net_name" --minimal
-    else
-        xe network-list name-label="$bridge_or_net_name" --minimal
-    fi
-}
-
-function add_interface {
-    local vm_name_label
-    local bridge_or_network_name
-
-    vm_name_label="$1"
-    bridge_or_network_name="$2"
-    device_number="$3"
-
-    local vm
-    local net
-
-    vm=$(_vm_uuid "$vm_name_label")
-    net=$(_network_uuid "$bridge_or_network_name")
-    xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
-}
-
-function setup_network {
-    local bridge_or_net_name
-    bridge_or_net_name=$1
-
-    if ! _bridge_exists "$bridge_or_net_name"; then
-        if _network_exists "$bridge_or_net_name"; then
-            if _multiple_networks_with_name "$bridge_or_net_name"; then
-                cat >&2 << EOF
-ERROR: Multiple networks found with name-label "$bridge_or_net_name".
-Please review your XenServer network configuration / localrc file.
-EOF
-                exit 1
-            fi
-        else
-            _create_new_network "$bridge_or_net_name"
-        fi
-    fi
-}
-
-function bridge_for {
-    local bridge_or_net_name
-    bridge_or_net_name=$1
-
-    if _bridge_exists "$bridge_or_net_name"; then
-        echo "$bridge_or_net_name"
-    else
-        xe network-list name-label="$bridge_or_net_name" params=bridge --minimal
-    fi
-}
-
-function xenapi_ip_on {
-    local bridge_or_net_name
-    bridge_or_net_name=$1
-
-    ip -4 addr show $(bridge_for "$bridge_or_net_name") |\
-    awk '/inet/{split($2, ip, "/"); print ip[1];}'
-}
-
-function xenapi_is_listening_on {
-    local bridge_or_net_name
-    bridge_or_net_name=$1
-
-    ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ]
-}
-
-function parameter_is_specified {
-    local parameter_name
-    parameter_name=$1
-
-    compgen -v | grep "$parameter_name"
-}
-
-function append_kernel_cmdline {
-    local vm_name_label
-    local kernel_args
-
-    vm_name_label="$1"
-    kernel_args="$2"
-
-    local vm
-    local pv_args
-
-    vm=$(_vm_uuid "$vm_name_label")
-    pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm)
-    xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
-}
-
-function destroy_all_vifs_of {
-    local vm_name_label
-
-    vm_name_label="$1"
-
-    local vm
-
-    vm=$(_vm_uuid "$vm_name_label")
-    IFS=,
-    for vif in $(xe vif-list vm-uuid=$vm --minimal); do
-        xe vif-destroy uuid="$vif"
-    done
-    unset IFS
-}
-
-function have_multiple_hosts {
-    xe host-list --minimal | grep -q ","
-}
-
-function attach_network {
-    local bridge_or_net_name
-
-    bridge_or_net_name="$1"
-
-    local net
-    local host
-
-    net=$(_network_uuid "$bridge_or_net_name")
-    host=$(xe host-list --minimal)
-
-    xe network-attach uuid=$net host-uuid=$host
-}
-
-function set_vm_memory {
-    local vm_name_label
-    local memory
-
-    vm_name_label="$1"
-    memory="$2"
-
-    local vm
-
-    vm=$(_vm_uuid "$vm_name_label")
-
-    xe vm-memory-limits-set \
-        static-min=${memory}MiB \
-        static-max=${memory}MiB \
-        dynamic-min=${memory}MiB \
-        dynamic-max=${memory}MiB \
-        uuid=$vm
-}
-
-function max_vcpus {
-    local vm_name_label
-
-    vm_name_label="$1"
-
-    local vm
-    local host
-    local cpu_count
-
-    host=$(xe host-list --minimal)
-    vm=$(_vm_uuid "$vm_name_label")
-
-    cpu_count=$(xe host-param-get \
-        param-name=cpu_info \
-        uuid=$host |
-        sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g')
-
-    if [ -z "$cpu_count" ]; then
-        # get dom0's vcpu count
-        cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l)
-    fi
-
-    # Assert cpu_count is not empty
-    [ -n "$cpu_count" ]
-
-    # Assert it has a numeric nonzero value
-    expr "$cpu_count" + 0
-
-    # 8 VCPUs should be enough for the devstack VM; avoid using too
-    # many VCPUs:
-    # 1. too many VCPUs may trigger a kernel bug which results in the
-    #    VM not being able to boot:
-    #    https://kernel.googlesource.com/pub/scm/linux/kernel/git/wsa/linux/+/e2e004acc7cbe3c531e752a270a74e95cde3ea48
-    # 2. The remaining CPUs can be used for other purpose:
-    #    e.g. boot test VMs.
-    MAX_VCPUS=8
-    if [ $cpu_count -ge $MAX_VCPUS ]; then
-        cpu_count=$MAX_VCPUS
-    fi
-
-    xe vm-param-set uuid=$vm VCPUs-max=$cpu_count
-    xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
-}
-
-function get_domid {
-    local vm_name_label
-
-    vm_name_label="$1"
-
-    xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
-}
-
-function install_conntrack_tools {
-    local xs_host
-    local xs_ver_major
-    local centos_ver
-    local conntrack_conf
-    xs_host=$(xe host-list --minimal)
-    xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1)
-    if [ $xs_ver_major -gt 6 ]; then
-        # Only support conntrack-tools in Dom0 with XS7.0 and above
-        if [ ! -f /usr/sbin/conntrackd ]; then
-            sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo
-            centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'/' -f 1 | cut -d'-' -f 1)
-            yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools
-            # Back up conntrackd.conf after installing conntrack-tools; use the one with statistics mode
-            mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back
-            conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats)
-            cp $conntrack_conf /etc/conntrackd/conntrackd.conf
-        fi
-        service conntrackd restart
-    fi
-}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
deleted file mode 100755
index f4ca71a..0000000
--- a/tools/xen/install_os_domU.sh
+++ /dev/null
@@ -1,418 +0,0 @@
-#!/bin/bash
-
-# This script must be run on a XenServer or XCP machine
-#
-# It creates a DomU VM that runs OpenStack services
-#
-# For more details see: README.md
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-export LC_ALL=C
-
-# This directory
-THIS_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $THIS_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $THIS_DIR/functions
-
-#
-# Get Settings
-#
-TOP_DIR=$(cd $THIS_DIR/../../ && pwd)
-source $TOP_DIR/inc/meta-config
-rm -f $TOP_DIR/.localrc.auto
-extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto
-
-# Source params - override xenrc params in your localrc to suit your taste
-source $THIS_DIR/xenrc
-
-xe_min()
-{
-    local cmd="$1"
-    shift
-    xe "$cmd" --minimal "$@"
-}
-
-#
-# Prepare Dom0
-# including installing XenAPI plugins
-#
-
-cd $THIS_DIR
-
-# Die if multiple hosts listed
-if have_multiple_hosts; then
-    cat >&2 << EOF
-ERROR: multiple hosts found. This might mean that the XenServer is a member
-of a pool - Exiting.
-EOF
-    exit 1
-fi
-
-#
-# Configure Networking
-#
-
-MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true`
-MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true`
-
-setup_network "$VM_BRIDGE_OR_NET_NAME"
-setup_network "$MGT_BRIDGE_OR_NET_NAME"
-setup_network "$PUB_BRIDGE_OR_NET_NAME"
-
-if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
-    if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then
-        cat >&2 << EOF
-ERROR: FLAT_NETWORK_BRIDGE is specified in the localrc file, but either no
-network was found on the XenServer when searching by that value as a
-name-label or bridge name, or the network that was found does not match the
-network specified by VM_BRIDGE_OR_NET_NAME. Please check your localrc file.
-EOF
-        exit 1
-    fi
-fi
-
-if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then
-    cat >&2 << EOF
-ERROR: XenAPI does not have an assigned IP address on the management network.
-Please review your XenServer network configuration / localrc file.
-EOF
-    exit 1
-fi
-
-HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME")
-
-# Set up ip forwarding, but skip on xcp-xapi
-if [ -a /etc/sysconfig/network ]; then
-    if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
-        # FIXME: This doesn't work on reboot!
-        echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
-    fi
-fi
-# Also, enable ip forwarding in rc.local, since the above trick isn't working
-if ! grep -q  "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then
-    echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local
-fi
-# Enable ip forwarding at runtime as well
-echo 1 > /proc/sys/net/ipv4/ip_forward
-
-
-#
-# Shutdown previous runs
-#
-
-DO_SHUTDOWN=${DO_SHUTDOWN:-1}
-CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
-if [ "$DO_SHUTDOWN" = "1" ]; then
-    # Shut down all domUs that were created previously
-    clean_templates_arg=""
-    if $CLEAN_TEMPLATES; then
-        clean_templates_arg="--remove-templates"
-    fi
-    ./scripts/uninstall-os-vpx.sh $clean_templates_arg
-
-    # Destroy any instances that were launched
-    for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
-        echo "Shutting down nova instance $uuid"
-        xe vm-uninstall uuid=$uuid force=true
-    done
-
-    # Destroy orphaned vdis
-    for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do
-        xe vdi-destroy uuid=$uuid
-    done
-fi
-
-
-#
-# Create Ubuntu VM template
-# and/or create VM from template
-#
-
-GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
-TNAME="jeos_template_for_devstack"
-SNAME_TEMPLATE="jeos_snapshot_for_devstack"
-SNAME_FIRST_BOOT="before_first_boot"
-
-function wait_for_VM_to_halt {
-    set +x
-    echo "Waiting for the VM to halt.  Progress in-VM can be checked with XenCenter or xl console:"
-    mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
-    domid=$(get_domid "$GUEST_NAME")
-    echo "ssh root@$mgmt_ip \"xl console $domid\""
-    while true; do
-        state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
-        if [ -n "$state" ]; then
-            break
-        else
-            echo -n "."
-            sleep 20
-        fi
-    done
-    set -x
-}
-
-templateuuid=$(xe template-list name-label="$TNAME")
-if [ -z "$templateuuid" ]; then
-    #
-    # Install Ubuntu over network
-    #
-    UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"}
-
-    # always update the preseed file, in case we have a newer one
-    PRESEED_URL=${PRESEED_URL:-""}
-    if [ -z "$PRESEED_URL" ]; then
-        PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg"
-
-        HTTP_SERVER_LOCATION="/opt/xensource/www"
-        if [ ! -e $HTTP_SERVER_LOCATION ]; then
-            HTTP_SERVER_LOCATION="/var/www/html"
-            mkdir -p $HTTP_SERVER_LOCATION
-        fi
-
-        # Copy the tools DEB to the XS web server
-        XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb"
-        ISO_DIR="/opt/xensource/packages/iso"
-        if [ -e "$ISO_DIR" ]; then
-            TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1)
-            TMP_DIR=/tmp/temp.$RANDOM
-            mkdir -p $TMP_DIR
-            mount -o loop $TOOLS_ISO $TMP_DIR
-            # the target deb package may be *amd64.deb or *all.deb,
-            # so try *amd64.deb first and fall back to *all.deb
-            # if it doesn't exist.
-            DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb)
-            cp $DEB_FILE $HTTP_SERVER_LOCATION
-            umount $TMP_DIR
-            rmdir $TMP_DIR
-            XS_TOOLS_URL=${HOST_IP}/$(basename $DEB_FILE)
-        fi
-
-        cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
-        cp -f $THIS_DIR/devstackubuntu_latecommand.sh $HTTP_SERVER_LOCATION/latecommand.sh
-
-        sed \
-            -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \
-            -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \
-            -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \
-            -e "s,\(d-i passwd/root-password password\).*,\1 $GUEST_PASSWORD,g" \
-            -e "s,\(d-i passwd/root-password-again password\).*,\1 $GUEST_PASSWORD,g" \
-            -e "s,\(d-i preseed/late_command string\).*,\1 in-target mkdir -p /tmp; in-target wget --no-proxy ${HOST_IP}/latecommand.sh -O /root/latecommand.sh; in-target bash /root/latecommand.sh,g" \
-            -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
-
-        sed \
-            -e "s,@XS_TOOLS_URL@,$XS_TOOLS_URL,g" \
-            -i "${HTTP_SERVER_LOCATION}/latecommand.sh"
-    fi
-
-    # Update the template
-    $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL
-
-    # create a new VM from the given template with eth0 attached to the given
-    # network
-    $THIS_DIR/scripts/install-os-vpx.sh \
-        -t "$UBUNTU_INST_TEMPLATE_NAME" \
-        -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \
-        -l "$GUEST_NAME"
-
-    set_vm_memory "$GUEST_NAME" "1024"
-
-    xe vm-start vm="$GUEST_NAME"
-
-    # wait for install to finish
-    wait_for_VM_to_halt
-
-    # set VM to restart after a reboot
-    vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME")
-    xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid"
-
-    # Make template from VM
-    snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE")
-    xe snapshot-clone uuid=$snuuid new-name-label="$TNAME"
-else
-    #
-    # Template already installed, create VM from template
-    #
-    vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
-fi
-
-if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then
-    echo "User requested to quit after JEOS installation"
-    exit 0
-fi
-
-#
-# Prepare VM for DevStack
-#
-xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
-
-# Install XenServer tools, and other such things
-$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME"
-
-# Set virtual machine parameters
-set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
-
-# Max out VCPU count for better performance
-max_vcpus "$GUEST_NAME"
-
-# Wipe out all network cards
-destroy_all_vifs_of "$GUEST_NAME"
-
-# Add only one interface to prepare the guest template
-add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0"
-
-# start the VM to run the prepare steps
-xe vm-start vm="$GUEST_NAME"
-
-# Wait for prep script to finish and shutdown system
-wait_for_VM_to_halt
-
-## Setup network cards
-# Wipe out all
-destroy_all_vifs_of "$GUEST_NAME"
-# Tenant network
-add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR"
-# Management network
-add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR"
-# Public network
-add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR"
-
-#
-# Inject DevStack inside VM disk
-#
-$THIS_DIR/build_xva.sh "$GUEST_NAME"
-
-FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}"
-append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
-
-# Add a separate xvdb, if it was requested
-if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then
-    vm=$(xe vm-list name-label="$GUEST_NAME" --minimal)
-
-    # Add a new disk
-    localsr=$(get_local_sr)
-    extra_vdi=$(xe vdi-create \
-        name-label=xvdb-added-by-devstack \
-        virtual-size="${XEN_XVDB_SIZE_GB}GiB" \
-        sr-uuid=$localsr type=user)
-    xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1
-fi
-
-# create a snapshot before the first boot
-# to allow a quick re-run with the same settings
-xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
-
-#
-# Run DevStack VM
-#
-xe vm-start vm="$GUEST_NAME"
-
-function ssh_no_check {
-    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
-}
-
-# Get hold of the Management IP of OpenStack VM
-OS_VM_MANAGEMENT_ADDRESS=$MGT_IP
-if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then
-    OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
-fi
-
-# Get hold of the Service IP of OpenStack VM
-if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then
-    OS_VM_SERVICES_ADDRESS=$MGT_IP
-    if [ $MGT_IP == "dhcp" ]; then
-        OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
-    fi
-else
-    OS_VM_SERVICES_ADDRESS=$PUB_IP
-    if [ $PUB_IP == "dhcp" ]; then
-        OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR)
-    fi
-fi
-
-# Create an ssh-keypair, and set it up for dom0 user
-rm -f /root/dom0key /root/dom0key.pub
-ssh-keygen -f /root/dom0key -P "" -C "dom0"
-DOMID=$(get_domid "$GUEST_NAME")
-
-xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)"
-xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID
-
-function run_on_appliance {
-    ssh \
-        -i /root/dom0key \
-        -o UserKnownHostsFile=/dev/null \
-        -o StrictHostKeyChecking=no \
-        -o BatchMode=yes \
-        "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@"
-}
-
-# Wait until we can log in to the appliance
-while ! run_on_appliance true; do
-    sleep 1
-done
-
-# Remove the authorized_keys updater cronjob
-echo "" | run_on_appliance crontab -
-
-# Generate a passwordless ssh key for domzero user
-echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance
-
-# Authenticate that user to dom0
-run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
-
-# If we have copied our ssh credentials, use ssh to monitor while the installation runs
-WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
-COPYENV=${COPYENV:-1}
-if [ "$WAIT_TILL_LAUNCH" = "1" ]  && [ -e ~/.ssh/id_rsa.pub  ] && [ "$COPYENV" = "1" ]; then
-    set +x
-
-    echo "VM Launched - Waiting for run.sh"
-    while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e /opt/stack/run_sh.pid"; do
-        sleep 10
-    done
-    echo -n "devstack service is running, waiting for stack.sh to start logging..."
-
-    pid=`ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "cat /opt/stack/run_sh.pid"`
-    if [ -n "$SCREEN_LOGDIR" ]; then
-        while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e ${SCREEN_LOGDIR}/stack.log"; do
-            sleep 10
-        done
-
-        ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "tail --pid $pid -n +1 -f ${SCREEN_LOGDIR}/stack.log"
-    else
-        echo -n "SCREEN_LOGDIR not set; just waiting for process $pid to finish"
-        ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "wait $pid"
-    fi
-
-    set -x
-    # Fail if devstack did not succeed
-    ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /opt/stack/runsh.succeeded'
-
-    set +x
-    echo "################################################################################"
-    echo ""
-    echo "All Finished!"
-    echo "You can visit the OpenStack Dashboard"
-    echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
-else
-    set +x
-    echo "################################################################################"
-    echo ""
-    echo "All Finished!"
-    echo "Now, you can monitor the progress of the stack.sh installation by "
-    echo "looking at the console of your domU / checking the log files."
-    echo ""
-    echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
-    echo "and then do: 'sudo systemctl status devstack' to check if devstack is still running."
-    echo "Check that /opt/stack/runsh.succeeded exists"
-    echo ""
-    echo "When devstack completes, you can visit the OpenStack Dashboard"
-    echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
-fi
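
The before_first_boot snapshot taken above is what makes a quick re-run with
the same settings possible. A hedged sketch of rolling back to it by hand,
using the names defined in this script (xe snapshot syntax as in XenServer
6.x/7.x):

    # Shut the appliance down, revert to the pre-first-boot snapshot, boot again
    snap_uuid=$(xe snapshot-list name-label="before_first_boot" --minimal)
    xe vm-shutdown vm="DevStackOSDomU" force=true
    xe snapshot-revert snapshot-uuid="$snap_uuid"
    xe vm-start vm="DevStackOSDomU"
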
diff --git a/tools/xen/mocks b/tools/xen/mocks
deleted file mode 100644
index 3b9b05c..0000000
--- a/tools/xen/mocks
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-test ! -e "$LIST_OF_ACTIONS" && {
-    echo "Mocking is not set up properly."
-    echo "LIST_OF_ACTIONS should point to an existing file."
-    exit 1
-}
-
-test ! -e "$LIST_OF_DIRECTORIES" && {
-    echo "Mocking is not set up properly."
-    echo "LIST_OF_DIRECTORIES should point to an existing file."
-    exit 1
-}
-
-test ! -e "$XE_RESPONSE" && {
-    echo "Mocking is not set up properly."
-    echo "XE_RESPONSE should point to an existing file."
-    exit 1
-}
-
-test ! -e "$XE_CALLS" && {
-    echo "Mocking is not set up properly."
-    echo "XE_CALLS should point to an existing file."
-    exit 1
-}
-
-function mktemp {
-    if test "${1:-}" = "-d";
-    then
-        echo "tempdir"
-    else
-        echo "tempfile"
-    fi
-}
-
-function wget {
-    if [[ $@ =~ "failurl" ]]; then
-        return 1
-    fi
-    echo "wget $@" >> $LIST_OF_ACTIONS
-}
-
-function mkdir {
-    if test "${1:-}" = "-p";
-    then
-        echo "$2" >> $LIST_OF_DIRECTORIES
-    fi
-}
-
-function unzip {
-    echo "Random rubbish from unzip"
-    echo "unzip $@" >> $LIST_OF_ACTIONS
-}
-
-function rm {
-    echo "rm $@" >> $LIST_OF_ACTIONS
-}
-
-function ln {
-    echo "ln $@" >> $LIST_OF_ACTIONS
-}
-
-function [ {
-    if test "${1:-}" = "-d";
-    then
-        echo "[ $@" >> $LIST_OF_ACTIONS
-        for directory in $(cat $LIST_OF_DIRECTORIES)
-        do
-            if test "$directory" = "$2"
-            then
-                return 0
-            fi
-        done
-        return 1
-    fi
-    echo "Mock test does not implement the requested function: ${1:-}"
-    exit 1
-}
-
-function die_with_error {
-    echo "$1" >> $DEAD_MESSAGES
-}
-
-function xe {
-    cat $XE_RESPONSE
-    {
-    for i in $(seq "$#")
-    do
-        eval "echo \"\$$i\""
-    done
-    } >> $XE_CALLS
-}
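
The mocks above work by shadowing external commands (wget, mkdir, xe, even
the [ builtin) with shell functions that record their arguments in log files
instead of acting. A minimal usage sketch, mirroring how test_functions.sh
drives them (the URL is illustrative):

    LIST_OF_ACTIONS=$(mktemp); LIST_OF_DIRECTORIES=$(mktemp)
    XE_RESPONSE=$(mktemp); XE_CALLS=$(mktemp)
    (
        . mocks                        # shadows the commands in this subshell only
        wget http://example.com/file   # recorded in $LIST_OF_ACTIONS, not executed
    )
    grep -q "wget http://example.com/file" $LIST_OF_ACTIONS && echo "intercepted"
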
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
deleted file mode 100755
index 6de1afc..0000000
--- a/tools/xen/prepare_guest.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-
-# This script is run on an Ubuntu VM.
-# This script is inserted into the VM by prepare_guest_template.sh
-# and is run when that VM boots.
-# It customizes a fresh Ubuntu install, so it is ready
-# to run stack.sh
-#
-# This includes installing the XenServer tools,
-# creating the user called "stack",
-# and shutting down the VM to signal that the script has completed
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# Configurable nuggets
-GUEST_PASSWORD="$1"
-STACK_USER="$2"
-DOMZERO_USER="$3"
-
-
-function setup_domzero_user {
-    local username
-
-    username="$1"
-
-    local key_updater_script
-    local sudoers_file
-    key_updater_script="/home/$username/update_authorized_keys.sh"
-    sudoers_file="/etc/sudoers.d/allow_$username"
-
-    # Create user
-    adduser --disabled-password --quiet "$username" --gecos "$username"
-
-    # Give passwordless sudo
-    cat > $sudoers_file << EOF
-    $username ALL = NOPASSWD: ALL
-EOF
-    chmod 0440 $sudoers_file
-
-    # A script to populate this user's authorized_keys from xenstore
-    cat > $key_updater_script << EOF
-#!/bin/bash
-set -eux
-
-DOMID=\$(sudo xenstore-read domid)
-sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username
-sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value
-cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys
-EOF
-
-    # Give the key updater to the user
-    chown $username:$username $key_updater_script
-    chmod 0700 $key_updater_script
-
-    # Setup the .ssh folder
-    mkdir -p /home/$username/.ssh
-    chown $username:$username /home/$username/.ssh
-    chmod 0700 /home/$username/.ssh
-    touch /home/$username/.ssh/authorized_keys
-    chown $username:$username /home/$username/.ssh/authorized_keys
-    chmod 0600 /home/$username/.ssh/authorized_keys
-
-    # Setup the key updater as a cron job
-    crontab -u $username - << EOF
-* * * * * $key_updater_script
-EOF
-
-}
-
-# Make a small cracklib dictionary, so that passwd still works, but we don't
-# have the big dictionary.
-mkdir -p /usr/share/cracklib
-echo a | cracklib-packer
-
-# Make /etc/shadow, and set the root password
-pwconv
-echo "root:$GUEST_PASSWORD" | chpasswd
-
-# Put the VPX into UTC.
-rm -f /etc/localtime
-
-# Add stack user
-groupadd libvirtd
-useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
-echo $STACK_USER:$GUEST_PASSWORD | chpasswd
-echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-setup_domzero_user "$DOMZERO_USER"
-
-# Add a udev rule so that new block devices can be written by the stack user
-cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF
-KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660"
-EOF
-
-# Give ownership of /opt/stack to stack user
-chown -R $STACK_USER /opt/stack
-
-function setup_vimrc {
-    if [ ! -e $1 ]; then
-        # Simple but usable vimrc
-        cat > $1 <<EOF
-se ts=4
-se expandtab
-se shiftwidth=4
-EOF
-    fi
-}
-
-# Setup simple .vimrcs
-setup_vimrc /root/.vimrc
-setup_vimrc /opt/stack/.vimrc
-
-# remove self from rc.local
-# so this script is not run again
-rm -rf /etc/rc.local
-
-# Restore rc.local file
-cp /etc/rc.local.preparebackup /etc/rc.local
-
-# shutdown to notify we are done
-shutdown -h now
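
The cron job installed above is the guest half of the xenstore-based key
distribution: dom0 writes the public key under
/local/domain/<domid>/authorized_keys/<user>, and every minute the guest
copies it into the user's ~/.ssh/authorized_keys. A sketch of checking the
plumbing from inside the guest, using the paths set up by this script:

    # As the domzero user inside the guest:
    DOMID=$(sudo xenstore-read domid)
    sudo xenstore-read /local/domain/$DOMID/authorized_keys/domzero
    crontab -l    # should show update_authorized_keys.sh scheduled every minute
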
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
deleted file mode 100755
index 6cdddda..0000000
--- a/tools/xen/prepare_guest_template.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-# This script is run by install_os_domU.sh
-#
-# Parameters:
-# - $GUEST_NAME - hostname for the DomU VM
-#
-# It modifies the ubuntu image created by install_os_domU.sh
-#
-# This script is responsible for customizing the fresh Ubuntu
-# image so on boot it runs the prepare_guest.sh script
-# that modifies the VM so it is ready to run stack.sh.
-# It does this by mounting the disk image of the VM.
-#
-# The resultant image is started by install_os_domU.sh,
-# and once the VM has shut down, build_xva.sh is run
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# This directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $TOP_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $TOP_DIR/functions
-
-# Source params - override xenrc params in your localrc to suit your taste
-source xenrc
-
-#
-# Parameters
-#
-GUEST_NAME="$1"
-
-# Mount the VDI
-STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
-add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
-
-# Make sure we have a stage
-if [ ! -d $STAGING_DIR/etc ]; then
-    echo "Stage is not properly set up!"
-    exit 1
-fi
-
-# Copy prepare_guest.sh to VM
-mkdir -p $STAGING_DIR/opt/stack/
-cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh
-
-# backup rc.local
-cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup
-
-# run prepare_guest.sh on boot
-cat <<EOF >$STAGING_DIR/etc/rc.local
-#!/bin/sh -e
-bash /opt/stack/prepare_guest.sh \\
-    "$GUEST_PASSWORD" "$STACK_USER" "$DOMZERO_USER" \\
-    > /opt/stack/prepare_guest.log 2>&1
-EOF
-
-# Update ubuntu repositories
-cat > $STAGING_DIR/etc/apt/sources.list << EOF
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse
-
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse
-EOF
-
-rm -f $STAGING_DIR/etc/apt/apt.conf
-if [ -n "$UBUNTU_INST_HTTP_PROXY" ]; then
-    cat > $STAGING_DIR/etc/apt/apt.conf << EOF
-Acquire::http::Proxy "$UBUNTU_INST_HTTP_PROXY";
-EOF
-fi
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
deleted file mode 100755
index 66f7ef4..0000000
--- a/tools/xen/scripts/install-os-vpx.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-set -eux
-
-BRIDGE=
-NAME_LABEL=
-TEMPLATE_NAME=
-
-usage()
-{
-cat << EOF
-
-  Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE]
-
-  Install a VM from a template
-
-  OPTIONS:
-
-     -h           Shows this message.
-     -t template  VM template to use
-     -l name      Specifies the name label for the VM.
-     -n bridge    The bridge/network to use for eth0. Defaults to xenbr0
-EOF
-}
-
-get_params()
-{
-    while getopts "hbn:r:l:t:" OPTION; do
-        case $OPTION in
-            h) usage
-                exit 1
-                ;;
-            n)
-                BRIDGE=$OPTARG
-                ;;
-            l)
-                NAME_LABEL=$OPTARG
-                ;;
-            t)
-                TEMPLATE_NAME=$OPTARG
-                ;;
-            ?)
-                usage
-                exit
-                ;;
-        esac
-    done
-    if [[ -z $BRIDGE ]]; then
-        BRIDGE=xenbr0
-    fi
-
-    if [[ -z $TEMPLATE_NAME ]]; then
-        echo "Please specify a template name" >&2
-        exit 1
-    fi
-
-    if [[ -z $NAME_LABEL ]]; then
-        echo "Please specify a name-label for the new VM" >&2
-        exit 1
-    fi
-}
-
-
-xe_min()
-{
-    local cmd="$1"
-    shift
-    xe "$cmd" --minimal "$@"
-}
-
-
-find_network()
-{
-    result=$(xe_min network-list bridge="$1")
-    if [ "$result" = "" ]; then
-        result=$(xe_min network-list name-label="$1")
-    fi
-    echo "$result"
-}
-
-
-create_vif()
-{
-    local v="$1"
-    echo "Installing VM interface on [$BRIDGE]"
-    local out_network_uuid
-    out_network_uuid=$(find_network "$BRIDGE")
-    xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
-}
-
-
-
-# Make the VM auto-start on server boot.
-set_auto_start()
-{
-    local v="$1"
-    xe vm-param-set uuid="$v" other-config:auto_poweron=true
-}
-
-
-destroy_vifs()
-{
-    local v="$1"
-    IFS=,
-    for vif in $(xe_min vif-list vm-uuid="$v"); do
-        xe vif-destroy uuid="$vif"
-    done
-    unset IFS
-}
-
-
-get_params "$@"
-
-vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL")
-destroy_vifs "$vm_uuid"
-set_auto_start "$vm_uuid"
-create_vif "$vm_uuid"
-xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
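
A usage example matching the option parsing above; the template and network
names are the defaults from xenrc and may differ on your host:

    ./install-os-vpx.sh \
        -t "Ubuntu 16.04 (64-bit) for DevStack" \
        -n xenbr0 \
        -l DevStackOSDomU
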
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
deleted file mode 100755
index 6ea3642..0000000
--- a/tools/xen/scripts/install_ubuntu_template.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-#
-# This creates an Ubuntu Server 32-bit or 64-bit template
-# on XenServer 5.6.x, 6.0.x and 6.1.x.
-# The template does a net install only.
-#
-# Based on a script by: David Markey <david.markey@citrix.com>
-#
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# This directory
-BASE_DIR=$(cd $(dirname "$0") && pwd)
-
-# For default settings see xenrc
-source $BASE_DIR/../xenrc
-
-# Get the params
-preseed_url=$1
-
-# Delete template or skip template creation as required
-previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \
-    params=uuid --minimal)
-if [ -n "$previous_template" ]; then
-    if $CLEAN_TEMPLATES; then
-        xe template-param-clear param-name=other-config uuid=$previous_template
-        xe template-uninstall template-uuid=$previous_template force=true
-    else
-        echo "Template $UBUNTU_INST_TEMPLATE_NAME already present"
-        exit 0
-    fi
-fi
-
-# Get built-in template
-builtin_name="Debian Squeeze 6.0 (32-bit)"
-builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal)
-if [[ -z $builtin_uuid ]]; then
-    echo "Can't find the Debian Squeeze 32bit template on your XenServer."
-    exit 1
-fi
-
-# Clone built-in template to create new template
-new_uuid=$(xe vm-clone uuid=$builtin_uuid \
-    new-name-label="$UBUNTU_INST_TEMPLATE_NAME")
-disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024))
-
-# Some of these settings can be found in example preseed files;
-# however, they need to be answered before the netinstall
-# is ready to fetch the preseed file, and as such must be set here
-# to get a fully automated install
-pvargs="quiet console=hvc0 partman/default_filesystem=ext3 \
-console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \
-keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \
-netcfg/choose_interface=eth0 \
-netcfg/get_hostname=os netcfg/get_domain=os auto \
-url=${preseed_url}"
-
-if [ "$UBUNTU_INST_IP" != "dhcp" ]; then
-    netcfgargs="netcfg/disable_autoconfig=true \
-netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \
-netcfg/get_ipaddress=${UBUNTU_INST_IP} \
-netcfg/get_netmask=${UBUNTU_INST_NETMASK} \
-netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \
-netcfg/confirm_static=true"
-    pvargs="${pvargs} ${netcfgargs}"
-fi
-
-xe template-param-set uuid=$new_uuid \
-    other-config:install-methods=http \
-    other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \
-    PV-args="$pvargs" \
-    other-config:debian-release="$UBUNTU_INST_RELEASE" \
-    other-config:default_template=true \
-    other-config:disks='<provision><disk device="0" size="'$disk_size'" sr="" bootable="true" type="system"/></provision>' \
-    other-config:install-arch="$UBUNTU_INST_ARCH"
-
-if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then
-    xe template-param-set uuid=$new_uuid \
-        other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY"
-fi
-
-echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi
deleted file mode 100755
index 909ce32..0000000
--- a/tools/xen/scripts/manage-vdi
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-action="$1"
-vm="$2"
-device="${3-0}"
-part="${4-}"
-
-function xe_min() {
-  local cmd="$1"
-  shift
-  xe "$cmd" --minimal "$@"
-}
-
-function run_udev_settle() {
-  which_udev=$(which udevsettle) || true
-  if [ -n "$which_udev" ]; then
-      udevsettle
-  else
-      udevadm settle
-  fi
-}
-
-vm_uuid=$(xe_min vm-list name-label="$vm")
-vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \
-                           userdevice="$device")
-
-dom0_uuid=$(xe_min vm-list is-control-domain=true)
-
-function get_mount_device() {
-  vbd_uuid=$1
-
-  dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
-  if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then
-    DEBIAN_FRONTEND=noninteractive \
-        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \
-        install kpartx &> /dev/null || true
-    mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p")
-    if [ -z "$mapping" ]; then
-       echo "Failed to find mapping"
-       exit 1
-    fi
-
-    local device="/dev/mapper/${mapping}"
-    for (( i = 0; i < 5; i++ )) ; do
-        if [ -b $device ] ; then
-            echo $device
-            return
-        fi
-        sleep 1
-    done
-    echo "ERROR: timed out waiting for dev-mapper"
-    exit 1
-  else
-    echo "/dev/$dev$part"
-  fi
-}
-
-function clean_dev_mappings() {
-  dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
-  if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then
-    kpartx -dv "/dev/$dev"
-  fi
-}
-
-function open_vdi() {
-  vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \
-                         device=autodetect)
-  mp=$(mktemp -d)
-  xe vbd-plug uuid="$vbd_uuid"
-
-  run_udev_settle
-
-  mount_device=$(get_mount_device "$vbd_uuid")
-  mount "$mount_device" "$mp"
-  echo "Your vdi is mounted at $mp"
-}
-
-function close_vdi() {
-  vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid")
-  mount_device=$(get_mount_device "$vbd_uuid")
-  run_udev_settle
-  umount "$mount_device"
-
-  clean_dev_mappings
-
-  xe vbd-unplug uuid=$vbd_uuid
-  xe vbd-destroy uuid=$vbd_uuid
-}
-
-if [ "$action" == "open" ]; then
-  open_vdi
-elif [ "$action" == "close" ]; then
-  close_vdi
-fi
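
Usage as seen in prepare_guest_template.sh above: open maps and mounts
partition 1 of the VM's first disk and prints the mount point; close unmounts
and unplugs it again:

    STAGING_DIR=$(./manage-vdi open DevStackOSDomU 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
    # ... edit files under $STAGING_DIR ...
    ./manage-vdi close DevStackOSDomU 0 1
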
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
deleted file mode 100755
index 2846dc4..0000000
--- a/tools/xen/scripts/on_exit.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o xtrace
-
-if [ -z "${on_exit_hooks:-}" ]; then
-    on_exit_hooks=()
-fi
-
-on_exit()
-{
-    for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do
-        eval "${on_exit_hooks[$i]}"
-    done
-}
-
-add_on_exit()
-{
-    local n=${#on_exit_hooks[*]}
-    on_exit_hooks[$n]="$*"
-    if [[ $n -eq 0 ]]; then
-        trap on_exit EXIT
-    fi
-}
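
Hooks registered with add_on_exit run in reverse order of registration when
the shell exits, so teardown naturally mirrors setup. A minimal usage sketch:

    . scripts/on_exit.sh
    TMP_DIR=$(mktemp -d)
    add_on_exit "rmdir $TMP_DIR"            # registered first, runs last
    add_on_exit "umount $TMP_DIR || true"   # registered last, runs first
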
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
deleted file mode 100755
index 96dad7e..0000000
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-set -ex
-
-# By default, don't remove the templates
-REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"}
-if [ "$1" = "--remove-templates" ]; then
-    REMOVE_TEMPLATES=true
-fi
-
-xe_min()
-{
-    local cmd="$1"
-    shift
-    xe "$cmd" --minimal "$@"
-}
-
-destroy_vdi()
-{
-    local vbd_uuid="$1"
-    local type
-    type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
-    local dev
-    dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
-    local vdi_uuid
-    vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
-
-    if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
-        xe vdi-destroy uuid=$vdi_uuid
-    fi
-}
-
-uninstall()
-{
-    local vm_uuid="$1"
-    local power_state
-    power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
-
-    if [ "$power_state" != "halted" ]; then
-        xe vm-shutdown vm=$vm_uuid force=true
-    fi
-
-    for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
-        destroy_vdi "$v"
-    done
-
-    xe vm-uninstall vm=$vm_uuid force=true >/dev/null
-}
-
-uninstall_template()
-{
-    local vm_uuid="$1"
-
-    for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
-        destroy_vdi "$v"
-    done
-
-    xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null
-}
-
-# remove the VMs and their disks
-for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
-    uninstall "$u"
-done
-
-# remove the templates
-if [ "$REMOVE_TEMPLATES" == "true" ]; then
-    for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
-        uninstall_template "$u"
-    done
-fi
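
This is the cleanup entry point install_os_domU.sh shells out to; it can also
be run on its own to tear down earlier DevStack appliances:

    ./uninstall-os-vpx.sh                      # remove VMs tagged other-config:os-vpx=true
    ./uninstall-os-vpx.sh --remove-templates   # also remove their templates
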
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
deleted file mode 100755
index 324e6a1..0000000
--- a/tools/xen/test_functions.sh
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/bash
-
-# Tests for functions.
-#
-# The tests source the mocks file to mock out various functions. The
-# mocking-out always happens in a sub-shell, so it has no impact on
-# the functions defined here.
-
-# To run the tests, please run:
-#
-# ./test_functions.sh run_tests
-#
-# To only print out the discovered test functions, run:
-#
-# ./test_functions.sh
-
-. functions
-
-# Setup
-function before_each_test {
-    LIST_OF_DIRECTORIES=$(mktemp)
-    truncate -s 0 $LIST_OF_DIRECTORIES
-
-    LIST_OF_ACTIONS=$(mktemp)
-    truncate -s 0 $LIST_OF_ACTIONS
-
-    XE_RESPONSE=$(mktemp)
-    truncate -s 0 $XE_RESPONSE
-
-    XE_CALLS=$(mktemp)
-    truncate -s 0 $XE_CALLS
-
-    DEAD_MESSAGES=$(mktemp)
-    truncate -s 0 $DEAD_MESSAGES
-}
-
-# Teardown
-function after_each_test {
-    rm -f $LIST_OF_DIRECTORIES
-    rm -f $LIST_OF_ACTIONS
-    rm -f $XE_RESPONSE
-    rm -f $XE_CALLS
-}
-
-# Helpers
-function setup_xe_response {
-    echo "$1" > $XE_RESPONSE
-}
-
-function given_directory_exists {
-    echo "$1" >> $LIST_OF_DIRECTORIES
-}
-
-function assert_directory_exists {
-    grep "$1" $LIST_OF_DIRECTORIES
-}
-
-function assert_previous_command_failed {
-    [ "$?" != "0" ] || exit 1
-}
-
-function assert_xe_min {
-    grep -qe "^--minimal\$" $XE_CALLS
-}
-
-function assert_xe_param {
-    grep -qe "^$1\$" $XE_CALLS
-}
-
-function assert_died_with {
-    diff -u <(echo "$1") $DEAD_MESSAGES
-}
-
-function mock_out {
-    local FNNAME="$1"
-    local OUTPUT="$2"
-
-    . <(cat << EOF
-function $FNNAME {
-    echo "$OUTPUT"
-}
-EOF
-)
-}
-
-function assert_symlink {
-    grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS
-}
-
-# Tests
-function test_plugin_directory_on_xenserver {
-    given_directory_exists "/etc/xapi.d/plugins/"
-
-    PLUGDIR=$(. mocks && xapi_plugin_location)
-
-    [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ]
-}
-
-function test_plugin_directory_on_xcp {
-    given_directory_exists "/usr/lib/xcp/plugins/"
-
-    PLUGDIR=$(. mocks && xapi_plugin_location)
-
-    [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ]
-}
-
-function test_no_plugin_directory_found {
-    set +e
-
-    local IGNORE
-    IGNORE=$(. mocks && xapi_plugin_location)
-
-    assert_previous_command_failed
-
-    grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS
-    grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
-}
-
-function test_create_directory_for_kernels {
-    (
-        . mocks
-        mock_out get_local_sr_path /var/run/sr-mount/uuid1
-        create_directory_for_kernels
-    )
-
-    assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels"
-    assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels"
-}
-
-function test_create_directory_for_kernels_existing_dir {
-    (
-        . mocks
-        given_directory_exists "/boot/guest"
-        create_directory_for_kernels
-    )
-
-    diff -u $LIST_OF_ACTIONS - << EOF
-[ -d /boot/guest ]
-EOF
-}
-
-function test_create_directory_for_images {
-    (
-        . mocks
-        mock_out get_local_sr_path /var/run/sr-mount/uuid1
-        create_directory_for_images
-    )
-
-    assert_directory_exists "/var/run/sr-mount/uuid1/os-images"
-    assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images"
-}
-
-function test_create_directory_for_images_existing_dir {
-    (
-        . mocks
-        given_directory_exists "/images"
-        create_directory_for_images
-    )
-
-    diff -u $LIST_OF_ACTIONS - << EOF
-[ -d /images ]
-EOF
-}
-
-function test_get_local_sr {
-    setup_xe_response "uuid123"
-
-    local RESULT
-    RESULT=$(. mocks && get_local_sr)
-
-    [ "$RESULT" == "uuid123" ]
-
-    assert_xe_param "pool-list" params=default-SR minimal=true
-}
-
-function test_get_local_sr_path {
-    local RESULT
-    RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
-
-    [ "/var/run/sr-mount/uuid1" == "$RESULT" ]
-}
-
-# Test runner
-[ "$1" = "" ] && {
-    grep -e "^function *test_" $0 | cut -d" " -f2
-}
-
-[ "$1" = "run_tests" ] && {
-    for testname in $($0); do
-        echo "$testname"
-        before_each_test
-        (
-            set -eux
-            $testname
-        )
-        if [ "$?" != "0" ]; then
-            echo "FAIL"
-            exit 1
-        else
-            echo "PASS"
-        fi
-
-        after_each_test
-    done
-}
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
deleted file mode 100644
index 169e042..0000000
--- a/tools/xen/xenrc
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-
-#
-# XenServer specific defaults for the /tools/xen/ scripts
-# Similar to stackrc, you can override these in your localrc
-#
-
-# Name of this guest
-GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
-
-# Template cleanup
-CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
-
-# Size of image
-VDI_MB=${VDI_MB:-5000}
-
-# DevStack now contains many components. 4GB of RAM is not enough to prevent
-# swapping and memory fragmentation - the latter of which can cause failures
-# such as blkfront failing to plug a VBD, leading to random test failures.
-#
-# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs
-OSDOMU_MEM_MB=6144
-OSDOMU_VDI_GB=8
-
-# Network mapping. Specify bridge names or network names. Network names may
-# differ across localised versions of XenServer. If a given bridge/network
-# is not found, a new network will be created with the specified name.
-
-# Get the management network from the XS installation
-VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
-PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
-
-# VM Password
-GUEST_PASSWORD=${GUEST_PASSWORD:-secret}
-
-# Extracted variables for OpenStack VM network device numbers.
-# Make sure they form a continuous sequence starting from 0
-MGT_DEV_NR=0
-VM_DEV_NR=1
-PUB_DEV_NR=2
-
-# Host Interface, i.e. the interface on the nova vm you want to expose the
-# services on. Usually the device connected to the management network or the
-# one connected to the public network is used.
-HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"}
-
-#
-# Our nova host's network info
-#
-
-# Management network
-MGT_IP=${MGT_IP:-dhcp}
-MGT_NETMASK=${MGT_NETMASK:-ignored}
-
-# VM Network
-VM_IP=${VM_IP:-10.255.255.255}
-VM_NETMASK=${VM_NETMASK:-255.255.255.0}
-
-# Public network
-# Aligned with stack.sh - see FLOATING_RANGE
-PUB_IP=${PUB_IP:-172.24.4.10}
-PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
-
-# Ubuntu install settings
-UBUNTU_INST_RELEASE="xenial"
-UBUNTU_INST_TEMPLATE_NAME="Ubuntu 16.04 (64-bit) for DevStack"
-# For 12.04, use "precise" and update the template name.
-# However, for 12.04 you should be using
-# XenServer 6.1 or later, or XCP 1.6 or later.
-# 11.10 is only really supported with XenServer 6.0.2 and later.
-UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
-UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
-UBUNTU_INST_HTTP_PROXY=""
-UBUNTU_INST_LOCALE="en_US"
-UBUNTU_INST_KEYBOARD="us"
-# network configuration for ubuntu netinstall
-UBUNTU_INST_IP="dhcp"
-UBUNTU_INST_NAMESERVERS=""
-UBUNTU_INST_NETMASK=""
-UBUNTU_INST_GATEWAY=""
-
-# Create a separate xvdb. This could be used as a backing device for cinder
-# volumes. Specify
-#   XEN_XVDB_SIZE_GB=10
-#   VOLUME_BACKING_DEVICE=/dev/xvdb
-# in your localrc to avoid kernel lockups:
-#   https://bugs.launchpad.net/cinder/+bug/1023755
-#
-# Set the size to 0 to avoid creating the additional disk.
-XEN_XVDB_SIZE_GB=0
-
-STACK_USER=stack
-DOMZERO_USER=domzero
-
-RC_DIR="../.."
-
-restore_nounset=$(set +o | grep nounset)
-set +u
-
-## Note that the lines below come from stackrc to support
-## new-style config files
-source $RC_DIR/functions-common
-
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
-    # Old-style user-supplied config
-    source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
-    # New-style user-supplied config extracted from local.conf
-    source $RC_DIR/.localrc.auto
-fi
-
-$restore_nounset
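
The save/restore dance around the sourcing above relies on "set +o" printing
the commands that would recreate the current shell option state; capturing the
nounset line and executing it later restores whatever the caller had. The
idiom in isolation (MAYBE_UNSET is an illustrative variable name):

    restore_nounset=$(set +o | grep nounset)   # e.g. "set -o nounset" or "set +o nounset"
    set +u                                     # relax nounset temporarily
    echo "${MAYBE_UNSET}"                      # safe only while nounset is off
    $restore_nounset                           # restore the caller's setting
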
diff --git a/tox.ini b/tox.ini
index 46b15f4..ec764ab 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,19 +1,19 @@
 [tox]
-minversion = 1.6
+minversion = 3.18.0
 skipsdist = True
 envlist = bashate
 
 [testenv]
 usedevelop = False
-install_command = pip install {opts} {packages}
+basepython = python3
 
 [testenv:bashate]
 # if you want to test out some changes you have made to bashate
 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
 # modified bashate tree
 deps =
-   {env:BASHATE_INSTALL_PATH:bashate==0.5.1}
-whitelist_externals = bash
+   {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
+allowlist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)    \
          -not \( -type d -name doc -prune \)     \
@@ -35,27 +35,22 @@
 
 [testenv:docs]
 deps =
-   Pygments
-   docutils
-   sphinx>=1.6.2
-   pbr>=2.0.0,!=2.1.0
-   openstackdocstheme>=1.11.0
-   nwdiag
-   blockdiag
-   sphinxcontrib-blockdiag
-   sphinxcontrib-nwdiag
-whitelist_externals = bash
+  -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+  -r{toxinidir}/doc/requirements.txt
+allowlist_externals = bash
 setenv =
   TOP_DIR={toxinidir}
 commands =
-  python setup.py build_sphinx
+  sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
+
+[testenv:pdf-docs]
+deps = {[testenv:docs]deps}
+allowlist_externals =
+   make
+commands =
+   sphinx-build -W -b latex doc/source doc/build/pdf
+   make -C doc/build/pdf
 
 [testenv:venv]
-deps =
-   pbr>=2.0.0,!=2.1.0
-   sphinx>=1.6.2
-   openstackdocstheme>=1.11.0
-   blockdiag
-   sphinxcontrib-blockdiag
-   sphinxcontrib-nwdiag
+deps = -r{toxinidir}/doc/requirements.txt
 commands = {posargs}
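
With the environments modernized above, the common local invocations are:

    tox -e bashate     # bashate style checks over the tree's shell scripts
    tox -e docs        # HTML docs via sphinx-build
    tox -e pdf-docs    # PDF docs via sphinx-build + make
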
diff --git a/unstack.sh b/unstack.sh
index 5d3672e..d9dca7c 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -45,6 +45,10 @@
 # Configure Projects
 # ==================
 
+# Determine what system we are running on.  This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` and ``DISTRO``
+GetDistro
+
 # Plugin Phase 0: override_defaults - allow plugins to override
 # defaults before other services are run
 run_phase override_defaults
@@ -83,10 +87,6 @@
 
 load_plugin_settings
 
-# Determine what system we are running on.  This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
-GetOSVersion
-
 set -o xtrace
 
 # Run extras
@@ -99,6 +99,7 @@
 
 if is_service_enabled nova; then
     stop_nova
+    cleanup_nova
 fi
 
 if is_service_enabled placement; then
@@ -181,3 +182,6 @@
     clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
     clean_lvm_filter
 fi
+
+clean_pyc_files
+rm -Rf $DEST/async
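
clean_pyc_files is a helper defined elsewhere in the DevStack tree; its exact
body is not part of this diff, but the net effect of this final hunk is
roughly the following sketch (an assumption, not the actual function):

    # Drop stale compiled Python files and the retired async log directory
    find $DEST -name "*.pyc" -delete
    rm -Rf $DEST/async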