Merge "Fix Virtuozzo CI"
diff --git a/.gitignore b/.gitignore
index 8553b3f..e5e1f6a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
 *~
 .*.sw?
 *.log
-*.log.[1-9]
+*-log
+*.log.*
+*-log.*
 *.pem
 *.pyc
 .localrc.auto
diff --git a/.zuul.yaml b/.zuul.yaml
index c8bb337..6682142 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -9,6 +9,16 @@
           - controller
 
 - nodeset:
+    name: openstack-single-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: devstack-single-node-centos-7
     nodes:
       - name: controller
@@ -19,10 +29,10 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-opensuse-423
+    name: devstack-single-node-opensuse-150
     nodes:
       - name: controller
-        label: opensuse-423
+        label: opensuse-150
     groups:
       - name: tempest
         nodes:
@@ -39,16 +49,6 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-fedora-27
-    nodes:
-      - name: controller
-        label: fedora-27
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
     name: devstack-single-node-fedora-latest
     nodes:
       - name: controller
@@ -103,10 +103,10 @@
       job.group-vars.peers, which is what is used by multi node jobs for subnode
       nodes (everything but the controller).
     required-projects:
-      - openstack-dev/devstack
+      - git.openstack.org/openstack-dev/devstack
     roles:
-      - zuul: openstack-infra/devstack-gate
-      - zuul: openstack-infra/openstack-zuul-jobs
+      - zuul: git.openstack.org/openstack-infra/devstack-gate
+      - zuul: git.openstack.org/openstack-infra/openstack-zuul-jobs
     vars:
       devstack_localrc:
         DATABASE_PASSWORD: secretdatabase
@@ -130,46 +130,46 @@
         # Ignore any default set by devstack. Emit a "disable_all_services".
         base: false
       zuul_copy_output:
-        '{{ devstack_conf_dir }}/local.conf': 'logs'
-        '{{ devstack_conf_dir }}/localrc': 'logs'
-        '{{ devstack_conf_dir }}/.localrc.auto': 'logs'
-        '{{ devstack_conf_dir }}/.stackenv': 'logs'
-        '{{ devstack_log_dir }}/dstat-csv.log': 'logs'
-        '{{ devstack_log_dir }}/devstacklog.txt': 'logs'
-        '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs'
-        '{{ devstack_full_log}}': 'logs'
-        '{{ stage_dir }}/verify_tempest_conf.log': 'logs'
-        '{{ stage_dir }}/apache': 'logs'
-        '{{ stage_dir }}/apache_config': 'logs'
-        '{{ stage_dir }}/etc': 'logs'
-        '/var/log/rabbitmq': 'logs'
-        '/var/log/postgresql': 'logs'
-        '/var/log/mysql.err': 'logs'
-        '/var/log/mysql.log': 'logs'
-        '/var/log/libvirt': 'logs'
-        '/etc/sudoers': 'logs'
-        '/etc/sudoers.d': 'logs'
-        '{{ stage_dir }}/iptables.txt': 'logs'
-        '{{ stage_dir }}/df.txt': 'logs'
-        '{{ stage_dir }}/pip2-freeze.txt': 'logs'
-        '{{ stage_dir }}/pip3-freeze.txt': 'logs'
-        '{{ stage_dir }}/dpkg-l.txt': 'logs'
-        '{{ stage_dir }}/rpm-qa.txt': 'logs'
-        '{{ stage_dir }}/core': 'logs'
-        '{{ stage_dir }}/listen53.txt': 'logs'
-        '{{ stage_dir }}/deprecations.log': 'logs'
-        '/var/log/ceph': 'logs'
-        '/var/log/openvswitch': 'logs'
-        '/var/log/glusterfs': 'logs'
-        '/etc/glusterfs/glusterd.vol': 'logs'
-        '/etc/resolv.conf': 'logs'
-        '/var/log/unbound.log': 'logs'
+        '{{ devstack_conf_dir }}/local.conf': logs
+        '{{ devstack_conf_dir }}/localrc': logs
+        '{{ devstack_conf_dir }}/.localrc.auto': logs
+        '{{ devstack_conf_dir }}/.stackenv': logs
+        '{{ devstack_log_dir }}/dstat-csv.log': logs
+        '{{ devstack_log_dir }}/devstacklog.txt': logs
+        '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
+        '{{ devstack_full_log }}': logs
+        '{{ stage_dir }}/verify_tempest_conf.log': logs
+        '{{ stage_dir }}/apache': logs
+        '{{ stage_dir }}/apache_config': logs
+        '{{ stage_dir }}/etc': logs
+        /var/log/rabbitmq: logs
+        /var/log/postgresql: logs
+        /var/log/mysql.err: logs
+        /var/log/mysql.log: logs
+        /var/log/libvirt: logs
+        /etc/sudoers: logs
+        /etc/sudoers.d: logs
+        '{{ stage_dir }}/iptables.txt': logs
+        '{{ stage_dir }}/df.txt': logs
+        '{{ stage_dir }}/pip2-freeze.txt': logs
+        '{{ stage_dir }}/pip3-freeze.txt': logs
+        '{{ stage_dir }}/dpkg-l.txt': logs
+        '{{ stage_dir }}/rpm-qa.txt': logs
+        '{{ stage_dir }}/core': logs
+        '{{ stage_dir }}/listen53.txt': logs
+        '{{ stage_dir }}/deprecations.log': logs
+        /var/log/ceph: logs
+        /var/log/openvswitch: logs
+        /var/log/glusterfs: logs
+        /etc/glusterfs/glusterd.vol: logs
+        /etc/resolv.conf: logs
+        /var/log/unbound.log: logs
       extensions_to_txt:
-        conf: True
-        log: True
-        localrc: True
-        stackenv: True
-        auto: True
+        conf: true
+        log: true
+        localrc: true
+        stackenv: true
+        auto: true
     group-vars:
       subnode:
         devstack_localrc:
@@ -211,13 +211,13 @@
       less than the normal minimum set of required-projects.
     nodeset: openstack-single-node
     required-projects:
-      - openstack/requirements
+      - git.openstack.org/openstack/requirements
     vars:
       devstack_localrc:
         # Multinode specific settings
         SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
         HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-        PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+        PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
       devstack_services:
         # Shared services
         dstat: true
@@ -235,7 +235,7 @@
           # Multinode specific settings
           HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
           SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+          PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
           # Subnode specific settings
           DATABASE_TYPE: mysql
           RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
@@ -270,12 +270,12 @@
       and extended.
     nodeset: openstack-single-node
     required-projects:
-      - openstack/cinder
-      - openstack/glance
-      - openstack/keystone
-      - openstack/neutron
-      - openstack/nova
-      - openstack/swift
+      - git.openstack.org/openstack/cinder
+      - git.openstack.org/openstack/glance
+      - git.openstack.org/openstack/keystone
+      - git.openstack.org/openstack/neutron
+      - git.openstack.org/openstack/nova
+      - git.openstack.org/openstack/swift
     timeout: 7200
     vars:
       devstack_localrc:
@@ -284,7 +284,7 @@
         SWIFT_START_ALL_SERVICES: false
         SWIFT_HASH: 1234123412341234
         CINDER_PERIODIC_INTERVAL: 10
-        DEBUG_LIBVIRT_COREDUMPS: True
+        DEBUG_LIBVIRT_COREDUMPS: true
         NOVA_VNC_ENABLED: true
         VNCSERVER_LISTEN: 0.0.0.0
         VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
@@ -292,7 +292,7 @@
         post-config:
           $NEUTRON_CONF:
             DEFAULT:
-              global_physnet_mtu: "{{ external_bridge_mtu }}"
+              global_physnet_mtu: '{{ external_bridge_mtu }}'
       devstack_services:
         # Core services enabled for this branch.
         # This list replaces the test-matrix.
@@ -386,6 +386,19 @@
           VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
 
 - job:
+    name: devstack-ipv6
+    parent: devstack
+    description: |
+      Devstack single node job for integration gate with IPv6.
+    vars:
+      devstack_localrc:
+        SERVICE_IP_VERSION: 6
+        SERVICE_HOST: ""
+        # IPv6 and certificates have a known issue with python2
+        # https://bugs.launchpad.net/devstack/+bug/1794929
+        USE_PYTHON3: true
+
+- job:
     name: devstack-multinode
     parent: devstack
     nodeset: openstack-two-node
@@ -405,10 +418,10 @@
     voting: false
 
 - job:
-    name: devstack-platform-opensuse-423
+    name: devstack-platform-opensuse-150
     parent: tempest-full
-    description: openSUSE 43.2 platform test
-    nodeset: devstack-single-node-opensuse-423
+    description: openSUSE 15.0 platform test
+    nodeset: devstack-single-node-opensuse-150
     voting: false
 
 - job:
@@ -489,19 +502,63 @@
     run: playbooks/unit-tests/run.yaml
 
 - project:
+    templates:
+      - integrated-gate
+      - integrated-gate-py35
+      - publish-openstack-docs-pti
     check:
       jobs:
         - devstack
+        - devstack-ipv6:
+            voting: false
         - devstack-platform-centos-7
-        - devstack-platform-opensuse-423
+        - devstack-platform-opensuse-150
         - devstack-platform-opensuse-tumbleweed
         - devstack-platform-fedora-latest
         - devstack-multinode
         - devstack-unit-tests
+        - openstack-tox-bashate
+        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
+            voting: false
+        - swift-dsvm-functional:
+            voting: false
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-grenade:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-grenade-multinode:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-tempest-linuxbridge:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-multinode-full:
+            voting: false
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
     gate:
       jobs:
         - devstack
         - devstack-unit-tests
+        - openstack-tox-bashate
+        - neutron-grenade-multinode:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-tempest-linuxbridge:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-grenade:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
     # Please add a note on each job and conditions for the job not
     # being experimental any more, so we can keep this list somewhat
     # pruned.
@@ -511,6 +568,16 @@
     #    changes to devstack w/o gating on it for all devstack changes.
     # * nova-next: maintained by nova for unreleased/undefaulted
     #    things like cellsv2 and placement-api
+    # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test
+    #    when neutron-api is served by uwsgi. It is experimental for testing;
+    #    next cycle we can remove this job if things turn out to be
+    #    stable enough.
+    # * neutron-functional-with-uwsgi: maintained by neutron for functional
+    #    test. Next cycle we can remove this one if things turn out to be
+    #    stable enough with uwsgi.
+    # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test.
+    #    Next cycle we can remove this if everything turns out to be stable enough.
+
     experimental:
       jobs:
         - nova-cells-v1:
@@ -518,3 +585,34 @@
               - ^.*\.rst$
               - ^doc/.*$
         - nova-next
+        - neutron-fullstack-with-uwsgi
+        - neutron-functional-with-uwsgi
+        - neutron-tempest-with-uwsgi
+        - devstack-plugin-ceph-tempest:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - devstack-plugin-ceph-tempest-py3:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-tempest-dvr:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - legacy-tempest-dsvm-neutron-dvr-multinode-full:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - neutron-tempest-dvr-ha-multinode-full:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - legacy-tempest-dsvm-lvm-multibackend:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-pg-full:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
diff --git a/HACKING.rst b/HACKING.rst
index d5d6fbc..3853eed 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -47,12 +47,7 @@
 level.
 
 ``doc`` - Contains the Sphinx source for the documentation.
-``tools/build_docs.sh`` is used to generate the HTML versions of the
-DevStack scripts.  A complete doc build can be run with ``tox -edocs``.
-
-``exercises`` - Contains the test scripts used to sanity-check and
-demonstrate some OpenStack functions. These scripts know how to exit
-early or skip services that are not enabled.
+A complete doc build can be run with ``tox -edocs``.
 
 ``extras.d`` - Contains the dispatch scripts called by the hooks in
 ``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins
@@ -183,88 +178,6 @@
 OpenStack project standard.
 
 
-Exercises
----------
-
-The scripts in the exercises directory are meant to 1) perform basic operational
-checks on certain aspects of OpenStack; and b) document the use of the
-OpenStack command-line clients.
-
-In addition to the guidelines above, exercise scripts MUST follow the structure
-outlined here.  ``swift.sh`` is perhaps the clearest example of these guidelines.
-These scripts are executed serially by ``exercise.sh`` in testing situations.
-
-* Begin and end with a banner that stands out in a sea of script logs to aid
-  in debugging failures, particularly in automated testing situations.  If the
-  end banner is not displayed, the script ended prematurely and can be assumed
-  to have failed.
-
-  ::
-
-    echo "**************************************************"
-    echo "Begin DevStack Exercise: $0"
-    echo "**************************************************"
-    ...
-    set +o xtrace
-    echo "**************************************************"
-    echo "End DevStack Exercise: $0"
-    echo "**************************************************"
-
-* The scripts will generally have the shell ``xtrace`` attribute set to display
-  the actual commands being executed, and the ``errexit`` attribute set to exit
-  the script on non-zero exit codes::
-
-    # This script exits on an error so that errors don't compound and you see
-    # only the first error that occurred.
-    set -o errexit
-
-    # Print the commands being run so that we can see the command that triggers
-    # an error.  It is also useful for following as the install occurs.
-    set -o xtrace
-
-* Settings and configuration are stored in ``exerciserc``, which must be
-  sourced after ``openrc`` or ``stackrc``::
-
-    # Import exercise configuration
-    source $TOP_DIR/exerciserc
-
-* There are a couple of helper functions in the common ``functions`` sub-script
-  that will check for non-zero exit codes and unset environment variables and
-  print a message and exit the script.  These should be called after most client
-  commands that are not otherwise checked to short-circuit long timeouts
-  (instance boot failure, for example)::
-
-    swift post $CONTAINER
-    die_if_error "Failure creating container $CONTAINER"
-
-    FLOATING_IP=`euca-allocate-address | cut -f2`
-    die_if_not_set FLOATING_IP "Failure allocating floating IP"
-
-* If you want an exercise to be skipped when for example a service wasn't
-  enabled for the exercise to be run, you can exit your exercise with the
-  special exitcode 55 and it will be detected as skipped.
-
-* The exercise scripts should only use the various OpenStack client binaries to
-  interact with OpenStack.  This specifically excludes any ``*-manage`` tools
-  as those assume direct access to configuration and databases, as well as direct
-  database access from the exercise itself.
-
-* If specific configuration needs to be present for the exercise to complete,
-  it should be staged in ``stack.sh``, or called from ``stack.sh``.
-
-* The ``OS_*`` environment variables should be the only ones used for all
-  authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
-
-.. _CLIAuth: https://wiki.openstack.org/CLIAuth
-
-* The exercise MUST clean up after itself if successful.  If it is not successful,
-  it is assumed that state will be left behind; this allows a chance for developers
-  to look around and attempt to debug the problem.  The exercise SHOULD clean up
-  or graciously handle possible artifacts left over from previous runs if executed
-  again.  It is acceptable to require a reboot or even a re-install of DevStack
-  to restore a clean test environment.
-
-
 Bash Style Guidelines
 ~~~~~~~~~~~~~~~~~~~~~
 DevStack defines a bash set of best practices for maintaining large
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 7efe4d6..022e6ba 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -446,6 +446,16 @@
 
      ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
 
+Use python3
+-----------
+
+By default ``stack.sh`` uses python2 (the exact version is set by
+``PYTHON2_VERSION``). This can be overridden so that DevStack will run
+python3 (the exact version is set by ``PYTHON3_VERSION``).
+
+  ::
+
+     USE_PYTHON3=True
 
 A clean install every time
 --------------------------
@@ -665,8 +675,7 @@
     enable_service n-cell
 
 Be aware that there are some features currently missing in cells, one
-notable one being security groups.  The exercises have been patched to
-disable functionality not supported by cells.
+notable one being security groups.
 
 Cinder
 ~~~~~~
@@ -729,44 +738,6 @@
 
     ENABLE_IDENTITY_V2=False
 
-Exercises
-~~~~~~~~~
-
-``exerciserc`` is used to configure settings for the exercise scripts.
-The values shown below are the default values. These can all be
-overridden by setting them in the ``localrc`` section.
-
-* Max time to wait while vm goes from build to active state
-
-    ::
-
-        ACTIVE_TIMEOUT==30
-
-* Max time to wait for proper IP association and dis-association.
-
-    ::
-
-        ASSOCIATE_TIMEOUT=15
-
-* Max time till the vm is bootable
-
-    ::
-
-        BOOT_TIMEOUT=30
-
-* Max time from run instance command until it is running
-
-    ::
-
-        RUNNING_TIMEOUT=$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))
-
-* Max time to wait for a vm to terminate
-
-    ::
-
-        TERMINATE_TIMEOUT=30
-
-
 .. _arch-configuration:
 
 Architectures
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 7f360c6..12c6d69 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -376,8 +376,8 @@
 
         ## Neutron options
         Q_USE_SECGROUP=True
-        ENABLE_PROJECT_VLANS=True
-        PROJECT_VLAN_RANGE=3001:4000
+        ENABLE_TENANT_VLANS=True
+        TENANT_VLAN_RANGE=3001:4000
         PHYSICAL_NETWORK=default
         OVS_PHYSICAL_BRIDGE=br-ex
 
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 48a4fa8..515ea9a 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -127,7 +127,3 @@
 http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and if
 you give them floating IPs and security group access those VMs will be
 accessible from other machines on your network.
-
-Some examples of using the OpenStack command-line clients ``nova`` and
-``glance`` are in the shakedown scripts in ``devstack/exercises``.
-``exercise.sh`` will run all of those scripts and report on the results.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 2ff4ff0..fcf1e82 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -39,8 +39,9 @@
 -------------
 
 Start with a clean and minimal install of a Linux system. Devstack
-attempts to support Ubuntu 16.04/17.04, Fedora 24/25, CentOS/RHEL 7,
-as well as Debian and OpenSUSE.
+attempts to support the two latest LTS releases of Ubuntu, the
+latest/current Fedora version, CentOS/RHEL 7, as well as Debian and
+OpenSUSE.
 
 If you do not have a preference, Ubuntu 16.04 is the most tested, and
 will probably go the smoothest.
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 814a2b1..2479cd0 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -75,11 +75,3 @@
 
 -  single node
 -  multi-node configurations as are tested by the gate
-
-Exercises
----------
-
-The DevStack exercise scripts are no longer used as integration and gate
-testing as that job has transitioned to Tempest. They are still
-maintained as a demonstrations of using OpenStack from the command line
-and for quick operational testing.
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 9b2cb7e..b02061e 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -27,7 +27,6 @@
 almanach                               `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
 aodh                                   `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
 apmec                                  `git://git.openstack.org/openstack/apmec <https://git.openstack.org/cgit/openstack/apmec>`__
-astara                                 `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
 barbican                               `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
 bilean                                 `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
 blazar                                 `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
@@ -60,11 +59,12 @@
 ec2-api                                `git://git.openstack.org/openstack/ec2-api <https://git.openstack.org/cgit/openstack/ec2-api>`__
 freezer                                `git://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
 freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
+freezer-tempest-plugin                 `git://git.openstack.org/openstack/freezer-tempest-plugin <https://git.openstack.org/cgit/openstack/freezer-tempest-plugin>`__
 freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
-fuxi                                   `git://git.openstack.org/openstack/fuxi <https://git.openstack.org/cgit/openstack/fuxi>`__
 gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
 glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
 group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
+gyan                                   `git://git.openstack.org/openstack/gyan <https://git.openstack.org/cgit/openstack/gyan>`__
 heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
 heat-dashboard                         `git://git.openstack.org/openstack/heat-dashboard <https://git.openstack.org/cgit/openstack/heat-dashboard>`__
 horizon-mellanox                       `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
@@ -99,6 +99,7 @@
 monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
 murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
+networking-ansible                     `git://git.openstack.org/openstack/networking-ansible <https://git.openstack.org/cgit/openstack/networking-ansible>`__
 networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
 networking-baremetal                   `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
@@ -156,6 +157,7 @@
 patrole                                `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
 picasso                                `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
 qinling                                `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
+qinling-dashboard                      `git://git.openstack.org/openstack/qinling-dashboard <https://git.openstack.org/cgit/openstack/qinling-dashboard>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 rally-openstack                        `git://git.openstack.org/openstack/rally-openstack <https://git.openstack.org/cgit/openstack/rally-openstack>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
@@ -168,6 +170,9 @@
 solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
 stackube                               `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
 storlets                               `git://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
+stx-config                             `git://git.openstack.org/openstack/stx-config <https://git.openstack.org/cgit/openstack/stx-config>`__
+stx-fault                              `git://git.openstack.org/openstack/stx-fault <https://git.openstack.org/cgit/openstack/stx-fault>`__
+stx-update                             `git://git.openstack.org/openstack/stx-update <https://git.openstack.org/cgit/openstack/stx-update>`__
 tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
 tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
 tap-as-a-service-dashboard             `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
diff --git a/exercise.sh b/exercise.sh
deleted file mode 100755
index 9067033..0000000
--- a/exercise.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# **exercise.sh**
-
-# Keep track of the current DevStack directory.
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-# Run everything in the exercises/ directory that isn't explicitly disabled
-
-# comma separated list of script basenames to skip
-# to refrain from exercising foo.sh use ``SKIP_EXERCISES=foo``
-SKIP_EXERCISES=${SKIP_EXERCISES:-""}
-
-# comma separated list of script basenames to run
-# to run only foo.sh use ``RUN_EXERCISES=foo``
-basenames=${RUN_EXERCISES:-""}
-
-EXERCISE_DIR=$TOP_DIR/exercises
-
-if [[ -z "${basenames}" ]]; then
-    # Locate the scripts we should run
-    basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
-else
-    # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``.
-    SKIP_EXERCISES=
-fi
-
-# Track the state of each script
-passes=""
-failures=""
-skips=""
-
-# Loop over each possible script (by basename)
-for script in $basenames; do
-    if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then
-        skips="$skips $script"
-    else
-        echo "====================================================================="
-        echo Running $script
-        echo "====================================================================="
-        $EXERCISE_DIR/$script.sh
-        exitcode=$?
-        if [[ $exitcode == 55 ]]; then
-            skips="$skips $script"
-        elif [[ $exitcode -ne 0 ]]; then
-            failures="$failures $script"
-        else
-            passes="$passes $script"
-        fi
-    fi
-done
-
-# Output status of exercise run
-echo "====================================================================="
-for script in $skips; do
-    echo SKIP $script
-done
-for script in $passes; do
-    echo PASS $script
-done
-for script in $failures; do
-    echo FAILED $script
-done
-echo "====================================================================="
-
-if [[ -n "$failures" ]]; then
-    exit 1
-fi
diff --git a/exerciserc b/exerciserc
deleted file mode 100644
index 978e0b3..0000000
--- a/exerciserc
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-# source exerciserc
-#
-# Configure the DevStack exercise scripts
-# For best results, source this _after_ stackrc/localrc as it will set
-# values only if they are not already set.
-
-# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
-
-# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
-
-# Max time from run instance command until it is running
-export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
-
-# Max time to wait for a vm to terminate
-export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
-
-# The size of the volume we want to boot from; some storage back-ends
-# do not allow a disk resize, so it's important that this can be tuned
-export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
deleted file mode 100755
index 8cbca54..0000000
--- a/exercises/aggregates.sh
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env bash
-
-# **aggregates.sh**
-
-# This script demonstrates how to use host aggregates:
-#
-# *  Create an Aggregate
-# *  Updating Aggregate details
-# *  Testing Aggregate metadata
-# *  Testing Aggregate delete
-# *  Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
-# *  Testing add/remove hosts (with one host)
-
-echo "**************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "**************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Test as the admin user
-# note this imports stackrc/functions, etc
-. $TOP_DIR/openrc admin admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Cells does not support aggregates.
-is_service_enabled n-cell && exit 55
-
-# Create an aggregate
-# ===================
-
-AGGREGATE_NAME=test_aggregate_$RANDOM
-AGGREGATE2_NAME=test_aggregate_$RANDOM
-AGGREGATE_A_ZONE=nova
-
-function exit_if_aggregate_present {
-    aggregate_name=$1
-
-    if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
-        echo "SUCCESS $aggregate_name not present"
-    else
-        die $LINENO "found aggregate: $aggregate_name"
-        exit -1
-    fi
-}
-
-exit_if_aggregate_present $AGGREGATE_NAME
-
-AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
-
-AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
-
-# check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
-
-
-# Ensure creating a duplicate fails
-# =================================
-
-if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
-    die $LINENO "could create duplicate aggregate"
-fi
-
-
-# Test aggregate-update (and aggregate-details)
-# =============================================
-AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-
-# Test aggregate-set-metadata
-# ===========================
-META_DATA_1_KEY=asdf
-META_DATA_2_KEY=foo
-META_DATA_3_KEY=bar
-
-#ensure no additional metadata is set
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep 123
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
-
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared"
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-
-# Test aggregate-add/remove-host
-# ==============================
-if [ "$VIRT_DRIVER" == "xenserver" ]; then
-    echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
-fi
-FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
-# Make sure can add two aggregates to same host
-nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
-nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
-if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
-    die $LINENO "could add duplicate host to single aggregate"
-fi
-nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
-nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
-
-# Test aggregate-delete
-# =====================
-nova aggregate-delete $AGGREGATE_ID
-nova aggregate-delete $AGGREGATE2_ID
-exit_if_aggregate_present $AGGREGATE_NAME
-
-set +o xtrace
-echo "**************************************************"
-echo "End DevStack Exercise: $0"
-echo "**************************************************"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
deleted file mode 100755
index 7478bdf..0000000
--- a/exercises/boot_from_volume.sh
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env bash
-
-# **boot_from_volume.sh**
-
-# This script demonstrates how to boot from a volume.  It does the following:
-#
-# *  Create a bootable volume
-# *  Boot a volume-backed instance
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled cinder || exit 55
-
-# Ironic does not support boot from volume.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-boot_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-bfv-inst}
-VOL_NAME=${VOL_NAME:-ex-vol-bfv}
-
-
-# Launching a server
-# ==================
-
-# List servers for project:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
-    # Cells does not support security groups, so force the use of "default"
-    SECGROUP="default"
-    echo "Using the default security group because of Cells."
-else
-    # Create a secgroup
-    if ! nova secgroup-list | grep -q $SECGROUP; then
-        nova secgroup-create $SECGROUP "$SECGROUP description"
-        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-            echo "Security group not created"
-            exit 1
-        fi
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
-fi
-
-# Setup Keypair
-KEY_NAME=test_key
-KEY_FILE=key.pem
-nova keypair-delete $KEY_NAME || true
-nova keypair-add $KEY_NAME > $KEY_FILE
-chmod 600 $KEY_FILE
-
-# Set up volume
-# -------------
-
-# Delete any old volume
-cinder delete $VOL_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
-fi
-
-# Create the bootable volume
-start_time=$(date +%s)
-cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
-    exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME  | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Boot instance
-# -------------
-
-# Boot using the --block-device-mapping param. The format of mapping is:
-# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
-# Leaving the middle two fields blank appears to do-the-right-thing
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Clean up
-# --------
-
-# Delete volume backed instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
-fi
-
-# Wait for volume to be released
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not released"
-    exit 1
-fi
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-if [[ $SECGROUP = "default" ]] ; then
-    echo "Skipping deleting default security group"
-else
-    # Delete secgroup
-    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
deleted file mode 100755
index b380968..0000000
--- a/exercises/client-args.sh
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-args.sh**
-
-# Test OpenStack client authentication arguments handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-# Save the known variables for later
-export x_PROJECT_NAME=$OS_PROJECT_NAME
-export x_USERNAME=$OS_USERNAME
-export x_PASSWORD=$OS_PASSWORD
-export x_AUTH_URL=$OS_AUTH_URL
-
-# Unset the usual variables to force argument processing
-unset OS_PROJECT_NAME
-unset OS_USERNAME
-unset OS_PASSWORD
-unset OS_AUTH_URL
-
-# Common authentication args
-PROJECT_ARG="--os-project-name=$x_PROJECT_NAME"
-ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
-        STATUS_KEYSTONE="Skipped"
-    else
-        echo -e "\nTest Keystone"
-        if openstack $PROJECT_ARG $ARGS catalog show identity; then
-            STATUS_KEYSTONE="Succeeded"
-        else
-            STATUS_KEYSTONE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
-        STATUS_NOVA="Skipped"
-    else
-        # Test OSAPI
-        echo -e "\nTest Nova"
-        if nova $PROJECT_ARG $ARGS flavor-list; then
-            STATUS_NOVA="Succeeded"
-        else
-            STATUS_NOVA="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
-        STATUS_CINDER="Skipped"
-    else
-        echo -e "\nTest Cinder"
-        if cinder $PROJECT_ARG $ARGS list; then
-            STATUS_CINDER="Succeeded"
-        else
-            STATUS_CINDER="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
-        STATUS_GLANCE="Skipped"
-    else
-        echo -e "\nTest Glance"
-        if openstack $PROJECT_ARG $ARGS image list; then
-            STATUS_GLANCE="Succeeded"
-        else
-            STATUS_GLANCE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Swift client
-# ------------
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
-        STATUS_SWIFT="Skipped"
-    else
-        echo -e "\nTest Swift"
-        if swift $PROJECT_ARG $ARGS stat; then
-            STATUS_SWIFT="Succeeded"
-        else
-            STATUS_SWIFT="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
-    if [[ -n "$2" ]]; then
-        echo "$1: $2"
-    fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
-    echo "*********************************************************************"
-    echo "SUCCESS: End DevStack Exercise: $0"
-    echo "*********************************************************************"
-fi
-
-exit $RETURN
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
deleted file mode 100755
index fff04df..0000000
--- a/exercises/client-env.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-env.sh**
-
-# Test OpenStack client environment variable handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
-    is_set $i
-    if [[ $? -ne 0 ]]; then
-        echo "$i expected to be set"
-        ABORT=1
-    fi
-done
-if [[ -n "$ABORT" ]]; then
-    exit 1
-fi
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
-        STATUS_KEYSTONE="Skipped"
-    else
-        echo -e "\nTest Keystone"
-        if openstack endpoint show identity; then
-            STATUS_KEYSTONE="Succeeded"
-        else
-            STATUS_KEYSTONE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
-        STATUS_NOVA="Skipped"
-    else
-        # Test OSAPI
-        echo -e "\nTest Nova"
-        if nova flavor-list; then
-            STATUS_NOVA="Succeeded"
-        else
-            STATUS_NOVA="Failed"
-            RETURN=1
-        fi
-
-    fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
-        STATUS_CINDER="Skipped"
-    else
-        echo -e "\nTest Cinder"
-        if cinder list; then
-            STATUS_CINDER="Succeeded"
-        else
-            STATUS_CINDER="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
-        STATUS_GLANCE="Skipped"
-    else
-        echo -e "\nTest Glance"
-        if openstack image list; then
-            STATUS_GLANCE="Succeeded"
-        else
-            STATUS_GLANCE="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-# Swift client
-# ------------
-
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
-        STATUS_SWIFT="Skipped"
-    else
-        echo -e "\nTest Swift"
-        if swift stat; then
-            STATUS_SWIFT="Succeeded"
-        else
-            STATUS_SWIFT="Failed"
-            RETURN=1
-        fi
-    fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
-    if [[ -n "$2" ]]; then
-        echo "$1: $2"
-    fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
-    echo "*********************************************************************"
-    echo "SUCCESS: End DevStack Exercise: $0"
-    echo "*********************************************************************"
-fi
-
-exit $RETURN
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
deleted file mode 100755
index 5abc713..0000000
--- a/exercises/floating_ips.sh
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env bash
-
-# **floating_ips.sh** - using the cloud can be fun
-
-# Test instance connectivity with the ``nova`` command from ``python-novaclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-test_secgroup}
-
-# Default floating IP pool name
-DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public}
-
-# Additional floating IP pool and range
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-
-# Instance name
-VM_NAME="ex-float"
-
-# Cells does not support floating ips API calls
-is_service_enabled n-cell && exit 55
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
-    nova secgroup-create $SECGROUP "$SECGROUP description"
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        die $LINENO "Security group not created"
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-    die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    die $LINENO "server didn't terminate!"
-    exit 1
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Floating IPs
-# ------------
-
-# Allocate a floating IP from the default pool
-FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
-
-# List floating addresses
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    die $LINENO "Floating IP not allocated"
-fi
-
-# Add floating IP to our server
-nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
-
-# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
-ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME"
-
-if ! is_service_enabled neutron; then
-    # Allocate an IP from second floating pool
-    TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
-    die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
-
-    # list floating addresses
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
-        die $LINENO "Floating IP not allocated"
-    fi
-fi
-
-# Dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
-    die $LINENO "Failure deleting security group rule from $SECGROUP"
-
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
-    die $LINENO "Security group rule not deleted from $SECGROUP"
-fi
-
-# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
-    # Test we aren't able to ping our floating IP within ASSOCIATE_TIMEOUT seconds
-    ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail
-fi
-
-# Clean up
-# --------
-
-if ! is_service_enabled neutron; then
-    # Delete second floating IP
-    nova floating-ip-delete $TEST_FLOATING_IP || \
-        die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
-fi
-
-# Delete the floating ip
-nova floating-ip-delete $FLOATING_IP || \
-    die $LINENO "Failure deleting floating IP $FLOATING_IP"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-# Wait for termination
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    die $LINENO "Server $VM_NAME not deleted"
-fi
-
-# Delete secgroup
-nova secgroup-delete $SECGROUP || \
-    die $LINENO "Failure deleting security group $SECGROUP"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
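
Every wait in the exercise removed above is the same polling idiom: timeout runs an inner shell that retries the check once per second until it passes or the deadline expires. A minimal standalone sketch of that idiom, using a hypothetical wait_for_condition helper that is not part of DevStack:

#!/usr/bin/env bash
# Poll a command once per second until it succeeds or the deadline
# (in seconds) expires; timeout makes the whole loop return non-zero
# if the deadline is hit first.
function wait_for_condition {
    local deadline=$1; shift
    timeout "$deadline" sh -c "while ! $*; do sleep 1; done"
}

# Example: wait up to 30 seconds for loopback to answer a ping.
wait_for_condition 30 "ping -c1 -W1 127.0.0.1 > /dev/null"
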
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
deleted file mode 100755
index e8c8f62..0000000
--- a/exercises/neutron-adv-test.sh
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright 2012, Cisco Systems
-# Copyright 2012, VMware, Inc.
-# Copyright 2012, NTT MCL, Inc.
-#
-# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com
-#
-# **neutron-adv-test.sh**
-
-# Perform integration testing of Nova and other components with Neutron.
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-
-set -o errtrace
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-# Environment
-# -----------
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import neutron functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# If neutron is not enabled we exit with exitcode 55, which means the exercise is skipped.
-neutron_plugin_check_adv_test_requirements || exit 55
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Neutron Settings
-# ----------------
-
-PROJECTS="DEMO1"
-# TODO (nati) Test public network
-#PROJECTS="DEMO1,DEMO2"
-
-PUBLIC_NAME="admin"
-DEMO1_NAME="demo1"
-DEMO2_NAME="demo2"
-
-PUBLIC_NUM_NET=1
-DEMO1_NUM_NET=1
-DEMO2_NUM_NET=2
-
-PUBLIC_NET1_CIDR="200.0.0.0/24"
-DEMO1_NET1_CIDR="10.10.0.0/24"
-DEMO2_NET1_CIDR="10.20.0.0/24"
-DEMO2_NET2_CIDR="10.20.1.0/24"
-
-PUBLIC_NET1_GATEWAY="200.0.0.1"
-DEMO1_NET1_GATEWAY="10.10.0.1"
-DEMO2_NET1_GATEWAY="10.20.0.1"
-DEMO2_NET2_GATEWAY="10.20.1.1"
-
-PUBLIC_NUM_VM=1
-DEMO1_NUM_VM=1
-DEMO2_NUM_VM=2
-
-PUBLIC_VM1_NET='admin-net1'
-DEMO1_VM1_NET='demo1-net1'
-# Multinic settings. But this fails without a NIC setting in the OS image
-DEMO2_VM1_NET='demo2-net1'
-DEMO2_VM2_NET='demo2-net2'
-
-PUBLIC_NUM_ROUTER=1
-DEMO1_NUM_ROUTER=1
-DEMO2_NUM_ROUTER=1
-
-PUBLIC_ROUTER1_NET="admin-net1"
-DEMO1_ROUTER1_NET="demo1-net1"
-DEMO2_ROUTER1_NET="demo2-net1"
-
-# Various functions
-# -----------------
-
-function foreach_project {
-    COMMAND=$1
-    for PROJECT in ${PROJECTS//,/ };do
-        eval ${COMMAND//%PROJECT%/$PROJECT}
-    done
-}
-
-function foreach_project_resource {
-    COMMAND=$1
-    RESOURCE=$2
-    for PROJECT in ${PROJECTS//,/ };do
-        eval 'NUM=$'"${PROJECT}_NUM_$RESOURCE"
-        for i in `seq $NUM`;do
-            local COMMAND_LOCAL=${COMMAND//%PROJECT%/$PROJECT}
-            COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i}
-            eval $COMMAND_LOCAL
-        done
-    done
-}
-
-function foreach_project_vm {
-    COMMAND=$1
-    foreach_project_resource "$COMMAND" 'VM'
-}
-
-function foreach_project_net {
-    COMMAND=$1
-    foreach_project_resource "$COMMAND" 'NET'
-}
-
-function get_image_id {
-    local IMAGE_ID
-    IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-    die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
-    echo "$IMAGE_ID"
-}
-
-function get_project_id {
-    local PROJECT_NAME=$1
-    local PROJECT_ID
-    PROJECT_ID=`openstack project list | grep " $PROJECT_NAME " | head -n 1 | get_field 1`
-    die_if_not_set $LINENO PROJECT_ID "Failure retrieving PROJECT_ID for $PROJECT_NAME"
-    echo "$PROJECT_ID"
-}
-
-function get_user_id {
-    local USER_NAME=$1
-    local USER_ID
-    USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
-    die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
-    echo "$USER_ID"
-}
-
-function get_role_id {
-    local ROLE_NAME=$1
-    local ROLE_ID
-    ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
-    die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
-    echo "$ROLE_ID"
-}
-
-function get_network_id {
-    local NETWORK_NAME="$1"
-    local NETWORK_ID
-    NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
-    echo $NETWORK_ID
-}
-
-function get_flavor_id {
-    local INSTANCE_TYPE=$1
-    local FLAVOR_ID
-    FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
-    die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
-    echo "$FLAVOR_ID"
-}
-
-function confirm_server_active {
-    local VM_UUID=$1
-    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-        echo "server '$VM_UUID' did not become active!"
-        false
-    fi
-}
-
-function neutron_debug_admin {
-    local os_username=$OS_USERNAME
-    local os_project_id=$OS_PROJECT_ID
-    source $TOP_DIR/openrc admin admin
-    neutron-debug $@
-    source $TOP_DIR/openrc $os_username $os_project_id
-}
-
-function add_project {
-    openstack project create $1
-    openstack user create $2 --password ${ADMIN_PASSWORD} --project $1
-    openstack role add Member --project $1 --user $2
-}
-
-function remove_project {
-    local PROJECT=$1
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    openstack project delete $PROJECT_ID
-}
-
-function remove_user {
-    local USER=$1
-    local USER_ID
-    USER_ID=$(get_user_id $USER)
-    openstack user delete $USER_ID
-}
-
-function create_projects {
-    source $TOP_DIR/openrc admin admin
-    add_project demo1 demo1 demo1
-    add_project demo2 demo2 demo2
-    source $TOP_DIR/openrc demo demo
-}
-
-function delete_projects_and_users {
-    source $TOP_DIR/openrc admin admin
-    remove_user demo1
-    remove_project demo1
-    remove_user demo2
-    remove_project demo2
-    echo "removed all projects"
-    source $TOP_DIR/openrc demo demo
-}
-
-function create_network {
-    local PROJECT=$1
-    local GATEWAY=$2
-    local CIDR=$3
-    local NUM=$4
-    local EXTRA=$5
-    local NET_NAME="${PROJECT}-net$NUM"
-    local ROUTER_NAME="${PROJECT}-router${NUM}"
-    source $TOP_DIR/openrc admin admin
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    local NET_ID
-    NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
-    die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
-    openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
-    neutron_debug_admin probe-create --device-owner compute $NET_ID
-    source $TOP_DIR/openrc demo demo
-}
-
-function create_networks {
-    foreach_project_net 'create_network ${%PROJECT%_NAME} ${%PROJECT%_NET%NUM%_GATEWAY} ${%PROJECT%_NET%NUM%_CIDR} %NUM% ${%PROJECT%_NET%NUM%_EXTRA}'
-    #TODO(nati) test security group function
-    # allow ICMP for both project's security groups
-    #source $TOP_DIR/openrc demo1 demo1
-    #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-    #source $TOP_DIR/openrc demo2 demo2
-    #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_vm {
-    local PROJECT=$1
-    local NUM=$2
-    local NET_NAMES=$3
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    local NIC=""
-    for NET_NAME in ${NET_NAMES//,/ };do
-        NIC="$NIC --nic net-id="`get_network_id $NET_NAME`
-    done
-    #TODO (nati) Add multi-nic test
-    #TODO (nati) Add public-net test
-    local VM_UUID
-    VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
-        --image $(get_image_id) \
-        $NIC \
-        $PROJECT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
-    die_if_not_set $LINENO VM_UUID "Failure launching $PROJECT-server$NUM"
-    confirm_server_active $VM_UUID
-}
-
-function create_vms {
-    foreach_project_vm 'create_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function ping_ip {
-    # Test agent connection.  Assumes namespaces are disabled, and
-    # that DHCP is in use, but not L3
-    local VM_NAME=$1
-    local NET_NAME=$2
-    IP=$(get_instance_ip $VM_NAME $NET_NAME)
-    ping_check $IP $BOOT_TIMEOUT $NET_NAME
-}
-
-function check_vm {
-    local PROJECT=$1
-    local NUM=$2
-    local VM_NAME="$PROJECT-server$NUM"
-    local NET_NAME=$3
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    ping_ip $VM_NAME $NET_NAME
-    # TODO (nati) test ssh connection
-    # TODO (nati) test inter connection between vm
-    # TODO (nati) test dhcp host routes
-    # TODO (nati) test multi-nic
-}
-
-function check_vms {
-    foreach_project_vm 'check_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function shutdown_vm {
-    local PROJECT=$1
-    local NUM=$2
-    source $TOP_DIR/openrc $PROJECT $PROJECT
-    VM_NAME=${PROJECT}-server$NUM
-    nova delete $VM_NAME
-}
-
-function shutdown_vms {
-    foreach_project_vm 'shutdown_vm ${%PROJECT%_NAME} %NUM%'
-    if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
-        die $LINENO "Some VMs failed to shutdown"
-    fi
-}
-
-function delete_network {
-    local PROJECT=$1
-    local NUM=$2
-    local NET_NAME="${PROJECT}-net$NUM"
-    source $TOP_DIR/openrc admin admin
-    local PROJECT_ID
-    PROJECT_ID=$(get_project_id $PROJECT)
-    #TODO(nati) comment out until l3-agent merged
-    #for res in port subnet net router;do
-    for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
-        delete_probe $net_id
-        openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
-        openstack network delete $net_id
-    done
-    source $TOP_DIR/openrc demo demo
-}
-
-function delete_networks {
-    foreach_project_net 'delete_network ${%PROJECT%_NAME} %NUM%'
-    # TODO(nati) add security group check after it is implemented
-    # source $TOP_DIR/openrc demo1 demo1
-    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
-    # source $TOP_DIR/openrc demo2 demo2
-    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_all {
-    create_projects
-    create_networks
-    create_vms
-}
-
-function delete_all {
-    shutdown_vms
-    delete_networks
-    delete_projects_and_users
-}
-
-function all {
-    create_all
-    check_vms
-    delete_all
-}
-
-# Test functions
-# --------------
-
-function test_functions {
-    IMAGE=$(get_image_id)
-    echo $IMAGE
-
-    PROJECT_ID=$(get_project_id demo)
-    echo $PROJECT_ID
-
-    FLAVOR_ID=$(get_flavor_id m1.tiny)
-    echo $FLAVOR_ID
-
-    NETWORK_ID=$(get_network_id admin)
-    echo $NETWORK_ID
-}
-
-# Usage and main
-# --------------
-
-function usage {
-    echo "$0: [-h]"
-    echo "  -h, --help              Display help message"
-    echo "  -t, --project            Create projects"
-    echo "  -n, --net               Create networks"
-    echo "  -v, --vm                Create vms"
-    echo "  -c, --check             Check connection"
-    echo "  -x, --delete-projects    Delete projects"
-    echo "  -y, --delete-nets       Delete networks"
-    echo "  -z, --delete-vms        Delete vms"
-    echo "  -T, --test              Test functions"
-}
-
-function main {
-
-    echo Description
-
-    if [ $# -eq 0 ] ; then
-        # if no args are provided, run all tests
-        all
-    else
-
-        while [ "$1" != "" ]; do
-            case $1 in
-                -h | --help )   usage
-                                exit
-                                ;;
-                -n | --net )    create_networks
-                                exit
-                                ;;
-                -v | --vm )     create_vms
-                                exit
-                                ;;
-                -t | --project ) create_projects
-                                exit
-                                ;;
-                -c | --check )   check_vms
-                                exit
-                                ;;
-                -T | --test )   test_functions
-                                exit
-                                ;;
-                -x | --delete-projects ) delete_projects_and_users
-                                exit
-                                ;;
-                -y | --delete-nets ) delete_networks
-                                exit
-                                ;;
-                -z | --delete-vms ) shutdown_vms
-                                exit
-                                ;;
-                -a | --all )    all
-                                exit
-                                ;;
-                * )             usage
-                                exit 1
-            esac
-            shift
-        done
-    fi
-}
-
-trap failed ERR
-function failed {
-    local r=$?
-    set +o errtrace
-    set +o xtrace
-    echo "Failed to execute"
-    echo "Starting cleanup..."
-    delete_all
-    echo "Finished cleanup"
-    exit $r
-}
-
-# Kick off script
-# ---------------
-
-echo $*
-main $*
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
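
The foreach_project_* helpers in the script removed above drive everything from command templates: the %PROJECT% and %NUM% placeholders are filled in with bash string substitution and the result is passed to eval once per project/resource pair. A minimal sketch of the same pattern in isolation (project names and counts here are purely illustrative):

#!/usr/bin/env bash
PROJECTS="DEMO1,DEMO2"
DEMO1_NUM_NET=1
DEMO2_NUM_NET=2

function foreach_project_net_sketch {
    local template=$1
    local project cmd num i
    for project in ${PROJECTS//,/ }; do
        # Indirect lookup of e.g. DEMO1_NUM_NET
        eval 'num=$'"${project}_NUM_NET"
        for i in $(seq "$num"); do
            cmd=${template//%PROJECT%/$project}
            cmd=${cmd//%NUM%/$i}
            eval "$cmd"
        done
    done
}

foreach_project_net_sketch 'echo "would create net %NUM% for project %PROJECT%"'
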
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
deleted file mode 100755
index 2f78e39..0000000
--- a/exercises/sec_groups.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-# **sec_groups.sh**
-
-# Test security groups via the command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-
-# Testing Security Groups
-# =======================
-
-# List security groups
-nova secgroup-list
-
-# Create random name for new sec group and create secgroup of said name
-SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
-nova secgroup-create $SEC_GROUP_NAME 'a test security group'
-
-# Add some rules to the secgroup
-RULES_TO_ADD=( 22 3389 5900 )
-
-for RULE in "${RULES_TO_ADD[@]}"; do
-    nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Check to make sure rules were added
-SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
-die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
-for i in "${RULES_TO_ADD[@]}"; do
-    skip=
-    for j in "${SEC_GROUP_RULES[@]}"; do
-        [[ $i == $j ]] && { skip=1; break; }
-    done
-    [[ -n $skip ]] || exit 1
-done
-
-# Delete rules and secgroup
-for RULE in "${RULES_TO_ADD[@]}"; do
-    nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Delete secgroup
-nova secgroup-delete $SEC_GROUP_NAME || \
-    die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
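
The rule-verification loop in sec_groups.sh is a plain membership test: every port in RULES_TO_ADD must show up in the parsed secgroup listing. A small sketch of that check factored into a hypothetical contains helper (the arrays are illustrative; in the script the second one came from nova secgroup-list-rules):

#!/usr/bin/env bash
# Return 0 if the first argument appears among the remaining arguments.
function contains {
    local needle=$1 item
    shift
    for item in "$@"; do
        [[ $item == "$needle" ]] && return 0
    done
    return 1
}

expected=( 22 3389 5900 )
actual=( 22 3389 )

for port in "${expected[@]}"; do
    contains "$port" "${actual[@]}" || echo "rule for port $port is missing"
done
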
diff --git a/exercises/swift.sh b/exercises/swift.sh
deleted file mode 100755
index 8aa376b..0000000
--- a/exercises/swift.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# **swift.sh**
-
-# Test swift via the ``python-openstackclient`` command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If swift is not enabled we exit with exitcode 55, which means the
-# exercise is skipped.
-is_service_enabled s-proxy || exit 55
-
-# Container name
-CONTAINER=ex-swift
-OBJECT=/etc/issue
-
-
-# Testing Swift
-# =============
-
-# Check if we have access to swift via keystone
-openstack object store account show || die $LINENO "Failure getting account status"
-
-# We start by creating a test container
-openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
-
-# add a file into it.
-openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER"
-
-# list the objects
-openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
-
-# delete the object first
-openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER"
-
-# delete the container
-openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
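
Like the other exercises being removed, swift.sh used exit code 55 as the agreed "skipped" sentinel. A sketch of how a runner can distinguish skip from failure under that convention (the runner shown here is hypothetical, not copied from DevStack):

#!/usr/bin/env bash
./swift.sh
rc=$?
if [ "$rc" -eq 55 ]; then
    echo "SKIP swift.sh"
elif [ "$rc" -ne 0 ]; then
    echo "FAIL swift.sh (rc=$rc)"
else
    echo "PASS swift.sh"
fi
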
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
deleted file mode 100755
index e7c3560..0000000
--- a/exercises/volumes.sh
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env bash
-
-# **volumes.sh**
-
-# Test cinder volumes with the ``cinder`` command from ``python-cinderclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55, which means the
-# exercise is skipped.
-is_service_enabled cinder || exit 55
-
-# Ironic does not currently support volume attachment.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-vol_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-vol-inst}
-VOL_NAME="ex-vol-$(openssl rand -hex 4)"
-
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
-    # Cells does not support security groups, so force the use of "default"
-    SECGROUP="default"
-    echo "Using the default security group because of Cells."
-else
-    # Create a secgroup
-    if ! nova secgroup-list | grep -q $SECGROUP; then
-        nova secgroup-create $SECGROUP "$SECGROUP description"
-        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-            echo "Security group not created"
-            exit 1
-        fi
-    fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
-    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-    die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    die $LINENO "server didn't terminate!"
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Volumes
-# -------
-
-# Verify it doesn't exist
-if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
-    die $LINENO "Volume $VOL_NAME already exists"
-fi
-
-# Create a new volume
-start_time=$(date +%s)
-cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not created"
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Attach to server
-DEVICE=/dev/vdb
-start_time=$(date +%s)
-nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-attach in $((end_time - start_time)) seconds"
-
-VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
-if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
-    die $LINENO "Volume not attached to correct instance"
-fi
-
-# Clean up
-# --------
-
-# Detach volume
-start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-detach in $((end_time - start_time)) seconds"
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    die $LINENO "Volume $VOL_NAME not deleted"
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    die $LINENO "Server $VM_NAME not deleted"
-fi
-
-if [[ $SECGROUP = "default" ]] ; then
-    echo "Skipping deleting default security group"
-else
-    # Delete secgroup
-    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
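
volumes.sh timed each cinder and nova call by snapshotting date +%s before and after. The same timing pattern wrapped in a hypothetical run_timed helper (not a DevStack function), as a sketch:

#!/usr/bin/env bash
# Run a command and report how long it took, in whole seconds.
function run_timed {
    local label=$1; shift
    local start_time end_time
    start_time=$(date +%s)
    "$@"
    end_time=$(date +%s)
    echo "Completed $label in $((end_time - start_time)) seconds"
}

# Example: time an arbitrary command.
run_timed "sleep test" sleep 2
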
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index bfd7567..efcfc03 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,5 +1,5 @@
 <VirtualHost *:80>
-    WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
+    WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py
     WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP}
     WSGIApplicationGroup %{GLOBAL}
 
diff --git a/files/apache-neutron.template b/files/apache-neutron.template
new file mode 100644
index 0000000..c7796b9
--- /dev/null
+++ b/files/apache-neutron.template
@@ -0,0 +1,36 @@
+Listen %PUBLICPORT%
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined
+
+<Directory %NEUTRON_BIN%>
+    Require all granted
+</Directory>
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup neutron-server
+    WSGIScriptAlias / %NEUTRON_BIN%/neutron-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%M"
+    ErrorLog /var/log/%APACHE_NAME%/neutron.log
+    CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+
+%SSLLISTEN%<VirtualHost *:443>
+%SSLLISTEN%    %SSLENGINE%
+%SSLLISTEN%    %SSLCERTFILE%
+%SSLLISTEN%    %SSLKEYFILE%
+%SSLLISTEN%</VirtualHost>
+
+Alias /networking %NEUTRON_BIN%/neutron-api
+<Location /networking>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup neutron-server
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
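
The new apache-neutron.template is rendered by DevStack's apache helpers, which replace the %PLACEHOLDER% tokens before the site file is installed. A rough, illustrative sketch of that substitution step with sed (only a subset of the tokens is shown, and the helper DevStack actually uses may differ):

#!/usr/bin/env bash
template=files/apache-neutron.template
rendered=/tmp/neutron-api.conf

sed -e "s|%PUBLICPORT%|9696|g" \
    -e "s|%NEUTRON_BIN%|/usr/local/bin|g" \
    -e "s|%APIWORKERS%|2|g" \
    -e "s|%USER%|stack|g" \
    -e "s|%APACHE_NAME%|apache2|g" \
    "$template" > "$rendered"
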
diff --git a/functions-common b/functions-common
index 25e28bd..af95bfb 100644
--- a/functions-common
+++ b/functions-common
@@ -228,9 +228,9 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
+    echo "$msg" 1>&2;
     if [[ -n ${LOGDIR} ]]; then
-        echo $msg >> "${LOGDIR}/error.log"
+        echo "$msg" >> "${LOGDIR}/error.log"
     fi
     $xtrace
     return $exitcode
@@ -283,7 +283,7 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg
+    echo "$msg"
     $xtrace
     return $exitcode
 }
@@ -374,8 +374,10 @@
     elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
         DISTRO="opensuse-$os_RELEASE"
         # Tumbleweed uses "n/a" as a codename, and the release is a datestring
-        # like 20180218, so not very useful.
-        [ "$os_CODENAME" = "n/a" ] && DISTRO="opensuse-tumbleweed"
+        # like 20180218, so not very useful. Leap however uses a release
+        # with a "dot", so for example 15.0
+        [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \
+            DISTRO="opensuse-tumbleweed"
     elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
         # just use major release
         DISTRO="sle${os_RELEASE%.*}"
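
The Tumbleweed check above leans on bash pattern substitution: ${os_RELEASE/\./} strips the first dot, so the comparison only holds when the release string contains no dot at all (a datestring such as 20180218 rather than a Leap release such as 15.0). The test in isolation, as a small sketch:

#!/usr/bin/env bash
function has_no_dot {
    local release=$1
    # ${release/\./} removes the first "."; if nothing changed,
    # the string never contained a dot.
    [ "$release" = "${release/\./}" ]
}

has_no_dot 20180218 && echo "tumbleweed-style release"   # prints
has_no_dot 15.0     && echo "leap-style release"         # does not print
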
@@ -1376,7 +1378,7 @@
     [[ "$(id -u)" = "0" ]] && sudo="env"
     $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
+        zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@"
 }
 
 function write_user_unit_file {
@@ -1439,24 +1441,24 @@
     # do some sanity checks on $cmd to see things we don't expect to work
 
     if [[ "$cmd" =~ "sudo" ]]; then
-        local msg=<<EOF
+        read -r -d '' msg << EOF || true  # read returns 1 for EOF, but it is ok here
 You are trying to use run_process with sudo, this is not going to work under systemd.
 
-If you need to run a service as a user other than $STACK_USER call it with:
+If you need to run a service as a user other than \$STACK_USER call it with:
 
    run_process \$name \$cmd \$group \$user
 EOF
-        die $LINENO $msg
+        die $LINENO "$msg"
     fi
 
     if [[ ! "$cmd" =~ ^/ ]]; then
-        local msg=<<EOF
+        read -r -d '' msg << EOF || true  # read returns 1 for EOF, but it is ok here
 The cmd="$cmd" does not start with an absolute path. It will fail to
 start under systemd.
 
 Please update your run_process stanza to have an absolute path.
 EOF
-        die $LINENO $msg
+        die $LINENO "$msg"
     fi
 
 }
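
The change above also fixes a real bug: "local msg=<<EOF" never captured the message, because a here-document is input redirection rather than an assignment, so msg ended up empty. Reading the heredoc with read -d '' is the usual way to capture a multi-line string; read returns non-zero at EOF in that mode, hence the "|| true". A minimal sketch:

# With -d '' read consumes everything up to EOF and then returns
# non-zero, so "|| true" keeps the non-zero status from being fatal.
read -r -d '' msg << EOF || true
line one
a literal \$STACK_USER stays unexpanded because it is escaped
EOF

echo "$msg"
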
@@ -2052,6 +2054,11 @@
     fi
 }
 
+# Remove the "[]" brackets around URL-quoted IPv6 addresses
+function ipv6_unquote {
+    echo $1 | tr -d []
+}
+
 # Gracefully cp only if source file/dir exists
 # cp_it source destination
 function cp_it {
diff --git a/inc/python b/inc/python
index 96be107..d8b8169 100644
--- a/inc/python
+++ b/inc/python
@@ -49,7 +49,7 @@
     fi
     $xtrace
 
-    if python3_enabled && [ "$os_VENDOR" = "Fedora" -a $os_RELEASE -gt 26 ]; then
+    if python3_enabled && [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then
         # Default Python 3 install prefix changed to /usr/local in Fedora 27:
         # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe
         echo "/usr/local/bin"
diff --git a/lib/cinder b/lib/cinder
index f6cad9d..76bf928 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -67,7 +67,7 @@
 CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
 CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to auto, which will do thin provisioning if it's a fresh
@@ -96,9 +96,9 @@
 # https://bugs.launchpad.net/cinder/+bug/1180976
 CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
 
-# Centos7 switched to using LIO and that's all that's supported,
-# although the tgt bits are in EPEL we don't want that for CI
-if is_fedora; then
+# Centos7 and OpenSUSE switched to using LIO and that's all that's supported,
+# although the tgt bits are in EPEL and OpenSUSE, we don't want that for CI
+if is_fedora || is_suse; then
     CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
     if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
         die "lioadm is the only valid Cinder target_helper config on this platform"
@@ -268,7 +268,12 @@
         configure_cinder_image_volume_cache
     fi
 
-    if is_service_enabled swift; then
+    if is_service_enabled c-bak; then
+        # NOTE(mriedem): The default backup driver uses swift and if we're
+        # on a subnode we might not know if swift is enabled, but chances are
+        # good that it is on the controller so configure the backup service
+        # to use it. If we want to configure the backup service to use
+        # a non-swift driver, we'll likely need environment variables.
         iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
     fi
 
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 00a0bb3..33c9706 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -65,7 +65,7 @@
         sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
-        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
         iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
         iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
         iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0089663..ac0c083 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -16,7 +16,7 @@
 register_database mysql
 
 MYSQL_SERVICE_NAME=mysql
-if is_fedora && ! is_oraclelinux; then
+if is_suse || is_fedora && ! is_oraclelinux; then
     MYSQL_SERVICE_NAME=mariadb
 fi
 
@@ -93,7 +93,7 @@
 
     # Change bind-address from localhost (127.0.0.1) to any (::) and
     # set default db type to InnoDB
-    iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS"
+    iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
     iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
     iniset -sudo $my_conf mysqld default-storage-engine InnoDB
     iniset -sudo $my_conf mysqld max_connections 1024
diff --git a/lib/glance b/lib/glance
index 528a05f..94f6a22 100644
--- a/lib/glance
+++ b/lib/glance
@@ -65,7 +65,7 @@
 
 # Glance connection info.  Note the port must be specified.
 GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST}
-GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292}
 GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
@@ -151,7 +151,7 @@
 
     # Store specific configs
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
-    iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
+    iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
 
     # CORS feature support - to allow calls from Horizon by default
     if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
@@ -217,7 +217,7 @@
     iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
     iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
     iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
-    iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
+    iniset $GLANCE_CACHE_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
 
     # Store specific confs
     iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
diff --git a/lib/keystone b/lib/keystone
index 7978fea..02e2822 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -49,7 +49,6 @@
 
 KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
 KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini
 KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
@@ -64,9 +63,6 @@
     KEYSTONE_DEPLOY=mod_wsgi
 fi
 
-# Select the token persistence backend driver
-KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql}
-
 # Select the Identity backend driver
 KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql}
 
@@ -80,7 +76,8 @@
 KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql}
 
 # Select Keystone's token provider (and format)
-# Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
+# Refer keystone doc for supported token provider:
+# https://docs.openstack.org/keystone/latest/admin/token-provider.html
 KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
@@ -203,23 +200,7 @@
 
     if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
         install -m 600 /dev/null $KEYSTONE_CONF
-        if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then
-            cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI"
-        fi
     fi
-    if [[ -f "$KEYSTONE_PASTE_INI" ]]; then
-        iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI"
-    else
-        # compatibility with mixed cfg and paste.deploy configuration
-        KEYSTONE_PASTE_INI="$KEYSTONE_CONF"
-    fi
-
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        # Only Identity v3 API should be available; then disable v2 pipelines
-        inidelete $KEYSTONE_PASTE_INI composite:main \\/v2.0
-        inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0
-    fi
-
     # Populate ``keystone.conf``
     if is_service_enabled ldap; then
         iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains"
@@ -236,7 +217,7 @@
     iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
     iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
 
-    iniset_rpc_backend keystone $KEYSTONE_CONF
+    iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
 
     local service_port=$KEYSTONE_SERVICE_PORT
     local auth_port=$KEYSTONE_AUTH_PORT
@@ -262,8 +243,6 @@
 
     iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
 
-    iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND"
-
     # Set up logging
     if [ "$SYSLOG" != "False" ]; then
         iniset $KEYSTONE_CONF DEFAULT use_syslog "True"
@@ -460,11 +439,6 @@
     $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync
     time_stop "dbsync"
 
-    if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
-        # Set up certificates
-        rm -rf $KEYSTONE_CONF_DIR/ssl
-        $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF pki_setup
-    fi
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then
         rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
         $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup
diff --git a/lib/neutron b/lib/neutron
index 9f9b132..4847e87 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -28,6 +28,12 @@
 # Set up default directories
 GITDIR["python-neutronclient"]=$DEST/python-neutronclient
 
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be
+# stable enough
+NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False}
 NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
 NEUTRON_DIR=$DEST/neutron
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -58,6 +64,8 @@
 NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+
 # By default, use the ML2 plugin
 NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
 NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
@@ -286,7 +294,7 @@
     # Format logging
     setup_logging $NEUTRON_CONF
 
-    if is_service_enabled tls-proxy; then
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
         iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -357,6 +365,15 @@
 
 # create_neutron_accounts() - Create required service accounts
 function create_neutron_accounts_new {
+    local neutron_url
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+    else
+        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+    fi
+
+
     if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
 
         create_service_user "neutron"
@@ -364,8 +381,7 @@
         neutron_service=$(get_or_create_service "neutron" \
             "network" "Neutron Service")
         get_or_create_endpoint $neutron_service \
-            "$REGION_NAME" \
-            "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/"
+            "$REGION_NAME" "$neutron_url"
     fi
 }
 
@@ -427,6 +443,7 @@
 function start_neutron_api {
     local service_port=$NEUTRON_SERVICE_PORT
     local service_protocol=$NEUTRON_SERVICE_PROTOCOL
+    local neutron_url
     if is_service_enabled tls-proxy; then
         service_port=$NEUTRON_SERVICE_PORT_INT
         service_protocol="http"
@@ -440,17 +457,24 @@
         opts+=" --config-file $cfg_file"
     done
 
-    # Start the Neutron service
-    # TODO(sc68cal) Stop hard coding this
-    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
-
-    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then
-        die $LINENO "neutron-api did not start"
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
+        enable_service neutron-rpc-server
+        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
+    else
+        # Start the Neutron service
+        # TODO(sc68cal) Stop hard coding this
+        run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
+        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+        fi
     fi
 
-    # Start proxy if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+    if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
+        die $LINENO "neutron-api did not start"
     fi
 }
 
@@ -497,6 +521,10 @@
         stop_process $serv
     done
 
+    if is_service_enabled neutron-rpc-server; then
+        stop_process neutron-rpc-server
+    fi
+
     if is_service_enabled neutron-dhcp; then
         stop_process neutron-dhcp
         pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
@@ -551,6 +579,13 @@
 # neutron-legacy is removed.
 # TODO(sc68cal) Remove when neutron-legacy is no more.
 function cleanup_neutron {
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-api
+        stop_process neutron-rpc-server
+        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+        sudo rm -f $(apache_site_config_for neutron-api)
+    fi
+
     if is_neutron_legacy_enabled; then
         # Call back to old function
         cleanup_mutnauq "$@"
@@ -566,6 +601,10 @@
     else
         configure_neutron_new "$@"
     fi
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
+    fi
 }
 
 function configure_neutron_nova {
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index bee032a..be5b73f 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -86,6 +86,15 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be
+# stable enough
+NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False}
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
 AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -111,7 +120,7 @@
 # Default protocol
 Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
 # Default listen address
-Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
@@ -121,7 +130,7 @@
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
 Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
 # Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
+Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
@@ -402,6 +411,13 @@
 
 # Migrated from keystone_data.sh
 function create_mutnauq_accounts {
+    local neutron_url
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
+    else
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
+    fi
+
     if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
 
         create_service_user "neutron"
@@ -409,8 +425,7 @@
         get_or_create_service "neutron" "network" "Neutron Service"
         get_or_create_endpoint \
             "network" \
-            "$REGION_NAME" \
-            "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
+            "$REGION_NAME" "$neutron_url"
     fi
 }
 
@@ -460,6 +475,7 @@
     local service_port=$Q_PORT
     local service_protocol=$Q_PROTOCOL
     local cfg_file_options
+    local neutron_url
 
     cfg_file_options="$(determine_config_files neutron-server)"
 
@@ -468,16 +484,24 @@
         service_protocol="http"
     fi
     # Start the Neutron service
-    run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        enable_service neutron-api
+        run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
+        enable_service neutron-rpc-server
+        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+    else
+        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+        neutron_url=$service_protocol://$Q_HOST:$service_port
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+        fi
+    fi
     echo "Waiting for Neutron to start..."
 
-    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port"
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
     test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-
-    # Start proxy if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
-    fi
 }
 
 # Control of the l2 agent is separated out to make it easier to test partial
@@ -532,7 +556,12 @@
         [ ! -z "$pid" ] && sudo kill -9 $pid
     fi
 
-    stop_process q-svc
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-rpc-server
+        stop_process neutron-api
+    else
+        stop_process q-svc
+    fi
 
     if is_service_enabled q-l3; then
         sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
@@ -604,7 +633,7 @@
             IP_UP="sudo ip link set $to_intf up"
             if [[ "$af" == "inet" ]]; then
                 IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
-                ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
+                ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP "
             fi
         fi
 
@@ -715,7 +744,7 @@
     # Format logging
     setup_logging $NEUTRON_CONF
 
-    if is_service_enabled tls-proxy; then
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
         # Set the service port for a proxy to take the original
         iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
         iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 52c6ad5..d3f5bd5 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Neuton Big Switch/FloodLight plugin
+# Neutron Big Switch/FloodLight plugin
 # ------------------------------------
 
 # Save trace setting
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 9be32b7..ec289f6 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -103,7 +103,7 @@
 default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
 die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
 
-default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}')
+default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
 
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
@@ -395,6 +395,10 @@
 
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+        # Ensure IPv6 forwarding is enabled on the host
+        sudo sysctl -w net.ipv6.conf.all.forwarding=1
+        # If the Linux host considers itself to be a router, it will
+        # ignore all router advertisements.
         # Ensure IPv6 RAs are accepted on interfaces with a default route.
         # This is needed for neutron-based devstack clouds to work in
         # IPv6-only clouds in the gate. Please do not remove this without
@@ -405,8 +409,6 @@
             # device name would be reinterpreted as a slash, causing an error.
             sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
         done
-        # Ensure IPv6 forwarding is enabled on the host
-        sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
         IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
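
The added comment explains the ordering: once net.ipv6.conf.all.forwarding is 1 the kernel treats the host as a router and, at the default setting, ignores router advertisements, which is why accept_ra is raised to 2 on the default-route devices afterwards. Reduced to its two sysctl calls (the device name here is illustrative):

sudo sysctl -w net.ipv6.conf.all.forwarding=1
# accept_ra=2 means "accept router advertisements even when forwarding is enabled"
sudo sysctl -w net/ipv6/conf/eth0/accept_ra=2
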
diff --git a/lib/nova b/lib/nova
index 20f2995..8b49116 100644
--- a/lib/nova
+++ b/lib/nova
@@ -92,7 +92,7 @@
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
-NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
 
 # Option to enable/disable config drive
@@ -518,7 +518,7 @@
     iniset $NOVA_CONF upgrade_levels compute "auto"
 
     write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
-    write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}"
+    write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
 
     if is_service_enabled ceilometer; then
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
@@ -922,6 +922,9 @@
         # RPC, we also disable track_instance_changes.
         iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False
         iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
+        # Make sure we nuke any database config
+        inidelete $NOVA_CPU_CONF database connection
+        inidelete $NOVA_CPU_CONF api_database connection
     fi
 
     # Console proxies were configured earlier in create_nova_conf. Now that the
diff --git a/lib/swift b/lib/swift
index 762f1dd..3b3e608 100644
--- a/lib/swift
+++ b/lib/swift
@@ -55,7 +55,7 @@
 SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
 SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
 SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
-SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
+SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 
 # TODO: add logging to different location.
 
diff --git a/lib/tempest b/lib/tempest
index 60f571c..fba8826 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -279,8 +279,8 @@
         iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
     fi
     iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
-    if [[ "$TEMPEST_AUTH_VERSION" != "v2.0" ]]; then
-        # we're going to disable v2 admin unless we're using v2.0 by default.
+    if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
+        # we're going to disable v2 admin unless we're using v2 by default.
         iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
     fi
 
@@ -456,9 +456,6 @@
         TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
     fi
     iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
-    # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
-    iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
-    iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1)
     local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
     local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
     # Reset microversions to None where v2 is running which does not support microversion.
@@ -577,11 +574,11 @@
     fi
 
     # The requirements might be on a different branch, while tempest needs master requirements.
-    (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt
+    (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
     tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt
 
     # Auth:
-    iniset $TEMPEST_CONFIG auth tempest_roles "Member"
+    iniset $TEMPEST_CONFIG auth tempest_roles "member"
     if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
         if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
             tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
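Reviewer note: switching from the local "master" ref to "origin/master" means the constraints lookup still resolves when the requirements checkout sits on a stable branch or a detached HEAD with no local master branch. A minimal sketch of the resulting flow, with placeholder paths:

    # Sketch: install tempest requirements against master upper-constraints
    # even when the requirements repo is checked out on another branch.
    cd /opt/stack/requirements                               # placeholder path
    git show origin/master:upper-constraints.txt > /tmp/u-c-m.txt
    pip install -c /tmp/u-c-m.txt -r /opt/stack/tempest/requirements.txt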
diff --git a/lib/tls b/lib/tls
index a72b708..217f40e 100644
--- a/lib/tls
+++ b/lib/tls
@@ -37,7 +37,7 @@
 
 if is_service_enabled tls-proxy; then
     # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
-    TLS_IP=${TLS_IP:-$SERVICE_IP}
+    TLS_IP=${TLS_IP:-$(ipv6_unquote $SERVICE_HOST)}
 fi
 
 DEVSTACK_HOSTNAME=$(hostname -f)
@@ -67,9 +67,9 @@
     # build common config file
 
     # Verify ``TLS_IP`` is good
-    if [[ -n "$HOST_IP" && "$HOST_IP" != "$TLS_IP" ]]; then
+    if [[ -n "$SERVICE_HOST" && "$(ipv6_unquote $SERVICE_HOST)" != "$TLS_IP" ]]; then
         # auto-discover has changed the IP
-        TLS_IP=$HOST_IP
+        TLS_IP=$(ipv6_unquote $SERVICE_HOST)
     fi
 }
 
@@ -227,8 +227,13 @@
 function init_cert {
     if [[ ! -r $DEVSTACK_CERT ]]; then
         if [[ -n "$TLS_IP" ]]; then
-            # Lie to let incomplete match routines work
-            TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
+            if python3_enabled; then
+                TLS_IP="IP:$TLS_IP"
+            else
+                # Lie to let incomplete match routines work with python2
+                # see https://bugs.python.org/issue23239
+                TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
+            fi
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -246,11 +251,6 @@
     local alt_names=$4
 
     if [ "$common_name" != "$SERVICE_HOST" ]; then
-        if [[ -z "$alt_names" ]]; then
-            alt_names="DNS:$SERVICE_HOST"
-        else
-            alt_names="$alt_names,DNS:$SERVICE_HOST"
-        fi
         if is_ipv4_address "$SERVICE_HOST" ; then
             alt_names="$alt_names,IP:$SERVICE_HOST"
         fi
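Reviewer note: under python3 the generated certificate now carries the address only as an IP-type subjectAltName, while python2 keeps the DNS/IP pair workaround for bpo-23239. To see which SAN entries actually land in a certificate, an openssl inspection along these lines works; the path is a placeholder for wherever DEVSTACK_CERT points.

    # Inspect the subjectAltName entries of the generated certificate.
    openssl x509 -in /opt/stack/data/devstack-cert.pem -noout -text \
        | grep -A1 'Subject Alternative Name'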
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index afbf11d..bd44153 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -49,7 +49,7 @@
 
 function _check_elasticsearch_ready {
     # poll elasticsearch to see if it's started
-    if ! wait_for_service 30 http://localhost:9200; then
+    if ! wait_for_service 120 http://localhost:9200; then
         die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
     fi
 }
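Reviewer note: wait_for_service comes from DevStack's shared functions, and the change only widens the polling window from 30 to 120 seconds. As a rough, stand-in sketch of what such a check does (poll the HTTP endpoint until it answers or the timeout expires), not DevStack's implementation:

    # Stand-in sketch: poll a URL until it responds or the timeout is hit.
    function wait_for_url {
        local timeout=$1 url=$2 elapsed=0
        until curl -fsS -o /dev/null "$url"; do
            sleep 1
            elapsed=$((elapsed + 1))
            [ "$elapsed" -ge "$timeout" ] && return 1
        done
    }

    wait_for_url 120 http://localhost:9200 || echo "Elasticsearch not reachable"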
diff --git a/stack.sh b/stack.sh
index 56e00bf..be3c4be 100755
--- a/stack.sh
+++ b/stack.sh
@@ -221,7 +221,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -1137,6 +1137,7 @@
     echo_summary "Configuring Neutron"
 
     configure_neutron
+
     # Run init_neutron only on the node hosting the Neutron API server
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
         init_neutron
diff --git a/stackrc b/stackrc
index 4861819..34bd677 100644
--- a/stackrc
+++ b/stackrc
@@ -258,7 +258,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="rocky"
+DEVSTACK_SERIES="stein"
 
 ##############
 #
@@ -887,10 +887,10 @@
 
     DEF_SERVICE_HOST=[$HOST_IPV6]
     DEF_SERVICE_LOCAL_HOST=::1
-    DEF_SERVICE_LISTEN_ADDRESS=::
+    DEF_SERVICE_LISTEN_ADDRESS="[::]"
 fi
 
-# This is either 0.0.0.0 for IPv4 or :: for IPv6
+# This is either 0.0.0.0 for IPv4 or [::] for IPv6
 SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}}
 
 # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for
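Reviewer note: with the default listen address now the bracketed "[::]" form, services that cannot parse brackets are expected to unquote it themselves, as the nova and swift hunks above do. A hedged local.conf sketch for an IPv6 deployment follows; the address is a placeholder and the variable names mirror this diff.

    [[local|localrc]]
    # Placeholder address; use the host's real global IPv6 address.
    HOST_IPV6=2001:db8::10
    SERVICE_IP_VERSION=6
    # Bind services on all IPv6 interfaces (the new default shown above).
    SERVICE_LISTEN_ADDRESS="[::]"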
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 9147932..a939e30 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -205,6 +205,19 @@
     fi
 }
 
+function fixup_suse {
+    if ! is_suse; then
+        return
+    fi
+
+    # Disable apparmor profiles in openSUSE distros
+    # to avoid issues with haproxy and dnsmasq
+    if [ -x /usr/sbin/aa-enabled ] && sudo /usr/sbin/aa-enabled -q; then
+        sudo systemctl disable apparmor
+        sudo /usr/sbin/aa-teardown
+    fi
+}
+
 # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
 # connection issues under proxy so re-install the latest version using
 # pip. To avoid having pip's virtualenv overwritten by the distro's
@@ -239,5 +252,6 @@
     fixup_uca
     fixup_python_packages
     fixup_fedora
+    fixup_suse
     fixup_virtualenv
 }
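Reviewer note: the new fixup_suse hook tears AppArmor down only when the tooling is present and reports it enabled. Assuming the apparmor utilities are installed, the result can be confirmed on a node with:

    # Confirm AppArmor is out of the way after fixup_suse has run.
    sudo /usr/sbin/aa-enabled; echo "aa-enabled exit code: $?"   # non-zero once disabled
    systemctl is-enabled apparmor                                # expect: disabled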
diff --git a/tox.ini b/tox.ini
index 74436b0..f643fdb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,7 @@
 install_command = pip install {opts} {packages}
 
 [testenv:bashate]
+basepython = python3
 # if you want to test out some changes you have made to bashate
 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
 # modified bashate tree
@@ -34,6 +35,7 @@
          -print0 | xargs -0 bashate -v -iE006 -eE005,E042"
 
 [testenv:docs]
+basepython = python3
 deps = -r{toxinidir}/doc/requirements.txt
 whitelist_externals = bash
 setenv =
@@ -42,5 +44,6 @@
   python setup.py build_sphinx
 
 [testenv:venv]
+basepython = python3
 deps = -r{toxinidir}/doc/requirements.txt
 commands = {posargs}
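Reviewer note: with basepython pinned, the lint and docs environments run under python3 regardless of which interpreter tox itself was installed with; invocation is unchanged, for example:

    # Run the bashate lint and the docs build under the python3-pinned envs.
    tox -e bashate
    tox -e docs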