Merge "Separate start/stop control of Neutron L2 agent."
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index eeb1f21..d4968a6 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -63,11 +63,6 @@
 * YAMAMOTO Takashi <yamamoto@valinux.co.jp>
 * Fumihiko Kakuma <kakuma@valinux.co.jp>
 
-Sahara
-~~~~~~
-
-* Sergey Lukjanov <slukjanov@mirantis.com>
-
 Swift
 ~~~~~
 
diff --git a/README.md b/README.md
index 455e1c6..acc3e5a 100644
--- a/README.md
+++ b/README.md
@@ -117,19 +117,13 @@
 
 # RPC Backend
 
-Multiple RPC backends are available. Currently, this
-includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
-choice may be selected via the `localrc` section.
+Support for a RabbitMQ RPC backend is included. Additional RPC backends may
+be available via external plugins.  Enabling or disabling RabbitMQ is handled
+via the usual service functions and ``ENABLED_SERVICES``.
 
-Note that selecting more than one RPC backend will result in a failure.
+Example disabling RabbitMQ in ``local.conf``:
 
-Example (ZeroMQ):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
-
-Example (Qpid):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
+    disable_service rabbit
 
 # Apache Frontend
 
@@ -193,7 +187,7 @@
 
 Basic Setup
 
-In order to enable Neutron a single node setup, you'll need the
+In order to enable Neutron in a single node setup, you'll need the
 following settings in your `local.conf`:
 
     disable_service n-net
@@ -203,47 +197,38 @@
     enable_service q-l3
     enable_service q-meta
     enable_service q-metering
-    # Optional, to enable tempest configuration as part of DevStack
-    enable_service tempest
 
 Then run `stack.sh` as normal.
 
 DevStack supports setting specific Neutron configuration flags to the
-service, Open vSwitch plugin and LinuxBridge plugin configuration files.
-To make use of this feature, the settings can be added to ``local.conf``.
-The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed
-in the near future.  The ``local.conf`` headers for the replacements are:
-
-* ``Q_SRV_EXTRA_OPTS``:
+service, ML2 plugin, DHCP, and L3 configuration files:
 
     [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [linuxbridge]   # or [ovs]
-
-Example extra config in `local.conf`:
-
-    [[post-config|/$Q_PLUGIN_CONF_FILE]]
-    [agent]
-    tunnel_type=vxlan
-    vxlan_udp_port=8472
+    [ml2]
+    mechanism_drivers=openvswitch,l2population
 
     [[post-config|$NEUTRON_CONF]]
     [DEFAULT]
-    tenant_network_type=vxlan
+    quota_port=42
 
-DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin
-can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This
-is a simple way to configure the ml2 plugin:
+    [[post-config|$Q_L3_CONF_FILE]]
+    [DEFAULT]
+    agent_mode=legacy
+
+    [[post-config|$Q_DHCP_CONF_FILE]]
+    [DEFAULT]
+    dnsmasq_dns_servers = 8.8.8.8,8.8.4.4
+
+The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute
+hosts. This is a simple way to configure the ML2 plugin:
 
     # VLAN configuration
-    Q_PLUGIN=ml2
     ENABLE_TENANT_VLANS=True
 
     # GRE tunnel configuration
-    Q_PLUGIN=ml2
     ENABLE_TENANT_TUNNELS=True
 
     # VXLAN tunnel configuration
-    Q_PLUGIN=ml2
     Q_ML2_TENANT_NETWORK_TYPE=vxlan
 
 The above will default in DevStack to using the OVS on each compute host.
@@ -376,6 +361,22 @@
 one being security groups.  The exercises have been patched to disable
 functionality not supported by cells.
 
+# IPv6
+
+By default, most OpenStack services bind to 0.0.0.0 and register their
+service endpoints as IPv4 addresses. The `SERVICE_IP_VERSION` variable
+controls this behavior and allows the services to operate over IPv6
+instead of IPv4.
+
+For this, add the following to `local.conf`:
+
+    SERVICE_IP_VERSION=6
+
+When set to `6`, DevStack services will open listen sockets on `::` and
+service endpoints will be registered using `HOST_IPV6` as the address.
+The default value for this setting is `4`. Dual-mode support, for
+example `4+6`, is not currently supported.
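+
+If the default interface does not carry the IPv6 address you want to
+use, `HOST_IPV6` can also be set explicitly in `local.conf`. A minimal
+sketch, assuming the documentation-range address below is replaced with
+one of your own:
+
+    SERVICE_IP_VERSION=6
+    HOST_IPV6=2001:db8::10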
+
 
 # Local Configuration
 
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 3e9aa45..6e3ec02 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -26,7 +26,7 @@
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'oslosphinx' ]
+extensions = [ 'oslosphinx', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ]
 
 todo_include_todos = True
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 8e2e7ff..6052576 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -137,6 +137,11 @@
 available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IP`` is not set
 by default.
 
+``HOST_IPV6`` is normally detected on the first run of ``stack.sh`` but
+will not be set if there is no IPv6 address on the default Ethernet interface.
+Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``.
+``HOST_IPV6`` is not set by default.
+
 Common Configuration Variables
 ==============================
 
@@ -201,7 +206,7 @@
 
     | *Defaults: ``LOGFILE="" LOGDAYS=7 LOG_COLOR=True``*
     |  By default ``stack.sh`` output is only written to the console
-       where is runs. It can be sent to a file in addition to the console
+       where it runs. It can be sent to a file in addition to the console
        by setting ``LOGFILE`` to the fully-qualified name of the
        destination log file. A timestamp will be appended to the given
        filename for each run of ``stack.sh``.
@@ -391,6 +396,8 @@
         ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api
 
 IP Version
+----------
+
     | Default: ``IP_VERSION=4+6``
     | This setting can be used to configure DevStack to create either an IPv4,
       IPv6, or dual stack tenant data network by setting ``IP_VERSION`` to
@@ -418,6 +425,25 @@
     | *Note: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
       can be configured with any valid IPv6 prefix. The default values make
       use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.*
+    |
+
+    | Default: ``SERVICE_IP_VERSION=4``
+    | This setting configures DevStack services to operate over either
+      IPv4 or IPv6, by setting ``SERVICE_IP_VERSION`` to ``4`` or ``6``
+      respectively. When set to ``4``, DevStack services will open listen
+      sockets on 0.0.0.0 and service endpoints will be registered using
+      ``HOST_IP`` as the address. When set to ``6``, DevStack services
+      will open listen sockets on ``::`` and service endpoints will be
+      registered using ``HOST_IPV6`` as the address. Dual-mode support,
+      for example ``4+6``, is not currently supported.
+    | The following optional variable can be used to alter the default IPv6
+      address used:
+    |
+
+    ::
+
+        HOST_IPV6=${some_local_ipv6_address}
 
 Examples
 ========
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index d3b491f..0db8932 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -2,151 +2,157 @@
 FAQ
 ===
 
--  `General Questions <#general>`__
--  `Operation and Configuration <#ops_conf>`__
--  `Miscellaneous <#misc>`__
+.. contents::
+   :local:
 
 General Questions
 =================
 
-Q: Can I use DevStack for production?
-    A: No. We mean it. Really. DevStack makes some implementation
-    choices that are not appropriate for production deployments. We
-    warned you!
-Q: Then why selinux in enforcing mode?
-    A: That is the default on current Fedora and RHEL releases. DevStack
-    has (rightly so) a bad reputation for its security practices; it has
-    always been meant as a development tool first and system integration
-    later. This is changing as the security issues around OpenStack's
-    use of root (for example) have been tightened and developers need to
-    be better equipped to work in these environments. ``stack.sh``'s use
-    of root is primarily to support the activities that would be handled
-    by packaging in "real" deployments. To remove additional protections
-    that will be desired/required in production would be a step
-    backward.
-Q: But selinux is disabled in RHEL!
-    A: Today it is, yes. That is a specific exception that certain
-    DevStack contributors fought strongly against. The primary reason it
-    was allowed was to support using RHEL6 as the Python 2.6 test
-    platform and that took priority time-wise. This will not be the case
-    with RHEL 7.
-Q: Why a shell script, why not chef/puppet/...
-    A: The script is meant to be read by humans (as well as ran by
-    computers); it is the primary documentation after all. Using a
-    recipe system requires everyone to agree and understand chef or
-    puppet.
-Q: Why not use Crowbar?
-    A: DevStack is optimized for documentation & developers. As some of
-    us use `Crowbar <https://github.com/dellcloudedge/crowbar>`__ for
-    production deployments, we hope developers documenting how they
-    setup systems for new features supports projects like Crowbar.
-Q: I'd like to help!
-    A: That isn't a question, but please do! The source for DevStack is
-    at
-    `git.openstack.org <https://git.openstack.org/cgit/openstack-dev/devstack>`__
-    and bug reports go to
-    `LaunchPad <http://bugs.launchpad.net/devstack/>`__. Contributions
-    follow the usual process as described in the `developer
-    guide <http://docs.openstack.org/infra/manual/developers.html>`__. This Sphinx
-    documentation is housed in the doc directory.
-Q: Why not use packages?
-    A: Unlike packages, DevStack leaves your cloud ready to develop -
-    checkouts of the code and services running in screen. However, many
-    people are doing the hard work of packaging and recipes for
-    production deployments. We hope this script serves as a way to
-    communicate configuration changes between developers and packagers.
-Q: Why isn't $MY\_FAVORITE\_DISTRO supported?
-    A: DevStack is meant for developers and those who want to see how
-    OpenStack really works. DevStack is known to run on the
-    distro/release combinations listed in ``README.md``. DevStack is
-    only supported on releases other than those documented in
-    ``README.md`` on a best-effort basis.
-Q: What about Fedora/RHEL/CentOS?
-    A: Fedora and CentOS/RHEL are supported via rpm dependency files and
-    specific checks in ``stack.sh``. Support will follow the pattern set
-    with the Ubuntu testing, i.e. only a single release of the distro
-    will receive regular testing, others will be handled on a
-    best-effort basis.
-Q: Are there any differences between Ubuntu and Fedora support?
-    A: Neutron is not fully supported prior to Fedora 18 due lack of
-    OpenVSwitch packages.
-Q: Why can't I use another shell?
-    A: DevStack now uses some specific bash-ism that require Bash 4, such
-    as associative arrays. Simple compatibility patches have been accepted
-    in the past when they are not complex, at this point no additional
-    compatibility patches will be considered except for shells matching
-    the array functionality as it is very ingrained in the repo and project
-    management.
-Q: But, but, can't I test on OS/X?
-   A: Yes, even you, core developer who complained about this, needs to
-   install bash 4 via homebrew to keep running tests on OS/X.  Get a Real
-   Operating System.   (For most of you who don't know, I am referring to
-   myself.)
+Can I use DevStack for production?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack is targeted at developers and CI systems that use the raw
+upstream code.  It makes many choices that are not appropriate for
+production systems.
+
+Your best option is probably to choose a `distribution of OpenStack
+<https://www.openstack.org/marketplace/distros/>`__.
+
+Why a shell script, why not chef/puppet/...
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The script is meant to be read by humans (as well as run by
+computers); it is the primary documentation after all. Using a recipe
+system requires everyone to agree on and understand chef or puppet.
+
+I'd like to help!
+~~~~~~~~~~~~~~~~~
+
+That isn't a question, but please do! The source for DevStack is at
+`git.openstack.org
+<https://git.openstack.org/cgit/openstack-dev/devstack>`__ and bug
+reports go to `LaunchPad
+<http://bugs.launchpad.net/devstack/>`__. Contributions follow the
+usual process as described in the `developer guide
+<http://docs.openstack.org/infra/manual/developers.html>`__. This
+Sphinx documentation is housed in the doc directory.
+
+Why not use packages?
+~~~~~~~~~~~~~~~~~~~~~
+
+Unlike packages, DevStack leaves your cloud ready to develop -
+checkouts of the code and services running in screen. However, many
+people are doing the hard work of packaging and recipes for production
+deployments.
+
+Why isn't $MY\_FAVORITE\_DISTRO supported?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack is meant for developers and those who want to see how
+OpenStack really works. DevStack is known to run on the distro/release
+combinations listed in ``README.md``. Releases other than those
+documented in ``README.md`` are supported only on a best-effort
+basis.
+
+Are there any differences between Ubuntu and Centos/Fedora support?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both should work well and are tested by DevStack CI.
+
+Why can't I use another shell?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack now uses some specific bash-isms that require Bash 4, such as
+associative arrays. Simple compatibility patches have been accepted in
+the past when they were not complex; at this point no additional
+compatibility patches will be considered except for shells providing
+equivalent array functionality, as it is deeply ingrained in the repo
+and project management.
+
+Can I test on OS/X?
+~~~~~~~~~~~~~~~~~~~
+
+Some people have success with bash 4 installed via homebrew to keep
+running tests on OS/X.
+
+Can I at least source ``openrc`` with ``zsh``?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+People have reported success with a special function to run ``openrc``
+through bash for this:
+
+.. code-block:: bash
+
+   function sourceopenrc {
+       pushd ~/devstack >/dev/null
+       eval $(bash -c ". openrc $1 $2;env|sed -n '/OS_/ { s/^/export /;p}'")
+       popd >/dev/null
+   }
+
 
 Operation and Configuration
 ===========================
 
-Q: Can DevStack handle a multi-node installation?
-    A: Indirectly, yes. You run DevStack on each node with the
-    appropriate configuration in ``local.conf``. The primary
-    considerations are turning off the services not required on the
-    secondary nodes, making sure the passwords match and setting the
-    various API URLs to the right place.
-Q: How can I document the environment that DevStack is using?
-    A: DevStack includes a script (``tools/info.sh``) that gathers the
-    versions of the relevant installed apt packages, pip packages and
-    git repos. This is a good way to verify what Python modules are
-    installed.
-Q: How do I turn off a service that is enabled by default?
-    A: Services can be turned off by adding ``disable_service xxx`` to
-    ``local.conf`` (using ``n-vol`` in this example):
+Can DevStack handle a multi-node installation?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Yes, see the :doc:`multinode lab guide <guides/multinode-lab>`.
+
+How can I document the environment that DevStack is using?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DevStack includes a script (``tools/info.sh``) that gathers the
+versions of the relevant installed apt packages, pip packages and git
+repos. This is a good way to verify what Python modules are
+installed.
+
+How do I turn off a service that is enabled by default?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Services can be turned off by adding ``disable_service xxx`` to
+``local.conf`` (using ``n-vol`` in this example):
 
     ::
 
         disable_service n-vol
 
-Q: Is enabling a service that defaults to off done with the reverse of the above?
-    A: Of course!
+Is enabling a service that defaults to off done with the reverse of the above?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Of course!
 
     ::
 
-        enable_service qpid
+        enable_service q-svc
 
-Q: How do I run a specific OpenStack milestone?
-    A: OpenStack milestones have tags set in the git repo. Set the appropriate tag in the ``*_BRANCH`` variables in ``local.conf``.  Swift is on its own release schedule so pick a tag in the Swift repo that is just before the milestone release. For example:
+How do I run a specific OpenStack milestone?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+OpenStack milestones have tags set in the git repo. Set the
+appropriate tag in the ``*_BRANCH`` variables in ``local.conf``.
+Swift is on its own release schedule so pick a tag in the Swift repo
+that is just before the milestone release. For example:
 
     ::
 
         [[local|localrc]]
-        GLANCE_BRANCH=stable/juno
-        HORIZON_BRANCH=stable/juno
-        KEYSTONE_BRANCH=stable/juno
-        NOVA_BRANCH=stable/juno
-        GLANCE_BRANCH=stable/juno
-        NEUTRON_BRANCH=stable/juno
-        SWIFT_BRANCH=2.2.1
+        GLANCE_BRANCH=stable/kilo
+        HORIZON_BRANCH=stable/kilo
+        KEYSTONE_BRANCH=stable/kilo
+        NOVA_BRANCH=stable/kilo
+        GLANCE_BRANCH=stable/kilo
+        NEUTRON_BRANCH=stable/kilo
+        SWIFT_BRANCH=2.3.0
 
-Q: Why not use [STRIKEOUT:``tools/pip-requires``]\ ``requirements.txt`` to grab project dependencies?
-    [STRIKEOUT:The majority of deployments will use packages to install
-    OpenStack that will have distro-based packages as dependencies.
-    DevStack installs as many of these Python packages as possible to
-    mimic the expected production environment.] Certain Linux
-    distributions have a 'lack of workaround' in their Python
-    configurations that installs vendor packaged Python modules and
-    pip-installed modules to the SAME DIRECTORY TREE. This is causing
-    heartache and moving us in the direction of installing more modules
-    from PyPI than vendor packages. However, that is only being done as
-    necessary as the packaging needs to catch up to the development
-    cycle anyway so this is kept to a minimum.
-Q: What can I do about RabbitMQ not wanting to start on my fresh new VM?
-    A: This is often caused by ``erlang`` not being happy with the
-    hostname resolving to a reachable IP address. Make sure your
-    hostname resolves to a working IP address; setting it to 127.0.0.1
-    in ``/etc/hosts`` is often good enough for a single-node
-    installation. And in an extreme case, use ``clean.sh`` to eradicate
-    it and try again.
-Q: How can I set up Heat in stand-alone configuration?
-    A: Configure ``local.conf`` thusly:
+What can I do about RabbitMQ not wanting to start on my fresh new VM?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is often caused by ``erlang`` not being happy with the hostname
+resolving to a reachable IP address. Make sure your hostname resolves
+to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is
+often good enough for a single-node installation. And in an extreme
+case, use ``clean.sh`` to eradicate it and try again.
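+
+A minimal ``/etc/hosts`` sketch for a single-node install (the hostname
+``devstack-vm`` is just an example)::
+
+    127.0.0.1   localhost devstack-vm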
+
+How can I set up Heat in stand-alone configuration?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure ``local.conf`` thusly:
 
     ::
 
@@ -156,22 +162,25 @@
         KEYSTONE_SERVICE_HOST=<keystone-host>
         KEYSTONE_AUTH_HOST=<keystone-host>
 
-Q: Why are my configuration changes ignored?
-    A: You may have run into the package prerequisite installation
-    timeout. ``tools/install_prereqs.sh`` has a timer that skips the
-    package installation checks if it was run within the last
-    ``PREREQ_RERUN_HOURS`` hours (default is 2). To override this, set
-    ``FORCE_PREREQ=1`` and the package checks will never be skipped.
+Why are my configuration changes ignored?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You may have run into the package prerequisite installation
+timeout. ``tools/install_prereqs.sh`` has a timer that skips the
+package installation checks if it was run within the last
+``PREREQ_RERUN_HOURS`` hours (default is 2). To override this, set
+``FORCE_PREREQ=1`` and the package checks will never be skipped.
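+
+For example, assuming you export it in the shell before running
+``stack.sh``::
+
+    export FORCE_PREREQ=1
+    ./stack.sh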
 
 Miscellaneous
 =============
 
-Q: ``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages.
-    A: [Another not-a-question] No it isn't. Stuff in there is to
-    correct problems in an environment that need to be fixed elsewhere
-    or may/will be fixed in a future release. In the case of
-    ``httplib2`` and ``prettytable`` specific problems with specific
-    versions are being worked around. If later releases have those
-    problems than we'll add them to the script. Knowing about the broken
-    future releases is valuable rather than polling to see if it has
-    been fixed.
+``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Stuff in there is to correct problems in an environment that need to
+be fixed elsewhere or may/will be fixed in a future release. In the
+case of ``httplib2`` and ``prettytable``, specific problems with
+specific versions are being worked around. If later releases have
+those problems, then we'll add them to the script. Knowing about the
+broken future releases is valuable rather than polling to see if it
+has been fixed.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index b35492e..c652bac 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -88,7 +88,7 @@
     parm:           nested:int
 
 To make the above value persistent across reboots, add an entry in
-/etc/modprobe.ddist.conf so it looks as below::
+/etc/modprobe.d/dist.conf so it looks as below::
 
     cat /etc/modprobe.d/dist.conf
     options kvm-amd nested=y
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index b2617c9..27d71f1 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -178,7 +178,7 @@
     MYSQL_HOST=192.168.42.11
     RABBIT_HOST=192.168.42.11
     GLANCE_HOSTPORT=192.168.42.11:9292
-    ENABLED_SERVICES=n-cpu,n-net,n-api,c-sch,c-api,c-vol
+    ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol
     NOVA_VNC_ENABLED=True
     NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html"
     VNCSERVER_LISTEN=$HOST_IP
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index b0a8907..40a5632 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -5,11 +5,77 @@
 This guide will walk you through using OpenStack neutron with the ML2
 plugin and the Open vSwitch mechanism driver.
 
-Network Interface Configuration
-===============================
 
-To use neutron, it is suggested that two network interfaces be present
-in the host operating system.
+Using Neutron with a Single Interface
+=====================================
+
+In some instances, like on a developer laptop, there is only one
+network interface available. In this scenario, the physical
+interface is added to the Open vSwitch bridge, and the IP address of
+the laptop is migrated onto the bridge interface. That way, the
+physical interface can be used to transmit tenant network traffic,
+the OpenStack API traffic, and management traffic.
+
+
+Physical Network Setup
+----------------------
+
+In most cases where DevStack is being deployed with a single
+interface, there is a hardware router that is being used for external
+connectivity and DHCP. The developer machine is connected to this
+network and is on a shared subnet with other machines.
+
+.. nwdiag::
+
+        nwdiag {
+                inet [ shape = cloud ];
+                router;
+                inet -- router;
+
+                network hardware_network {
+                        address = "172.18.161.0/24"
+                        router [ address = "172.18.161.1" ];
+                        devstack_laptop [ address = "172.18.161.6" ];
+                }
+        }
+
+
+DevStack Configuration
+----------------------
+
+
+::
+
+        HOST_IP=172.18.161.6
+        SERVICE_HOST=172.18.161.6
+        MYSQL_HOST=172.18.161.6
+        RABBIT_HOST=172.18.161.6
+        GLANCE_HOSTPORT=172.18.161.6:9292
+        ADMIN_PASSWORD=secrete
+        MYSQL_PASSWORD=secrete
+        RABBIT_PASSWORD=secrete
+        SERVICE_PASSWORD=secrete
+        SERVICE_TOKEN=secrete
+
+        ## Neutron options
+        Q_USE_SECGROUP=True
+        FLOATING_RANGE="172.18.161.1/24"
+        FIXED_RANGE="10.0.0.0/24"
+        Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
+        PUBLIC_NETWORK_GATEWAY="172.18.161.1"
+        Q_L3_ENABLED=True
+        PUBLIC_INTERFACE=eth0
+        Q_USE_PROVIDERNET_FOR_PUBLIC=True
+        OVS_PHYSICAL_BRIDGE=br-ex
+        PUBLIC_BRIDGE=br-ex
+        OVS_BRIDGE_MAPPINGS=public:br-ex
+
+
+Using Neutron with Multiple Interfaces
+======================================
 
 The first interface, eth0 is used for the OpenStack management (API,
 message bus, etc) as well as for ssh for an administrator to access
@@ -195,15 +261,18 @@
 
         ## Neutron Networking options used to create Neutron Subnets
 
-        FIXED_RANGE="10.1.1.0/24"
+        FIXED_RANGE="203.0.113.0/24"
         PROVIDER_SUBNET_NAME="provider_net"
         PROVIDER_NETWORK_TYPE="vlan"
         SEGMENTATION_ID=2010
 
 In this configuration we are defining FIXED_RANGE to be a
-subnet that exists in the private RFC1918 address space - however
-in a real setup FIXED_RANGE would be a public IP address range, so
-that you could access your instances from the public internet.
+publicly routed IPv4 subnet. In this specific instance we are using
+the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
+which is used for documentation.  In your DevStack setup, FIXED_RANGE
+would be a public IP address range allocated to you or your
+organization, so that you could access your instances from the
+public internet.
 
 The following is a snippet of the DevStack configuration on the
 compute node.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index e0c3f3a..2dd0241 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -10,6 +10,7 @@
    overview
    configuration
    plugins
+   plugin-registry
    faq
    changes
    hacking
@@ -19,9 +20,9 @@
 
 #. Select a Linux Distribution
 
-   Only Ubuntu 14.04 (Trusty), Fedora 20 and CentOS/RHEL 7 are
-   documented here. OpenStack also runs and is packaged on other flavors
-   of Linux such as OpenSUSE and Debian.
+   Only Ubuntu 14.04 (Trusty), Fedora 21 (or Fedora 22) and CentOS/RHEL
+   7 are documented here. OpenStack also runs and is packaged on other
+   flavors of Linux such as OpenSUSE and Debian.
 
 #. Install Selected OS
 
@@ -169,7 +170,6 @@
 * `lib/nova <lib/nova.html>`__
 * `lib/oslo <lib/oslo.html>`__
 * `lib/rpc\_backend <lib/rpc_backend.html>`__
-* `lib/sahara <lib/sahara.html>`__
 * `lib/swift <lib/swift.html>`__
 * `lib/tempest <lib/tempest.html>`__
 * `lib/tls <lib/tls.html>`__
@@ -180,7 +180,6 @@
 
 * `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
 * `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
-* `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
 * `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
 * `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
 * `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
@@ -237,7 +236,6 @@
 * `exercises/floating\_ips.sh <exercises/floating_ips.sh.html>`__
 * `exercises/horizon.sh <exercises/horizon.sh.html>`__
 * `exercises/neutron-adv-test.sh <exercises/neutron-adv-test.sh.html>`__
-* `exercises/sahara.sh <exercises/sahara.sh.html>`__
 * `exercises/sec\_groups.sh <exercises/sec_groups.sh.html>`__
 * `exercises/swift.sh <exercises/swift.sh.html>`__
 * `exercises/volumes.sh <exercises/volumes.sh.html>`__
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
new file mode 100644
index 0000000..99bfb85
--- /dev/null
+++ b/doc/source/plugin-registry.rst
@@ -0,0 +1,75 @@
+..
+  Note to reviewers: the intent of this file is to be easy for
+  community members to update. As such fast approving (single core +2)
+  is fine as long as you've identified that the plugin listed actually exists.
+
+==========================
+ DevStack Plugin Registry
+==========================
+
+Since we've created the external plugin mechanism, it's gotten used by
+a lot of projects. The following is a list of plugins that currently
+exist. Any project that wishes to list their plugin here is welcome
+to do so.
+
+Official OpenStack Projects
+===========================
+
+The following are plugins that exist for official OpenStack projects.
+
++--------------------+-------------------------------------------+--------------------+
+|Plugin Name         |URL                                        |Comments            |
++--------------------+-------------------------------------------+--------------------+
+|magnum              |git://git.openstack.org/openstack/magnum   |                    |
++--------------------+-------------------------------------------+--------------------+
+|sahara              |git://git.openstack.org/openstack/sahara   |                    |
++--------------------+-------------------------------------------+--------------------+
+|trove               |git://git.openstack.org/openstack/trove    |                    |
++--------------------+-------------------------------------------+--------------------+
+|zaqar               |git://git.openstack.org/openstack/zaqar    |                    |
++--------------------+-------------------------------------------+--------------------+
+
+
+
+Drivers
+=======
+
++--------------------+-------------------------------------------------+------------------+
+|Plugin Name         |URL                                              |Comments          |
++--------------------+-------------------------------------------------+------------------+
+|dragonflow          |git://git.openstack.org/openstack/dragonflow     |[d1]_             |
++--------------------+-------------------------------------------------+------------------+
+|odl                 |git://git.openstack.org/openstack/networking-odl |[d2]_             |
++--------------------+-------------------------------------------------+------------------+
+
+.. [d1] demonstrates an example of installing a 3rd party SDN controller
+.. [d2] demonstrates a pretty advanced set of modes that allow one to
+        run OpenDaylight either from a pre-existing install or from
+        source
+
+Alternate Configs
+=================
+
++-------------+------------------------------------------------------------+------------+
+| Plugin Name | URL                                                        | Comments   |
+|             |                                                            |            |
++-------------+------------------------------------------------------------+------------+
+|glusterfs    |git://git.openstack.org/stackforge/devstack-plugin-glusterfs|            |
++-------------+------------------------------------------------------------+------------+
+|             |                                                            |            |
++-------------+------------------------------------------------------------+------------+
+
+Additional Services
+===================
+
++-------------+------------------------------------------+------------+
+| Plugin Name | URL                                      | Comments   |
+|             |                                          |            |
++-------------+------------------------------------------+------------+
+|ec2-api      |git://git.openstack.org/stackforge/ec2api |[as1]_      |
++-------------+------------------------------------------+------------+
+|             |                                          |            |
++-------------+------------------------------------------+------------+
+
+.. [as1] the first functional DevStack plugin, hence it is used in
+         most of the examples.
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index c4ed228..1b6f5e3 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -2,103 +2,21 @@
 Plugins
 =======
 
-DevStack has a couple of plugin mechanisms to allow easily adding
-support for additional projects and features.
+The OpenStack ecosystem is wide and deep, and only growing more so
+every day. The value of DevStack is that it's simple enough that you
+can clearly understand what it's doing. And yet we'd like to support
+as much of the OpenStack ecosystem as possible. We do that with
+plugins.
 
-Extras.d Hooks
-==============
+DevStack plugins are bits of bash code that live outside the DevStack
+tree. They are called through a strong contract, so these plugins can
+be sure that they will continue to work in the future as DevStack
+evolves.
 
-These hooks are an extension of the service calls in
-``stack.sh`` at specific points in its run, plus ``unstack.sh`` and
-``clean.sh``. A number of the higher-layer projects are implemented in
-DevStack using this mechanism.
+Plugin Interface
+================
 
-The script in ``extras.d`` is expected to be mostly a dispatcher to
-functions in a ``lib/*`` script. The scripts are named with a
-zero-padded two digits sequence number prefix to control the order that
-the scripts are called, and with a suffix of ``.sh``. DevStack reserves
-for itself the sequence numbers 00 through 09 and 90 through 99.
-
-Below is a template that shows handlers for the possible command-line
-arguments:
-
-::
-
-    # template.sh - DevStack extras.d dispatch script template
-
-    # check for service enabled
-    if is_service_enabled template; then
-
-        if [[ "$1" == "source" ]]; then
-            # Initial source of lib script
-            source $TOP_DIR/lib/template
-        fi
-
-        if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-            # Set up system services
-            echo_summary "Configuring system services Template"
-            install_package cowsay
-
-        elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-            # Perform installation of service source
-            echo_summary "Installing Template"
-            install_template
-
-        elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-            # Configure after the other layer 1 and 2 services have been configured
-            echo_summary "Configuring Template"
-            configure_template
-
-        elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-            # Initialize and start the template service
-            echo_summary "Initializing Template"
-            ##init_template
-        fi
-
-        if [[ "$1" == "unstack" ]]; then
-            # Shut down template services
-            # no-op
-            :
-        fi
-
-        if [[ "$1" == "clean" ]]; then
-            # Remove state and transient data
-            # Remember clean.sh first calls unstack.sh
-            # no-op
-            :
-        fi
-    fi
-
-The arguments are:
-
--  **source** - Called by each script that utilizes ``extras.d`` hooks;
-   this replaces directly sourcing the ``lib/*`` script.
--  **stack** - Called by ``stack.sh`` three times for different phases
-   of its run:
-
-   -  **pre-install** - Called after system (OS) setup is complete and
-      before project source is installed.
-   -  **install** - Called after the layer 1 and 2 projects source and
-      their dependencies have been installed.
-   -  **post-config** - Called after the layer 1 and 2 services have
-      been configured. All configuration files for enabled services
-      should exist at this point.
-   -  **extra** - Called near the end after layer 1 and 2 services have
-      been started. This is the existing hook and has not otherwise
-      changed.
-
--  **unstack** - Called by ``unstack.sh`` before other services are shut
-   down.
--  **clean** - Called by ``clean.sh`` before other services are cleaned,
-   but after ``unstack.sh`` has been called.
-
-
-Externally Hosted Plugins
-=========================
-
-Based on the extras.d hooks, DevStack supports a standard mechansim
-for including plugins from external repositories. The plugin interface
-assumes the following:
+DevStack supports a standard mechanism for including plugins from
+external repositories. The plugin interface assumes the following:
 
 An external git repository that includes a ``devstack/`` top level
 directory. Inside this directory there can be 2 files.
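+
+A hypothetical plugin repository (the name ``plugin-foo`` is just an
+example) might therefore be laid out as::
+
+    plugin-foo/
+      devstack/
+        settings
+        plugin.sh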
@@ -118,11 +36,10 @@
   default value only if the variable is unset or empty; e.g. in bash
   syntax ``FOO=${FOO:-default}``.
 
-- ``plugin.sh`` - the actual plugin. It will be executed by devstack
-  during it's run. The run order will be done in the registration
-  order for these plugins, and will occur immediately after all in
-  tree extras.d dispatch at the phase in question.  The plugin.sh
-  looks like the extras.d dispatcher above.
+- ``plugin.sh`` - the actual plugin. It is executed by DevStack at
+  well-defined points during a ``stack.sh`` run. The internal
+  structure of ``plugin.sh`` is discussed below.
+
 
 Plugins are registered by adding the following to the localrc section
 of ``local.conf``.
@@ -141,49 +58,121 @@
 
   enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
 
-Plugins for gate jobs
----------------------
+plugin.sh contract
+==================
 
-All OpenStack plugins that wish to be used as gate jobs need to exist
-in OpenStack's gerrit. Both ``openstack`` namespace and ``stackforge``
-namespace are fine. This allows testing of the plugin as well as
-provides network isolation against upstream git repository failures
-(which we see often enough to be an issue).
+``plugin.sh`` is a bash script that will be called at specific points
+during ``stack.sh``, ``unstack.sh``, and ``clean.sh``. It will be
+called in the following way::
 
-Ideally plugins will be implemented as ``devstack`` directory inside
-the project they are testing. For example, the stackforge/ec2-api
-project has it's pluggin support in it's tree.
+  source $PATH/TO/plugin.sh <mode> [phase]
 
-In the cases where there is no "project tree" per say (like
-integrating a backend storage configuration such as ceph or glusterfs)
-it's also allowed to build a dedicated
-``stackforge/devstack-plugin-FOO`` project to house the plugin.
+``mode`` can be thought of as the major mode being called, currently
+one of: ``stack``, ``unstack``, ``clean``. ``phase`` is used by modes
+which have multiple points during their run where it's necessary to
+be able to execute code. All existing ``mode`` and ``phase`` points
+are considered **strong contracts** and won't be removed without a
+reasonable deprecation period. Additional new ``mode`` or ``phase``
+points may be added at any time if we discover we need them to support
+additional kinds of plugins in DevStack.
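+
+For example (``foo`` being a hypothetical plugin that DevStack has
+already cloned), the ``post-config`` phase would be reached through a
+call roughly like::
+
+  source $DEST/foo/devstack/plugin.sh stack post-config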
 
-Note jobs must not require cloning of repositories during tests.
-Tests must list their repository in the ``PROJECTS`` variable for
-`devstack-gate
-<https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_
-for the repository to be available to the test.  Further information
-is provided in the project creator's guide.
+The current full list of ``mode`` and ``phase`` values is:
 
-Hypervisor
-==========
+-  **stack** - Called by ``stack.sh`` four times for different phases
+   of its run:
 
-Hypervisor plugins are fairly new and condense most hypervisor
-configuration into one place.
+   -  **pre-install** - Called after system (OS) setup is complete and
+      before project source is installed.
+   -  **install** - Called after the layer 1 and 2 projects source and
+      their dependencies have been installed.
+   -  **post-config** - Called after the layer 1 and 2 services have
+      been configured. All configuration files for enabled services
+      should exist at this point.
+   -  **extra** - Called near the end after layer 1 and 2 services have
+      been started.
 
-The initial plugin implemented was for Docker support and is a useful
-template for the required support. Plugins are placed in
-``lib/nova_plugins`` and named ``hypervisor-<name>`` where ``<name>`` is
-the value of ``VIRT_DRIVER``. Plugins must define the following
-functions:
+-  **unstack** - Called by ``unstack.sh`` before other services are shut
+   down.
+-  **clean** - Called by ``clean.sh`` before other services are cleaned,
+   but after ``unstack.sh`` has been called.
 
--  ``install_nova_hypervisor`` - install any external requirements
--  ``configure_nova_hypervisor`` - make configuration changes, including
-   those to other services
--  ``start_nova_hypervisor`` - start any external services
--  ``stop_nova_hypervisor`` - stop any external services
--  ``cleanup_nova_hypervisor`` - remove transient data and cache
+Example plugin
+====================
+
+An example plugin would look something like the following.
+
+``devstack/settings``::
+
+    # settings file for template
+    enable_service template
+
+
+``devstack/plugin.sh``::
+
+    # plugin.sh - DevStack plugin.sh dispatch script template
+
+    function install_template {
+        ...
+    }
+
+    function init_template {
+        ...
+    }
+
+    function configure_template {
+        ...
+    }
+
+    # check for service enabled
+    if is_service_enabled template; then
+
+        if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+            # Set up system services
+            echo_summary "Configuring system services Template"
+            install_package cowsay
+
+        elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+            # Perform installation of service source
+            echo_summary "Installing Template"
+            install_template
+
+        elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+            # Configure after the other layer 1 and 2 services have been configured
+            echo_summary "Configuring Template"
+            configure_template
+
+        elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+            # Initialize and start the template service
+            echo_summary "Initializing Template"
+            init_template
+        fi
+
+        if [[ "$1" == "unstack" ]]; then
+            # Shut down template services
+            # no-op
+            :
+        fi
+
+        if [[ "$1" == "clean" ]]; then
+            # Remove state and transient data
+            # Remember clean.sh first calls unstack.sh
+            # no-op
+            :
+        fi
+    fi
+
+Plugin Execution Order
+======================
+
+Plugins are run after in-tree services at each of the stages
+above. For example, if you need something to happen before Keystone
+starts, you should do that at the ``post-config`` phase.
+
+Multiple plugins can be specified in your ``local.conf``. When that
+happens, the plugins will be executed **in order** at each phase. This
+allows plugins to conceptually depend on each other, by documenting
+for the user the order in which they must be declared. A formal
+dependency mechanism is beyond the scope of the current work.
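+
+A sketch of what that might look like in the ``localrc`` section of
+``local.conf`` (both plugin names and URLs here are hypothetical)::
+
+  enable_plugin foo git://git.openstack.org/stackforge/devstack-plugin-foo
+  enable_plugin bar git://git.openstack.org/stackforge/devstack-plugin-bar
+
+With this declaration order, ``foo`` runs before ``bar`` at every
+phase.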
 
 System Packages
 ===============
@@ -205,3 +194,49 @@
 
 - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
   running on SUSE Linux or openSUSE.
+
+
+Using Plugins in the OpenStack Gate
+===================================
+
+For everyday use, DevStack plugins can exist in any git tree that's
+accessible on the internet. However, when using DevStack plugins in
+the OpenStack gate, they must live in projects in OpenStack's
+gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are
+fine. This allows testing of the plugin as well as provides network
+isolation against upstream git repository failures (which we see often
+enough to be an issue).
+
+Ideally a plugin will be included within the ``devstack`` directory of
+the project being tested. For example, the stackforge/ec2-api
+project has its plugin support in its own tree.
+
+However, sometimes a DevStack plugin might be used solely to
+configure a backend service that will be used by the rest of
+OpenStack, so there is no "project tree" per se. Good examples
+include: integration of back-end storage (e.g. ceph or glusterfs),
+integration of SDN controllers (e.g. ovn, OpenDaylight), or
+integration of alternate RPC systems (e.g. zmq, qpid). In these cases
+the best practice is to build a dedicated
+``stackforge/devstack-plugin-FOO`` project.
+
+To enable a plugin to be used in a gate job, the following lines will
+be needed in your ``jenkins/jobs/<project>.yaml`` definition in
+`project-config
+<http://git.openstack.org/cgit/openstack-infra/project-config/>`_::
+
+  # Because we are testing a non-standard project, add our
+  # project repository. This makes zuul do the right
+  # reference magic for testing changes.
+  export PROJECTS="stackforge/ec2-api $PROJECTS"
+
+  # note the actual url here is somewhat irrelevant because it
+  # caches in nodepool; however, make it a valid url for
+  # documentation purposes.
+  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api"
+
+See Also
+========
+
+For additional inspiration on DevStack plugins, you can check out the
+`Plugin Registry <plugin-registry.html>`_.
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 04892b0..a0de4cc 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -19,18 +19,6 @@
 
 set -o errtrace
 
-trap failed ERR
-function failed {
-    local r=$?
-    set +o errtrace
-    set +o xtrace
-    echo "Failed to execute"
-    echo "Starting cleanup..."
-    delete_all
-    echo "Finished cleanup"
-    exit $r
-}
-
 # Print the commands being run so that we can see the command that triggers
 # an error.  It is also useful for following allowing as the install occurs.
 set -o xtrace
@@ -441,6 +429,18 @@
     fi
 }
 
+trap failed ERR
+function failed {
+    local r=$?
+    set +o errtrace
+    set +o xtrace
+    echo "Failed to execute"
+    echo "Starting cleanup..."
+    delete_all
+    echo "Finished cleanup"
+    exit $r
+}
+
 # Kick off script
 # ---------------
 
diff --git a/exercises/sahara.sh b/exercises/sahara.sh
deleted file mode 100755
index 2589e28..0000000
--- a/exercises/sahara.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-
-# **sahara.sh**
-
-# Sanity check that Sahara started if enabled
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-is_service_enabled sahara || exit 55
-
-$CURL_GET http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
deleted file mode 100644
index f177766..0000000
--- a/extras.d/70-sahara.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# sahara.sh - DevStack extras script to install Sahara
-
-if is_service_enabled sahara; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/sahara
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing sahara"
-        install_sahara
-        install_python_saharaclient
-        cleanup_sahara
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring sahara"
-        configure_sahara
-        create_sahara_accounts
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing sahara"
-        sahara_register_images
-        start_sahara
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_sahara
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_sahara
-    fi
-fi
diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template
index 1c57b32..79f14c3 100644
--- a/files/apache-ceilometer.template
+++ b/files/apache-ceilometer.template
@@ -1,7 +1,7 @@
 Listen %PORT%
 
 <VirtualHost *:%PORT%>
-    WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP}
+    WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup ceilometer-api
     WSGIScriptAlias / %WSGIAPP%
     WSGIApplicationGroup %{GLOBAL}
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 0b914e2..6dd1ad9 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -33,3 +33,23 @@
     %SSLCERTFILE%
     %SSLKEYFILE%
 </VirtualHost>
+
+Alias /identity %PUBLICWSGI%
+<Location /identity>
+    SetHandler wsgi-script
+    Options +ExecCGI
+
+    WSGIProcessGroup keystone-public
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
+
+Alias /identity_admin %ADMINWSGI%
+<Location /identity_admin>
+    SetHandler wsgi-script
+    Options +ExecCGI
+
+    WSGIProcessGroup keystone-admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
index 70ccedd..4908152 100644
--- a/files/apache-nova-api.template
+++ b/files/apache-nova-api.template
@@ -1,7 +1,7 @@
 Listen %PUBLICPORT%
 
 <VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess nova-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess nova-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup nova-api
     WSGIScriptAlias / %PUBLICWSGI%
     WSGIApplicationGroup %{GLOBAL}
@@ -13,4 +13,13 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-</VirtualHost>
\ No newline at end of file
+</VirtualHost>
+
+Alias /compute %PUBLICWSGI%
+<Location /compute>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup nova-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
index ae4cf94..235d958 100644
--- a/files/apache-nova-ec2-api.template
+++ b/files/apache-nova-ec2-api.template
@@ -1,7 +1,7 @@
 Listen %PUBLICPORT%
 
 <VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess nova-ec2-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIDaemonProcess nova-ec2-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup nova-ec2-api
     WSGIScriptAlias / %PUBLICWSGI%
     WSGIApplicationGroup %{GLOBAL}
@@ -13,4 +13,4 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-</VirtualHost>
\ No newline at end of file
+</VirtualHost>
diff --git a/files/debs/neutron b/files/debs/neutron
index 2d69a71..b5a457e 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -9,11 +9,9 @@
 postgresql-server-dev-all
 python-mysqldb
 python-mysql.connector
-python-qpid # NOPRIME
 dnsmasq-base
 dnsmasq-utils # for dhcp_release only available in dist:precise
 rabbitmq-server # NOPRIME
-qpidd # NOPRIME
 sqlite3
 vlan
 radvd # NOPRIME
diff --git a/files/debs/nova b/files/debs/nova
index 9d9acde..346b8b3 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -24,10 +24,8 @@
 curl
 genisoimage # required for config_drive
 rabbitmq-server # NOPRIME
-qpidd # NOPRIME
 socat # used by ajaxterm
 python-libvirt # NOPRIME
 python-libxml2
 python-numpy # used by websockify for spice console
 python-m2crypto
-python-qpid # NOPRIME
diff --git a/files/debs/qpid b/files/debs/qpid
deleted file mode 100644
index e3bbf09..0000000
--- a/files/debs/qpid
+++ /dev/null
@@ -1 +0,0 @@
-sasl2-bin # NOPRIME
diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs
index bdb630a..54d13a3 100644
--- a/files/rpms-suse/devlibs
+++ b/files/rpms-suse/devlibs
@@ -1,6 +1,5 @@
 libffi-devel  # pyOpenSSL
 libopenssl-devel  # pyOpenSSL
-libxml2-devel  # lxml
 libxslt-devel  # lxml
 postgresql-devel  # psycopg2
 libmysqlclient-devel # MySQL-python
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index 0e58425..bf512de 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -1,2 +1 @@
-libxml2-devel
 python-devel
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index e75db89..1339799 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -11,6 +11,3 @@
 sudo
 vlan
 radvd # NOPRIME
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-qpidd # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 6f8aef1..039456f 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -22,7 +22,3 @@
 sqlite3
 sudo
 vlan
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-python-qpid # NOPRIME
-qpidd # NOPRIME
diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3
new file mode 100644
index 0000000..a7a190c
--- /dev/null
+++ b/files/rpms-suse/q-l3
@@ -0,0 +1,2 @@
+conntrack-tools
+keepalived
diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove
deleted file mode 100644
index 96f8f29..0000000
--- a/files/rpms-suse/trove
+++ /dev/null
@@ -1 +0,0 @@
-libxslt1-dev
diff --git a/files/rpms/general b/files/rpms/general
index 7b2c00a..c3f3de8 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -25,6 +25,7 @@
 libyaml-devel
 gettext  # used for compiling message catalogs
 net-tools
-java-1.7.0-openjdk-headless  # NOPRIME rhel7,f20
+java-1.7.0-openjdk-headless  # NOPRIME rhel7
 java-1.8.0-openjdk-headless  # NOPRIME f21,f22
 pyOpenSSL # version in pip uses too much memory
+iptables-services  # NOPRIME f21,f22
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 8292e7b..29851be 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,7 +11,6 @@
 openvswitch # NOPRIME
 postgresql-devel
 rabbitmq-server # NOPRIME
-qpid-cpp-server        # NOPRIME
 sqlite
 sudo
 radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index ebd6674..6eeb623 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -10,6 +10,7 @@
 iputils
 kpartx
 kvm # NOPRIME
+qemu-kvm # NOPRIME
 libvirt-bin # NOPRIME
 libvirt-devel # NOPRIME
 libvirt-python # NOPRIME
@@ -22,6 +23,5 @@
 parted
 polkit
 rabbitmq-server # NOPRIME
-qpid-cpp-server # NOPRIME
 sqlite
 sudo
diff --git a/files/rpms/qpid b/files/rpms/qpid
deleted file mode 100644
index 41dd2f6..0000000
--- a/files/rpms/qpid
+++ /dev/null
@@ -1,3 +0,0 @@
-qpid-proton-c-devel # NOPRIME
-cyrus-sasl-lib # NOPRIME
-cyrus-sasl-plain # NOPRIME
diff --git a/functions-common b/functions-common
index 3a2f5f7..60cf04c 100644
--- a/functions-common
+++ b/functions-common
@@ -43,6 +43,26 @@
 
 TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 
+# Save these variables to .stackenv
+STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
+    KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \
+    LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP \
+    HOST_IPV6 SERVICE_IP_VERSION"
+
+
+# Saves significant environment variables to .stackenv for later use.
+# Refers to a lot of globals; only TOP_DIR and STACK_ENV_VARS are required for it
+# to function, the rest are simply saved and do not cause problems if they are undefined.
+# save_stackenv [tag]
+function save_stackenv {
+    local tag=${1:-""}
+    # Save some values we generated for later use
+    time_stamp=$(date "+$TIMESTAMP_FORMAT")
+    echo "# $time_stamp $tag" >$TOP_DIR/.stackenv
+    for i in $STACK_ENV_VARS; do
+        echo $i=${!i} >>$TOP_DIR/.stackenv
+    done
+}
 
 # Normalize config values to True or False
 # Accepts as False: 0 no No NO false False FALSE
@@ -68,6 +88,7 @@
     [[ -v "$1" ]]
 }
 
+
 # Control Functions
 # =================
 
@@ -249,8 +270,9 @@
         # Fedora release 16 (Verne)
         # XenServer release 6.2.0-70446c (xenenterprise)
         # Oracle Linux release 7
+        # CloudLinux release 7.1
         os_CODENAME=""
-        for r in "Red Hat" CentOS Fedora XenServer; do
+        for r in "Red Hat" CentOS Fedora XenServer CloudLinux; do
             os_VENDOR=$r
             if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
                 ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
@@ -354,7 +376,8 @@
     fi
 
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
-        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ]
+        [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] || \
+        [ "$os_VENDOR" = "CloudLinux" ]
 }
 
 
@@ -558,13 +581,14 @@
     local floating_range=$2
     local host_ip_iface=$3
     local host_ip=$4
+    local af=$5
 
     # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
     if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
         host_ip=""
         # Find the interface used for the default route
-        host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)}
-        local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}')
+        host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
+        local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | awk /$af'/ {split($2,parts,"/");  print parts[1]}')
         local ip
         for ip in $host_ips; do
             # Attempt to filter out IP addresses that are part of the fixed and
@@ -573,6 +597,10 @@
             # will be printed and the first IP from the interface will be used.
             # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
             # address.
+            if [[ "$af" == "inet6" ]]; then
+                host_ip=$ip
+                break;
+            fi
             if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then
                 host_ip=$ip
                 break;
@@ -658,9 +686,10 @@
 # Gets or creates a domain
 # Usage: get_or_create_domain <name> <description>
 function get_or_create_domain {
+    local domain_id
     local os_url="$KEYSTONE_SERVICE_URI_V3"
     # Gets domain id
-    local domain_id=$(
+    domain_id=$(
         # Gets domain id
         openstack --os-token=$OS_TOKEN --os-url=$os_url \
             --os-identity-api-version=3 domain show $1 \
@@ -675,44 +704,41 @@
 }
 
 # Gets or creates group
-# Usage: get_or_create_group <groupname> [<domain> <description>]
+# Usage: get_or_create_group <groupname> <domain> [<description>]
 function get_or_create_group {
-    local domain=${2:+--domain ${2}}
     local desc="${3:-}"
     local os_url="$KEYSTONE_SERVICE_URI_V3"
+    local group_id
     # Gets group id
-    local group_id=$(
+    group_id=$(
         # Creates new group with --or-show
         openstack --os-token=$OS_TOKEN --os-url=$os_url \
             --os-identity-api-version=3 group create $1 \
-            $domain --description "$desc" --or-show \
+            --domain $2 --description "$desc" --or-show \
             -f value -c id
     )
     echo $group_id
 }
 
 # Gets or creates user
-# Usage: get_or_create_user <username> <password> [<email> [<domain>]]
+# Usage: get_or_create_user <username> <password> <domain> [<email>]
 function get_or_create_user {
-    if [[ ! -z "$3" ]]; then
-        local email="--email=$3"
+    local user_id
+    if [[ ! -z "$4" ]]; then
+        local email="--email=$4"
     else
         local email=""
     fi
-    local os_cmd="openstack"
-    local domain=""
-    if [[ ! -z "$4" ]]; then
-        domain="--domain=$4"
-        os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
-    fi
     # Gets user id
-    local user_id=$(
+    user_id=$(
         # Creates new user with --or-show
-        $os_cmd user create \
+        openstack user create \
             $1 \
             --password "$2" \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            --domain=$3 \
             $email \
-            $domain \
             --or-show \
             -f value -c id
     )
@@ -720,18 +746,16 @@
 }
 
 # Gets or creates project
-# Usage: get_or_create_project <name> [<domain>]
+# Usage: get_or_create_project <name> <domain>
 function get_or_create_project {
-    # Gets project id
-    local os_cmd="openstack"
-    local domain=""
-    if [[ ! -z "$2" ]]; then
-        domain="--domain=$2"
-        os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
-    fi
-    local project_id=$(
+    local project_id
+    project_id=$(
         # Creates new project with --or-show
-        $os_cmd project create $1 $domain --or-show -f value -c id
+        openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            project create $1 \
+            --domain=$2 \
+            --or-show -f value -c id
     )
     echo $project_id
 }
@@ -739,9 +763,13 @@
 # Gets or creates role
 # Usage: get_or_create_role <name>
 function get_or_create_role {
-    local role_id=$(
+    local role_id
+    role_id=$(
         # Creates role with --or-show
-        openstack role create $1 --or-show -f value -c id
+        openstack role create $1 \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            --or-show -f value -c id
     )
     echo $role_id
 }
@@ -749,11 +777,14 @@
 # Gets or adds user role to project
 # Usage: get_or_add_user_project_role <role> <user> <project>
 function get_or_add_user_project_role {
+    local user_role_id
     # Gets user role id
-    local user_role_id=$(openstack role list \
+    user_role_id=$(openstack role list \
         --user $2 \
-        --project $3 \
+        --os-url=$KEYSTONE_SERVICE_URI_V3 \
+        --os-identity-api-version=3 \
         --column "ID" \
+        --project $3 \
         --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
@@ -762,6 +793,8 @@
             $1 \
             --user $2 \
             --project $3 \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
             | grep " id " | get_field 2)
     fi
     echo $user_role_id
@@ -770,20 +803,27 @@
 # Gets or adds group role to project
 # Usage: get_or_add_group_project_role <role> <group> <project>
 function get_or_add_group_project_role {
+    local group_role_id
     # Gets group role id
-    local group_role_id=$(openstack role list \
+    group_role_id=$(openstack role list \
+        --os-url=$KEYSTONE_SERVICE_URI_V3 \
+        --os-identity-api-version=3 \
         --group $2 \
         --project $3 \
-        --column "ID" \
-        --column "Name" \
-        | grep " $1 " | get_field 1)
+        -c "ID" -f value)
     if [[ -z "$group_role_id" ]]; then
-        # Adds role to group
-        group_role_id=$(openstack role add \
-            $1 \
+        # Add the role to the group, then fetch the resulting role id
+        openstack role add $1 \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            --group $2 \
+            --project $3
+        group_role_id=$(openstack role list \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
             --group $2 \
             --project $3 \
-            | grep " id " | get_field 2)
+            -c "ID" -f value)
     fi
     echo $group_role_id
 }
@@ -791,12 +831,15 @@
 # Gets or creates service
 # Usage: get_or_create_service <name> <type> <description>
 function get_or_create_service {
+    local service_id
     # Gets service id
-    local service_id=$(
+    service_id=$(
         # Gets service id
-        openstack service show $1 -f value -c id 2>/dev/null ||
+        openstack service show $2 -f value -c id 2>/dev/null ||
         # Creates new service if not exists
         openstack service create \
+            --os-url $KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
             $2 \
             --name $1 \
             --description="$3" \
@@ -805,29 +848,57 @@
     echo $service_id
 }
 
-# Gets or creates endpoint
-# Usage: get_or_create_endpoint <service> <region> <publicurl> <adminurl> <internalurl>
-function get_or_create_endpoint {
-    # Gets endpoint id
-    local endpoint_id=$(openstack endpoint list \
-        --column "ID" \
-        --column "Region" \
-        --column "Service Name" \
-        | grep " $2 " \
-        | grep " $1 " | get_field 1)
+# Create an endpoint with a specific interface
+# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
+function _get_or_create_endpoint_with_interface {
+    local endpoint_id
+    endpoint_id=$(openstack endpoint list \
+        --os-url $KEYSTONE_SERVICE_URI_V3 \
+        --os-identity-api-version=3 \
+        --service $1 \
+        --interface $2 \
+        --region $4 \
+        -c ID -f value)
     if [[ -z "$endpoint_id" ]]; then
         # Creates new endpoint
         endpoint_id=$(openstack endpoint create \
-            $1 \
-            --region $2 \
-            --publicurl $3 \
-            --adminurl $4 \
-            --internalurl $5 \
-            | grep " id " | get_field 2)
+            --os-url $KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            $1 $2 $3 --region $4 -f value -c id)
     fi
+
     echo $endpoint_id
 }
 
+# Gets or creates endpoint
+# Usage: get_or_create_endpoint <service> <region> <publicurl> <adminurl> <internalurl>
+function get_or_create_endpoint {
+    # NOTE(jamielennox): when converting to v3 endpoint creation we go from
+    # creating one endpoint with multiple urls to multiple endpoints, each with
+    # a different interface.  To maintain the existing function interface we
+    # create 3 endpoints and return the id of the public one. In reality
+    # returning the public id makes little difference, as no current scenario
+    # uses the returned id. Ideally this behaviour should be pushed out to the
+    # service setups, letting them create the endpoints they need.
+    local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+    _get_or_create_endpoint_with_interface $1 admin $4 $2
+    _get_or_create_endpoint_with_interface $1 internal $5 $2
+
+    # Return the public id to indicate success; it is also the endpoint most likely wanted.
+    echo $public_id
+}
+
+# Get a URL from the identity service
+# Usage: get_endpoint_url <service> <interface>
+function get_endpoint_url {
+    echo $(openstack endpoint list \
+            --service $1 --interface $2 \
+            --os-url $KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            -c URL -f value)
+}
+
 
 # Package Functions
 # =================
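
As the NOTE above explains, `get_or_create_endpoint` now fans a single logical endpoint out into one Keystone v3 endpoint per interface. Roughly, a call such as `get_or_create_endpoint image RegionOne $PUB $ADM $INT` now reduces to the CLI calls below (the region name and URL are placeholders, and the real helper also passes the `--os-url`/`--os-identity-api-version=3` flags shown above):

    openstack endpoint create image public   http://controller:9292 --region RegionOne
    openstack endpoint create image admin    http://controller:9292 --region RegionOne
    openstack endpoint create image internal http://controller:9292 --region RegionOne
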
@@ -1330,7 +1401,7 @@
 
     if is_service_enabled $service; then
         # Clean up the screen window
-        screen -S $SCREEN_NAME -p $service -X kill
+        screen -S $SCREEN_NAME -p $service -X kill || true
     fi
 }
 
@@ -1671,7 +1742,7 @@
 # ``ENABLED_SERVICES`` list, if they are not already present.
 #
 # For example:
-#   enable_service qpid
+#   enable_service q-svc
 #
 # This function does not know about the special cases
 # for nova, glance, and neutron built into is_service_enabled().
@@ -1734,7 +1805,6 @@
         [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
         [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
-        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
         [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
@@ -1947,6 +2017,19 @@
     fi
 }
 
+# Run a test command in a finite retry loop.
+# Usage: test_with_retry <testcmd> <failmsg> [<max_wait_seconds>] [<sleep_interval>]
+#
+function test_with_retry {
+    local testcmd=$1
+    local failmsg=$2
+    local until=${3:-10}
+    local sleep=${4:-0.5}
+
+    if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then
+        die $LINENO "$failmsg"
+    fi
+}
+
 
 # Restore xtrace
 $XTRACE
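
The new `test_with_retry` helper wraps a polling loop in `timeout` and dies with the supplied message on failure, replacing hand-rolled `while ! cmd; do sleep; done` constructs elsewhere in the tree. A minimal usage sketch (the URL and 30-second budget are illustrative only; the Neutron start check further below uses it the same way):

    # Poll an example endpoint for up to 30 seconds, then fail with the given message
    test_with_retry "wget --no-proxy -q -O- http://127.0.0.1:9696/" \
        "Neutron did not start" 30
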
diff --git a/inc/python b/inc/python
index 3d329b5..54e19a7 100644
--- a/inc/python
+++ b/inc/python
@@ -66,7 +66,8 @@
 
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``
+# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``USE_CONSTRAINTS``
 # pip_install package [package ...]
 function pip_install {
     local xtrace=$(set +o | grep xtrace)
@@ -103,6 +104,13 @@
         fi
     fi
 
+    cmd_pip="$cmd_pip install"
+
+    # Handle a constraints file, if needed.
+    if [[ "$USE_CONSTRAINTS" == "True" ]]; then
+        cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
+    fi
+
     local pip_version=$(python -c "import pip; \
                         print(pip.__version__.strip('.')[0])")
     if (( pip_version<6 )); then
@@ -116,7 +124,7 @@
         https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
-        $cmd_pip install $upgrade \
+        $cmd_pip $upgrade \
         $@
 
     # Also install test requirements
@@ -128,7 +136,7 @@
             https_proxy=${https_proxy:-} \
             no_proxy=${no_proxy:-} \
             PIP_FIND_LINKS=$PIP_FIND_LINKS \
-            $cmd_pip install $upgrade \
+            $cmd_pip $upgrade \
             -r $test_req
     fi
 }
@@ -195,7 +203,7 @@
 function is_in_projects_txt {
     local project_dir=$1
     local project_name=$(basename $project_dir)
-    return grep "/$project_name\$" $REQUIREMENTS_DIR/projects.txt >/dev/null
+    grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
 }
 
 # ``pip install -e`` the package, which processes the dependencies
@@ -215,22 +223,26 @@
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements != "changed" ]]; then
-        if [[ "$REQUIREMENTS_MODE" == "soft" ]]; then
-            if is_in_projects_txt $project_dir; then
-                (cd $REQUIREMENTS_DIR; \
-                    python update.py $project_dir)
-            else
-                # soft update projects not found in requirements project.txt
-                (cd $REQUIREMENTS_DIR; \
-                    python update.py -s $project_dir)
-            fi
-        else
+    if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then
+        if is_in_projects_txt $project_dir; then
             (cd $REQUIREMENTS_DIR; \
-                python update.py $project_dir)
+                ./.venv/bin/python update.py $project_dir)
+        else
+            # soft update projects not found in requirements project.txt
+            echo "$project_dir not a constrained repository, soft enforcing requirements"
+            (cd $REQUIREMENTS_DIR; \
+                ./.venv/bin/python update.py -s $project_dir)
         fi
     fi
 
+    if [ -n "$REQUIREMENTS_DIR" ]; then
+        # Constrain this package to this project directory from here on out.
+        local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+            $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
+            "$flags file://$project_dir#egg=$name"
+    fi
+
     setup_package $project_dir $flags
 
     # We've just gone and possibly modified the user's source tree in an
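
Taken together, the `pip_install` and `setup_develop` changes let DevStack pin installs against the requirements repository's `upper-constraints.txt` and then re-point each constrained package at the local source tree. A hedged `local.conf` sketch for switching the behaviour on, assuming `USE_CONSTRAINTS` is the only knob needed:

    [[local|localrc]]
    # Apply requirements/upper-constraints.txt to every pip install
    USE_CONSTRAINTS=True
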
diff --git a/lib/ceilometer b/lib/ceilometer
index 1f72187..9226d85 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -78,8 +78,13 @@
 CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
 CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}
 
-# Support potential entry-points console scripts
-CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
+# Support potential entry-point console scripts, whether or not a virtualenv is in use
+if [[ ${USE_VENV} = True ]]; then
+    PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv
+    CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin
+else
+    CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
+fi
 
 # Set up database backend
 CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}
@@ -125,9 +130,8 @@
         create_service_user "ceilometer" "admin"
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            local ceilometer_service=$(get_or_create_service "ceilometer" \
-                "metering" "OpenStack Telemetry Service")
-            get_or_create_endpoint $ceilometer_service \
+            get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service"
+            get_or_create_endpoint "metering" \
                 "$REGION_NAME" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
@@ -165,16 +169,22 @@
 
     local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
     local apache_version=$(get_apache_version)
+    local venv_path=""
 
     # Copy proxy vhost and wsgi file
     sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
 
+    if [[ ${USE_VENV} = True ]]; then
+        venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages"
+    fi
+
     sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf
     sudo sed -e "
         s|%PORT%|$CEILOMETER_SERVICE_PORT|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
         s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g;
-        s|%USER%|$STACK_USER|g
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
     " -i $ceilometer_apache_conf
 }
 
@@ -201,6 +211,7 @@
     cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR
     cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR
     cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR
+    cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR
 
     if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then
         sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
@@ -232,12 +243,14 @@
         iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
         ${TOP_DIR}/pkg/elasticsearch.sh start
         cleanup_ceilometer
-    else
+    elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
         iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer
         iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
         iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer
         configure_mongodb
         cleanup_ceilometer
+    else
+        die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND"
     fi
 
     if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
@@ -263,10 +276,8 @@
     local packages=mongodb-server
 
     if is_fedora; then
-        # mongodb client + python bindings
-        packages="${packages} mongodb pymongo"
-    else
-        packages="${packages} python-pymongo"
+        # mongodb client
+        packages="${packages} mongodb"
     fi
 
     install_package ${packages}
@@ -319,6 +330,21 @@
         install_redis
     fi
 
+    if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
+        pip_install_gr pymongo
+    fi
+
+    # Only install virt drivers if we're running nova compute
+    if is_service_enabled n-cpu ; then
+        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+            pip_install_gr libvirt-python
+        fi
+
+        if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
+            pip_install_gr oslo.vmware
+        fi
+    fi
+
     if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
         ${TOP_DIR}/pkg/elasticsearch.sh download
         ${TOP_DIR}/pkg/elasticsearch.sh install
@@ -334,28 +360,15 @@
     fi
 }
 
-# install_ceilometermiddleware() - Collect source and prepare
-function install_ceilometermiddleware {
-    if use_library_from_git "ceilometermiddleware"; then
-        git_clone_by_name "ceilometermiddleware"
-        setup_dev_lib "ceilometermiddleware"
-    else
-        # BUG: this should be a pip_install_gr except it was never
-        # included in global-requirements. Needs to be fixed by
-        # https://bugs.launchpad.net/ceilometer/+bug/1441655
-        pip_install ceilometermiddleware
-    fi
-}
-
 # start_ceilometer() - Start running processes, including screen
 function start_ceilometer {
-    run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
-    run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
-    run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
-    run_process ceilometer-aipmi "ceilometer-agent-ipmi --config-file $CEILOMETER_CONF"
+    run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
+    run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF"
+    run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
+    run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-agent-ipmi --config-file $CEILOMETER_CONF"
 
     if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
-        run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+        run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
     else
         enable_apache_site ceilometer
         restart_apache_server
@@ -367,10 +380,10 @@
     # Start the compute agent last to allow time for the collector to
     # fully wake up and connect to the message bus. See bug #1355809
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP
+        run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP
     fi
     if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
-        run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF"
+        run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF"
     fi
 
     # Only die on API if it was actually intended to be turned on
@@ -381,8 +394,8 @@
         fi
     fi
 
-    run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
-    run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
+    run_process ceilometer-alarm-notifier "$CEILOMETER_BIN_DIR/ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
+    run_process ceilometer-alarm-evaluator "$CEILOMETER_BIN_DIR/ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
 }
 
 # stop_ceilometer() - Stop running processes
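
`configure_ceilometer` now refuses an unknown backend instead of silently falling through to the MongoDB branch, so a non-default backend has to be picked explicitly. For example (mongodb shown; mysql remains the default):

    [[local|localrc]]
    CEILOMETER_BACKEND=mongodb
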
diff --git a/lib/ceph b/lib/ceph
index 4d6ca4a..6cf481e 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -74,6 +74,10 @@
 REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
 REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
 
+# Cinder encrypted volume tests are not supported with a Ceph backend due to
+# bug 1463525.
+ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
+
 
 # Functions
 # ------------
@@ -110,7 +114,7 @@
 
 # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
 function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f20|f21|f22) ]]; then
+    if [[ ! ${DISTRO} =~ (trusty|f21|f22) ]]; then
         echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
         if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
             die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
@@ -158,7 +162,6 @@
 
 function cleanup_ceph_general {
     undefine_virsh_secret
-    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
 }
 
 
@@ -264,10 +267,6 @@
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
 
-    # NOTE(eharney): When Glance has fully migrated to Glance store,
-    # default_store can be removed from [DEFAULT].  (See lib/glance.)
-    iniset $GLANCE_API_CONF DEFAULT default_store rbd
-    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
     iniset $GLANCE_API_CONF glance_store default_store rbd
     iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
     iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
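
With the DEFAULT-section options dropped, the Ceph path now configures Glance solely through the glance_store section. Going by the iniset calls above, the relevant part of glance-api.conf should come out roughly as follows (the ceph.conf path is whatever CEPH_CONF_FILE points at, so treat it as symbolic):

    [glance_store]
    default_store = rbd
    stores = file, http, rbd
    rbd_store_ceph_conf = ${CEPH_CONF_FILE}
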
diff --git a/lib/cinder b/lib/cinder
index da22e29..e5ed2db 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -39,6 +39,7 @@
 
 # set up default directories
 GITDIR["python-cinderclient"]=$DEST/python-cinderclient
+GITDIR["os-brick"]=$DEST/os-brick
 CINDER_DIR=$DEST/cinder
 
 # Cinder virtual environment
@@ -64,7 +65,12 @@
 CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
 CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 
+# What type of LVM device Cinder should use for the LVM backend.
+# Defaults to "default", which is thick provisioning; the other valid choice
+# is "thin", which as the name implies uses LVM thin provisioning.
+CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
 
 # Default backends
 # The backend format is type:name where type is one of the supported backend
@@ -217,6 +223,7 @@
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
+    iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
     iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
@@ -322,16 +329,14 @@
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local cinder_service=$(get_or_create_service "cinder" \
-                "volume" "Cinder Volume Service")
-            get_or_create_endpoint $cinder_service "$REGION_NAME" \
+            get_or_create_service "cinder" "volume" "Cinder Volume Service"
+            get_or_create_endpoint "volume" "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
 
-            local cinder_v2_service=$(get_or_create_service "cinderv2" \
-                "volumev2" "Cinder Volume Service V2")
-            get_or_create_endpoint $cinder_v2_service "$REGION_NAME" \
+            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
+            get_or_create_endpoint "volumev2" "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
@@ -381,6 +386,13 @@
 
 # install_cinder() - Collect source and prepare
 function install_cinder {
+    # Install os-brick from git so we make sure we're testing
+    # the latest code.
+    if use_library_from_git "os-brick"; then
+        git_clone_by_name "os-brick"
+        setup_dev_lib "os-brick"
+    fi
+
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
     setup_develop $CINDER_DIR
     if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
@@ -424,12 +436,13 @@
             _configure_tgt_for_config_d
             if is_ubuntu; then
                 sudo service tgt restart
-            elif is_fedora || is_suse; then
-                restart_service tgtd
+            elif is_suse; then
+                # NOTE(dmllr): workaround restart bug
+                # https://bugzilla.suse.com/show_bug.cgi?id=934642
+                stop_service tgtd
+                start_service tgtd
             else
-                # note for other distros: unstack.sh also uses the tgt/tgtd service
-                # name, and would need to be adjusted too
-                exit_distro_not_supported "restarting tgt"
+                restart_service tgtd
             fi
             # NOTE(gfidente): ensure tgtd is running in debug mode
             sudo tgtadm --mode system --op update --name debug --value on
@@ -469,13 +482,12 @@
 function create_volume_types {
     # Create volume types
     if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
-        local be be_name be_type
+        local be be_name
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            be_type=${be%%:*}
             be_name=${be##*:}
-            # openstack volume type create --property volume_backend_name="${be_type}" ${be_name}
-            cinder type-create ${be_name} && \
-                cinder type-key ${be_name} set volume_backend_name="${be_name}"
+            # FIXME(jamielennox): Remove --os-volume-api-version pinning when
+            # osc supports volume type create on v2 api. bug #1475060
+            openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name}
         done
     fi
 }
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 35ad209..411b82c 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -51,6 +51,7 @@
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
     iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
 
     if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
         iniset $CINDER_CONF $be_name volume_clear none
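
The new CINDER_LVM_TYPE variable is threaded through to the per-backend lvm_type option above, so thin provisioning can be requested from local.conf; a minimal sketch:

    [[local|localrc]]
    # Use LVM thin provisioning for the LVM volume backend
    CINDER_LVM_TYPE=thin
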
diff --git a/lib/database b/lib/database
index ff1fafe..5bbbe31 100644
--- a/lib/database
+++ b/lib/database
@@ -70,10 +70,19 @@
 
     # For backward-compatibility, read in the MYSQL_HOST/USER variables and use
     # them as the default values for the DATABASE_HOST/USER variables.
-    MYSQL_HOST=${MYSQL_HOST:-127.0.0.1}
+    MYSQL_HOST=${MYSQL_HOST:-$SERVICE_LOCAL_HOST}
     MYSQL_USER=${MYSQL_USER:-root}
 
-    DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}}
+    # Set DATABASE_HOST equal to MYSQL_HOST. If SERVICE_IP_VERSION is 6, set
+    # DATABASE_HOST to [MYSQL_HOST] instead: MYSQL_HOST cannot use brackets because
+    # mysql does not bracket IPv6 addresses, while DATABASE_HOST must be bracketed
+    # because sqlalchemy only parses IPv6 addresses that are bracketed.
+    if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+        DATABASE_HOST=${DATABASE_HOST:-[$MYSQL_HOST]}
+    else
+        DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}}
+    fi
+
     DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}}
 
     if [ -n "$MYSQL_PASSWORD" ]; then
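
To make the bracketing rule concrete, with an illustrative (made-up) address the two variables end up as below; the URL in the comment is only a sketch of the kind of SQLAlchemy connection string that gets built from DATABASE_HOST:

    # The mysql client wants the bare address ...
    MYSQL_HOST=2001:db8::10
    # ... while SQLAlchemy needs it bracketed, e.g.
    # mysql+pymysql://root:secret@[2001:db8::10]/nova?charset=utf8
    DATABASE_HOST=[2001:db8::10]
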
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 7cd2856..fb55b60 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -11,7 +11,7 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-MYSQL_DRIVER=${MYSQL_DRIVER:-MySQL-python}
+MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
 # Force over to pymysql driver by default if we are using it.
 if is_service_enabled mysql; then
     if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then
@@ -90,12 +90,15 @@
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
-    # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and
+    # Change ‘bind-address’ from localhost (127.0.0.1) to any (::) and
     # set default db type to InnoDB
     sudo bash -c "source $TOP_DIR/functions && \
-        iniset $my_conf mysqld bind-address 0.0.0.0 && \
+        iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \
         iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \
-        iniset $my_conf mysqld default-storage-engine InnoDB"
+        iniset $my_conf mysqld default-storage-engine InnoDB && \
+        iniset $my_conf mysqld max_connections 1024 && \
+        iniset $my_conf mysqld query_cache_type OFF && \
+        iniset $my_conf mysqld query_cache_size 0"
 
 
     if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
@@ -165,6 +168,8 @@
     pip_install_gr $MYSQL_DRIVER
     if [[ "$MYSQL_DRIVER" == "MySQL-python" ]]; then
         ADDITIONAL_VENV_PACKAGES+=",MySQL-python"
+    elif [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then
+        ADDITIONAL_VENV_PACKAGES+=",PyMySQL"
     fi
 }
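
PyMySQL is now the default MySQL driver. Anyone who still needs the C-based driver can override it from local.conf (a sketch, assuming MySQL-python remains installable on the target platform):

    [[local|localrc]]
    MYSQL_DRIVER=MySQL-python
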
 
diff --git a/lib/glance b/lib/glance
index 4e1bd24..f200dca 100644
--- a/lib/glance
+++ b/lib/glance
@@ -56,6 +56,7 @@
 GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
 GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
 GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
+GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
 
 if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then
     GLANCE_SERVICE_PROTOCOL="https"
@@ -63,6 +64,7 @@
 
 # Glance connection info.  Note the port must be specified.
 GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST}
+GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292}
 GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
@@ -105,29 +107,27 @@
     # Copy over our glance configurations and update them
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
     inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
     local dburl=`database_connection_url glance`
-    iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl
+    iniset $GLANCE_REGISTRY_CONF database connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
     configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
-    fi
+    iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
     iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
 
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
     iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
     inicomment $GLANCE_API_CONF DEFAULT log_file
-    iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl
+    iniset $GLANCE_API_CONF database connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
     configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
-    fi
+    iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
     iniset_rpc_backend glance $GLANCE_API_CONF
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
@@ -139,21 +139,32 @@
 
     # Store specific configs
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+    iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
 
     iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
 
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
         iniset $GLANCE_API_CONF glance_store default_store swift
-        iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
-        iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift
-        iniset $GLANCE_API_CONF glance_store swift_store_key $SERVICE_PASSWORD
         iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
+
+        iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
+        iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
         iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v2.0/
+
+        # Commenting these out is not strictly necessary, but stale values in the conf are confusing
+        inicomment $GLANCE_API_CONF glance_store swift_store_user
+        inicomment $GLANCE_API_CONF glance_store swift_store_key
+        inicomment $GLANCE_API_CONF glance_store swift_store_auth_address
     fi
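
The Swift credentials now live in a separate reference file rather than in glance-api.conf itself. Based on the iniset calls above, the generated GLANCE_SWIFT_STORE_CONF should look roughly like this (values shown symbolically):

    [ref1]
    user = ${SERVICE_TENANT_NAME}:glance-swift
    key = ${SERVICE_PASSWORD}
    auth_address = ${KEYSTONE_SERVICE_URI}/v2.0/
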
 
     if is_service_enabled tls-proxy; then
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
+        iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT
         iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
     fi
 
@@ -195,6 +206,7 @@
     iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
     iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
+    iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
 
     # Store specific confs
     iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
@@ -216,9 +228,10 @@
     if is_service_enabled g-search; then
         cp $GLANCE_DIR/etc/glance-search.conf $GLANCE_SEARCH_CONF
         iniset $GLANCE_SEARCH_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $GLANCE_SEARCH_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
         inicomment $GLANCE_SEARCH_CONF DEFAULT log_file
         iniset $GLANCE_SEARCH_CONF DEFAULT use_syslog $SYSLOG
-        iniset $GLANCE_SEARCH_CONF DEFAULT sql_connection $dburl
+        iniset $GLANCE_SEARCH_CONF database connection $dburl
         iniset $GLANCE_SEARCH_CONF paste_deploy flavor keystone
         configure_auth_token_middleware $GLANCE_SEARCH_CONF glance $GLANCE_AUTH_CACHE_DIR/search
 
@@ -253,15 +266,14 @@
         if is_service_enabled s-proxy; then
 
             local glance_swift_user=$(get_or_create_user "glance-swift" \
-                "$SERVICE_PASSWORD" "glance-swift@example.com")
+                "$SERVICE_PASSWORD" "default" "glance-swift@example.com")
             get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local glance_service=$(get_or_create_service "glance" \
-                "image" "Glance Image Service")
-            get_or_create_endpoint $glance_service \
+            get_or_create_service "glance" "image" "Glance Image Service"
+            get_or_create_endpoint "image" \
                 "$REGION_NAME" \
                 "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
                 "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
@@ -272,10 +284,9 @@
     # Add glance-search service and endpoints
     if is_service_enabled g-search; then
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            local glance_search_service=$(get_or_create_service "glance-search" \
-                "search" "EXPERIMENTAL - Glance Graffiti Search Service")
+            get_or_create_service "glance-search" "search" "EXPERIMENTAL - Glance Graffiti Search Service"
 
-            get_or_create_endpoint $glance_search_service \
+            get_or_create_endpoint "search" \
                 "$REGION_NAME" \
                 "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \
                 "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \
diff --git a/lib/heat b/lib/heat
index 5cb0dbf..cedddd2 100644
--- a/lib/heat
+++ b/lib/heat
@@ -250,17 +250,15 @@
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local heat_service=$(get_or_create_service "heat" \
-                    "orchestration" "Heat Orchestration Service")
-            get_or_create_endpoint $heat_service \
+            get_or_create_service "heat" "orchestration" "Heat Orchestration Service"
+            get_or_create_endpoint "orchestration" \
                 "$REGION_NAME" \
                 "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
                 "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
                 "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
 
-            local heat_cfn_service=$(get_or_create_service "heat-cfn" \
-                    "cloudformation" "Heat CloudFormation Service")
-            get_or_create_endpoint $heat_cfn_service \
+            get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service"
+            get_or_create_endpoint "cloudformation"  \
                 "$REGION_NAME" \
                 "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
                 "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
diff --git a/lib/infra b/lib/infra
index c825b4e..3d68e45 100644
--- a/lib/infra
+++ b/lib/infra
@@ -29,8 +29,17 @@
 
 # install_infra() - Collect source and prepare
 function install_infra {
+    local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
     # bring down global requirements
     git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+    [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
+    # We don't care about testing git pbr in the requirements venv.
+    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
+    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
+
+    # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped
+    # down the VENV well
+    unset PIP_VIRTUAL_ENV
 
     # Install pbr
     if use_library_from_git "pbr"; then
diff --git a/lib/ironic b/lib/ironic
index 4984be1..1323446 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -285,7 +285,7 @@
 
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $IRONIC_CONF_FILE DEFAULT
+        setup_colorized_logging $IRONIC_CONF_FILE DEFAULT tenant user
     fi
 
     if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
@@ -366,7 +366,7 @@
         fi
         iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
         iniset $IRONIC_CONF_FILE glance swift_api_version v1
-        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME)
+        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
         iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
         iniset $IRONIC_CONF_FILE glance swift_container glance
         iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
@@ -411,9 +411,8 @@
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local ironic_service=$(get_or_create_service "ironic" \
-                "baremetal" "Ironic baremetal provisioning service")
-            get_or_create_endpoint $ironic_service \
+            get_or_create_service "ironic" "baremetal" "Ironic baremetal provisioning service"
+            get_or_create_endpoint "baremetal" \
                 "$REGION_NAME" \
                 "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
@@ -658,6 +657,10 @@
         # agent ramdisk gets instance image from swift
         sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
     fi
+
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
+    fi
 }
 
 function configure_tftpd {
diff --git a/lib/keystone b/lib/keystone
index 7a949cf..e2448c9 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -35,6 +35,7 @@
 # --------
 
 # Set up default directories
+GITDIR["keystoneauth"]=$DEST/keystoneauth
 GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient
 GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware
 KEYSTONE_DIR=$DEST/keystone
@@ -313,6 +314,8 @@
 
     iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS"
     # Public workers will use the server default, typically number of CPU.
+
+    iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
 }
 
 function configure_keystone_extensions {
@@ -357,13 +360,13 @@
 function create_keystone_accounts {
 
     # admin
-    local admin_tenant=$(get_or_create_project "admin")
-    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD")
+    local admin_tenant=$(get_or_create_project "admin" default)
+    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
     local admin_role=$(get_or_create_role "admin")
     get_or_add_user_project_role $admin_role $admin_user $admin_tenant
 
     # Create service project/role
-    get_or_create_project "$SERVICE_TENANT_NAME"
+    get_or_create_project "$SERVICE_TENANT_NAME" default
 
     # Service role, so service users do not have to be admins
     get_or_create_role service
@@ -382,12 +385,12 @@
     local another_role=$(get_or_create_role "anotherrole")
 
     # invisible tenant - admin can't see this one
-    local invis_tenant=$(get_or_create_project "invisible_to_admin")
+    local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
 
     # demo
-    local demo_tenant=$(get_or_create_project "demo")
+    local demo_tenant=$(get_or_create_project "demo" default)
     local demo_user=$(get_or_create_user "demo" \
-        "$ADMIN_PASSWORD" "demo@example.com")
+        "$ADMIN_PASSWORD" "default" "demo@example.com")
 
     get_or_add_user_project_role $member_role $demo_user $demo_tenant
     get_or_add_user_project_role $admin_role $admin_user $demo_tenant
@@ -406,9 +409,8 @@
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        KEYSTONE_SERVICE=$(get_or_create_service "keystone" \
-            "identity" "Keystone Identity Service")
-        get_or_create_endpoint $KEYSTONE_SERVICE \
+        get_or_create_service "keystone" "identity" "Keystone Identity Service"
+        get_or_create_endpoint "identity" \
             "$REGION_NAME" \
             "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
             "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \
@@ -426,7 +428,7 @@
 function create_service_user {
     local role=${2:-service}
 
-    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD")
+    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
     get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
 }
 
@@ -476,11 +478,23 @@
         $KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}"
     done
 
-    if [[ "$KEYSTONE_TOKEN_FORMAT" != "uuid" ]]; then
+    if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
         # Set up certificates
         rm -rf $KEYSTONE_CONF_DIR/ssl
         $KEYSTONE_BIN_DIR/keystone-manage pki_setup
     fi
+    if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then
+        rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
+        $KEYSTONE_BIN_DIR/keystone-manage fernet_setup
+    fi
+}
+
+# install_keystoneauth() - Collect source and prepare
+function install_keystoneauth {
+    if use_library_from_git "keystoneauth"; then
+        git_clone_by_name "keystoneauth"
+        setup_dev_lib "keystoneauth"
+    fi
 }
 
 # install_keystoneclient() - Collect source and prepare
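
The fernet_setup branch above means Fernet tokens can now be exercised straight from DevStack. A minimal local.conf sketch:

    [[local|localrc]]
    KEYSTONE_TOKEN_FORMAT=fernet
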
diff --git a/lib/lvm b/lib/lvm
index 1fe2683..8afd543 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -78,7 +78,7 @@
 }
 
 
-# _create_volume_group creates default volume group
+# _create_lvm_volume_group creates default volume group
 #
 # Usage: _create_lvm_volume_group() $vg $size
 function _create_lvm_volume_group {
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 519200b..0cb2856 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -138,6 +138,8 @@
 Q_HOST=${Q_HOST:-$SERVICE_HOST}
 # Default protocol
 Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
+# Default listen address
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
@@ -463,6 +465,8 @@
     fi
 
     _configure_neutron_debug_command
+
+    iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
 }
 
 function create_nova_conf_neutron {
@@ -515,9 +519,8 @@
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local neutron_service=$(get_or_create_service "neutron" \
-                "network" "Neutron Service")
-            get_or_create_endpoint $neutron_service \
+            get_or_create_service "neutron" "network" "Neutron Service"
+            get_or_create_endpoint "network" \
                 "$REGION_NAME" \
                 "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
                 "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
@@ -694,9 +697,10 @@
     if is_ssl_enabled_service "neutron"; then
         ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
     fi
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then
-        die $LINENO "Neutron did not start"
-    fi
+
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port"
+    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
+
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
         start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
@@ -718,7 +722,7 @@
                 sudo ip addr del $IP dev $PUBLIC_INTERFACE
                 sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
             done
-            sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
         fi
     fi
 }
@@ -800,6 +804,7 @@
     local from_intf=$1
     local to_intf=$2
     local add_ovs_port=$3
+    local af=$4
 
     if [[ -n "$from_intf" && -n "$to_intf" ]]; then
         # Remove the primary IP address from $from_intf and add it to $to_intf,
@@ -807,10 +812,18 @@
         # on configure we will also add $from_intf as a port on $to_intf,
         # assuming it is an OVS bridge.
 
-        local IP_BRD=$(ip -4 a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }')
+        local IP_BRD=$(ip -f $af a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }')
         local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
         local ADD_OVS_PORT=""
 
+        if [[ $af == "inet" ]]; then
+            IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }')
+        fi
+
+        if [[ $af == "inet6" ]]; then
+            IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }')
+        fi
+
         if [ "$DEFAULT_ROUTE_GW" != "" ]; then
             ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
         fi
@@ -827,7 +840,13 @@
 # runs that a clean run would need to clean up
 function cleanup_neutron {
 
-    _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False
+    if [[ $(ip -f inet a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
+    fi
+
+    if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
+    fi
 
     if is_provider_network && is_ironic_hardware; then
         for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
@@ -841,6 +860,10 @@
         neutron_ovs_base_cleanup
     fi
 
+    if [[ $Q_AGENT == "linuxbridge" ]]; then
+        neutron_lb_cleanup
+    fi
+
     # delete all namespaces created by neutron
     for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
         sudo ip netns delete ${ns}
@@ -881,6 +904,7 @@
     iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
     iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
     iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
+    iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
     # If addition config files are set, make sure their path name is set as well
     if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then
         die $LINENO "Neutron additional plugin config not set.. exiting"
@@ -1004,8 +1028,12 @@
 
     neutron_plugin_configure_l3_agent
 
-    if [[ $(ip -4 a s dev "$PUBLIC_INTERFACE" | grep -c 'inet') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True
+    if [[ $(ip -f inet a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
+    fi
+
+    if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6"
     fi
 }
 
@@ -1277,16 +1305,26 @@
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3; then
         # Configure and enable public bridge
+        local ext_gw_interface="none"
         if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-            local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+            ext_gw_interface=$(_neutron_get_ext_gw_interface)
+        elif [[ "$Q_AGENT" = "linuxbridge" ]]; then
+            # Search for the brq device that the neutron router and the network
+            # for $FIXED_RANGE will be using,
+            # e.g. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102
+            ext_gw_interface=brq${EXT_NET_ID:0:11}
+        fi
+        if [[ "$ext_gw_interface" != "none" ]]; then
             local cidr_len=${FLOATING_RANGE#*/}
+            local testcmd="ip -o link | grep -q $ext_gw_interface"
+            test_with_retry "$testcmd" "$ext_gw_interface creation failed"
             if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
             ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'`
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
-            sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
+            sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP
         fi
         _neutron_set_router_id
     fi
@@ -1321,7 +1359,7 @@
 
             # Configure interface for public bridge
             sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-            sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+            sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
         fi
         _neutron_set_router_id
     fi
@@ -1391,9 +1429,8 @@
     local timeout_sec=$5
     local probe_cmd=""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
-    if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then
-        die $LINENO "server didn't become ssh-able!"
-    fi
+    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
 }
 
 # Neutron 3rd party programs
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
old mode 100644
new mode 100755
index b348af9..fefc1c3
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -9,6 +9,20 @@
 
 function neutron_lb_cleanup {
     sudo brctl delbr $PUBLIC_BRIDGE
+
+    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then
+        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
+            sudo ip link delete $port
+        done
+    elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then
+        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
+            sudo ip link delete $port
+        done
+    fi
+    for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do
+        sudo ip link set $bridge down
+        sudo brctl delbr $bridge
+    done
 }
 
 function is_neutron_ovs_base_plugin {
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 9e72aa0..ca0b70c 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,4 +1,10 @@
 #!/bin/bash
 
-# REVISIT(devvesa): This file is intentionally left empty
-# in order to keep Q_PLUGIN=midonet work.
+# REVISIT(devvesa): This file is needed so Q_PLUGIN=midonet will work.
+
+# FIXME(yamamoto): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+    # 0 means True here
+    return 0
+}
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
old mode 100644
new mode 100755
index 2733f1f..13ffee9
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -107,11 +107,6 @@
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
 
-    if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
-        # Set local_ip if TENANT_TUNNELS are enabled.
-        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
-    fi
-
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
old mode 100644
new mode 100755
index 1d24f3b..1ff3a40
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -50,6 +50,7 @@
             die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
         fi
         iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
+        iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_bridge $OVS_TUNNEL_BRIDGE
     fi
 
     # Setup physical network bridge mappings.  Override
@@ -59,7 +60,7 @@
         OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
 
         # Configure bridge manually with physical interface as port for multi-node
-        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+        _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
     fi
     if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
@@ -92,7 +93,7 @@
         # Set up domU's L2 agent:
 
         # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
-        sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT"
+        _neutron_ovs_base_add_bridge "br-$GUEST_INTERFACE_DEFAULT"
         # Add $GUEST_INTERFACE_DEFAULT to that bridge
         sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
 
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
old mode 100644
new mode 100755
index 81561d3..f1f7f85
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -9,19 +9,28 @@
 
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
 OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""}
+OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun}
 
 function is_neutron_ovs_base_plugin {
     # Yes, we use OVS.
     return 0
 }
 
+function _neutron_ovs_base_add_bridge {
+    local bridge=$1
+    local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge"
+
+    if [ "$OVS_DATAPATH_TYPE" != "" ] ; then
+        addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
+    fi
+
+    $addbr_cmd
+}
+
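+# Usage sketch (illustration only; "netdev" is just an example datapath
+# type, not something set by this change): with OVS_DATAPATH_TYPE=netdev,
+# "_neutron_ovs_base_add_bridge br-int" runs
+#   sudo ovs-vsctl --no-wait -- --may-exist add-br br-int -- set Bridge br-int datapath_type=netdev
+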
 function _neutron_ovs_base_setup_bridge {
     local bridge=$1
     neutron-ovs-cleanup
-    sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
-    if [[ $OVS_DATAPATH_TYPE != "" ]]; then
-        sudo ovs-vsctl set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}
-    fi
+    _neutron_ovs_base_add_bridge $bridge
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
 
@@ -32,7 +41,7 @@
     done
 
     # remove all OVS bridges created by Neutron
-    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE} -e ${OVS_TUNNEL_BRIDGE}); do
         sudo ovs-vsctl del-br ${bridge}
     done
 }
@@ -92,7 +101,7 @@
         sudo ip link set $Q_PUBLIC_VETH_EX up
         sudo ip addr flush dev $Q_PUBLIC_VETH_EX
     else
-        sudo ovs-vsctl -- --may-exist add-br $PUBLIC_BRIDGE
+        _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
         sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
     fi
 }
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index f465cc9..34190f9 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -42,7 +42,7 @@
 
 function neutron_lbaas_stop {
     pids=$(ps aux | awk '/haproxy/ { print $2 }')
-    [ ! -z "$pids" ] && sudo kill $pids
+    [ ! -z "$pids" ] && sudo kill $pids || true
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/vmware_dvs b/lib/neutron_plugins/vmware_dvs
new file mode 100644
index 0000000..587d5a6
--- /dev/null
+++ b/lib/neutron_plugins/vmware_dvs
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# This file is needed so Q_PLUGIN=vmware_dvs will work.
+
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+    # 0 means True here
+    return 0
+}
diff --git a/lib/neutron_plugins/vmware_nsx_v3 b/lib/neutron_plugins/vmware_nsx_v3
new file mode 100644
index 0000000..6d8a6e6
--- /dev/null
+++ b/lib/neutron_plugins/vmware_nsx_v3
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# This file is needed so Q_PLUGIN=vmware_nsx_v3 will work.
+
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+    # 0 means True here
+    return 0
+}
diff --git a/lib/nova b/lib/nova
index da288d3..6441a89 100644
--- a/lib/nova
+++ b/lib/nova
@@ -53,6 +53,7 @@
 NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
 NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
+NOVA_API_DB=${NOVA_API_DB:-nova_api}
 
 NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
 # NOVA_API_VERSION valid options
@@ -84,6 +85,8 @@
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
+NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
 EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
 
@@ -231,6 +234,10 @@
     #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
     #    cleanup_nova_hypervisor
     #fi
+
+    if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+        _cleanup_nova_apache_wsgi
+    fi
 }
 
 # _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
@@ -276,6 +283,7 @@
         s|%SSLKEYFILE%|$nova_keyfile|g;
         s|%USER%|$STACK_USER|g;
         s|%VIRTUALENV%|$venv_path|g
+        s|%APIWORKERS%|$API_WORKERS|g
     " -i $nova_apache_conf
 
     sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf
@@ -288,6 +296,7 @@
         s|%SSLKEYFILE%|$nova_keyfile|g;
         s|%USER%|$STACK_USER|g;
         s|%VIRTUALENV%|$venv_path|g
+        s|%APIWORKERS%|$API_WORKERS|g
     " -i $nova_ec2_apache_conf
 }
 
@@ -395,22 +404,26 @@
         create_service_user "nova" "admin"
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            local nova_api_url
+            if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
+                nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
+            else
+                nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
+            fi
 
-            local nova_service=$(get_or_create_service "nova" \
-                "compute" "Nova Compute Service")
-            get_or_create_endpoint $nova_service \
+            get_or_create_service "nova" "compute" "Nova Compute Service"
+            get_or_create_endpoint "compute" \
                 "$REGION_NAME" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
+                "$nova_api_url/v2/\$(tenant_id)s" \
+                "$nova_api_url/v2/\$(tenant_id)s" \
+                "$nova_api_url/v2/\$(tenant_id)s"
 
-            local nova_v21_service=$(get_or_create_service "novav21" \
-                "computev21" "Nova Compute Service V2.1")
-            get_or_create_endpoint $nova_v21_service \
+            get_or_create_service "novav21" "computev21" "Nova Compute Service V2.1"
+            get_or_create_endpoint "computev21" \
                 "$REGION_NAME" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s"
+                "$nova_api_url/v2.1/\$(tenant_id)s" \
+                "$nova_api_url/v2.1/\$(tenant_id)s" \
+                "$nova_api_url/v2.1/\$(tenant_id)s"
         fi
     fi
 
@@ -425,9 +438,8 @@
         # EC2
         if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
 
-            local ec2_service=$(get_or_create_service "ec2" \
-                "ec2" "EC2 Compatibility Layer")
-            get_or_create_endpoint $ec2_service \
+            get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer"
+            get_or_create_endpoint "ec2" \
                 "$REGION_NAME" \
                 "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
                 "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
@@ -439,8 +451,8 @@
     if is_service_enabled n-obj swift3; then
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            local s3_service=$(get_or_create_service "s3" "s3" "S3")
-            get_or_create_endpoint $s3_service \
+            get_or_create_service "s3" "s3" "S3"
+            get_or_create_endpoint "s3" \
                 "$REGION_NAME" \
                 "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
                 "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
@@ -469,10 +481,19 @@
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
-    iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
+    if [[ $SERVICE_IP_VERSION == 6 ]]; then
+        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
+        iniset $NOVA_CONF DEFAULT use_ipv6 "True"
+    else
+        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
+    fi
     iniset $NOVA_CONF database connection `database_connection_url nova`
+    iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
-    iniset $NOVA_CONF osapi_v3 enabled "True"
+    iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
+    iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
+    iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
+    iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
 
     if is_fedora || is_suse; then
         # nova defaults to /usr/local/bin, but fedora and suse pip like to
@@ -489,6 +510,7 @@
         if is_service_enabled tls-proxy; then
             # Set the service port for a proxy to take the original
             iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
+            iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
         fi
 
         configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
@@ -551,11 +573,13 @@
     if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         # Address on which instance vncservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
-        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
         iniset $NOVA_CONF DEFAULT vnc_enabled true
         iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
         iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+        iniset $NOVA_CONF DEFAULT novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $NOVA_CONF DEFAULT xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     else
         iniset $NOVA_CONF DEFAULT vnc_enabled false
     fi
@@ -563,11 +587,12 @@
     if is_service_enabled n-spice; then
         # Address on which instance spiceservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
-        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         iniset $NOVA_CONF spice enabled true
         iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
         iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+        iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     else
         iniset $NOVA_CONF spice enabled false
     fi
@@ -607,6 +632,7 @@
     fi
 
     if is_service_enabled n-sproxy; then
+        iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $NOVA_CONF serial_console enabled True
     fi
 }
@@ -674,6 +700,9 @@
         if is_service_enabled n-cell; then
             recreate_database $NOVA_CELLS_DB
         fi
+
+        recreate_database $NOVA_API_DB
+        $NOVA_BIN_DIR/nova-manage api_db sync
     fi
 
     create_nova_cache_dir
@@ -755,8 +784,8 @@
         enable_apache_site nova-api
         enable_apache_site nova-ec2-api
         restart_apache_server
-        tail_log nova /var/log/$APACHE_NAME/nova-api.log
-        tail_log nova /var/log/$APACHE_NAME/nova-ec2-api.log
+        tail_log nova-api /var/log/$APACHE_NAME/nova-api.log
+        tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log
     else
         run_process n-api "$NOVA_BIN_DIR/nova-api"
     fi
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 96d8a44..5525cfd 100755
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -28,16 +28,21 @@
         else
             install_package qemu-kvm
             install_package libguestfs0
-            install_package python-guestfs
         fi
         install_package libvirt-bin libvirt-dev
         pip_install_gr libvirt-python
         #pip_install_gr <there-si-no-guestfs-in-pypi>
     elif is_fedora || is_suse; then
         install_package kvm
+        # there is a dependency issue with kvm (which is really just a
+        # wrapper to qemu-system-x86) that leaves some bios files out,
+        # so install qemu-kvm (which shouldn't strictly be needed, as
+        # everything has been merged into qemu-system-x86) to bring in
+        # the right packages. see
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1235890
+        install_package qemu-kvm
         install_package libvirt libvirt-devel
         pip_install_gr libvirt-python
-        install_package python-libguestfs
     fi
 }
 
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index a6a87f9..c54a716 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -25,9 +25,6 @@
 # Defaults
 # --------
 
-# File injection is disabled by default in Nova.  This will turn it back on.
-ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
-
 
 # Entry Points
 # ------------
@@ -60,22 +57,17 @@
         iniset $NOVA_CONF DEFAULT vnc_enabled "false"
     fi
 
-    ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION)
-    if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then
-        # When libguestfs is available for file injection, enable using
-        # libguestfs to inspect the image and figure out the proper
-        # partition to inject into.
-        iniset $NOVA_CONF libvirt inject_partition '-1'
-        iniset $NOVA_CONF libvirt inject_key 'true'
-    else
-        # File injection is being disabled by default in the near future -
-        # disable it here for now to avoid surprises later.
-        iniset $NOVA_CONF libvirt inject_partition '-2'
-    fi
+    # File injection is being disabled by default in the near future -
+    # disable it here for now to avoid surprises later.
+    iniset $NOVA_CONF libvirt inject_partition '-2'
 
     if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then
         iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
         iniset $NOVA_CONF libvirt images_type "ploop"
+        iniset $NOVA_CONF DEFAULT force_raw_images "False"
+        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address $HOST_IP
+        iniset $NOVA_CONF DEFAULT vncserver_listen $HOST_IP
+        iniset $NOVA_CONF DEFAULT vnc_keymap
     fi
 }
 
diff --git a/lib/oslo b/lib/oslo
index d9688a0..123572c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -22,8 +22,11 @@
 
 # Defaults
 # --------
+GITDIR["automaton"]=$DEST/automaton
 GITDIR["cliff"]=$DEST/cliff
 GITDIR["debtcollector"]=$DEST/debtcollector
+GITDIR["futurist"]=$DEST/futurist
+GITDIR["oslo.cache"]=$DEST/oslo.cache
 GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
 GITDIR["oslo.config"]=$DEST/oslo.config
 GITDIR["oslo.context"]=$DEST/oslo.context
@@ -33,8 +36,10 @@
 GITDIR["oslo.messaging"]=$DEST/oslo.messaging
 GITDIR["oslo.middleware"]=$DEST/oslo.middleware
 GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.reports"]=$DEST/oslo.reports
 GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
 GITDIR["oslo.serialization"]=$DEST/oslo.serialization
+GITDIR["oslo.service"]=$DEST/oslo.service
 GITDIR["oslo.utils"]=$DEST/oslo.utils
 GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects
 GITDIR["oslo.vmware"]=$DEST/oslo.vmware
@@ -60,8 +65,11 @@
 
 # install_oslo() - Collect source and prepare
 function install_oslo {
+    _do_install_oslo_lib "automaton"
     _do_install_oslo_lib "cliff"
     _do_install_oslo_lib "debtcollector"
+    _do_install_oslo_lib "futurist"
+    _do_install_oslo_lib "oslo.cache"
     _do_install_oslo_lib "oslo.concurrency"
     _do_install_oslo_lib "oslo.config"
     _do_install_oslo_lib "oslo.context"
@@ -71,8 +79,10 @@
     _do_install_oslo_lib "oslo.messaging"
     _do_install_oslo_lib "oslo.middleware"
     _do_install_oslo_lib "oslo.policy"
+    _do_install_oslo_lib "oslo.reports"
     _do_install_oslo_lib "oslo.rootwrap"
     _do_install_oslo_lib "oslo.serialization"
+    _do_install_oslo_lib "oslo.service"
     _do_install_oslo_lib "oslo.utils"
     _do_install_oslo_lib "oslo.versionedobjects"
     _do_install_oslo_lib "oslo.vmware"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 33ab03d..03eacd8 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -1,72 +1,32 @@
 #!/bin/bash
 #
 # lib/rpc_backend
-# Interface for interactig with different RPC backends
+# Interface for installing RabbitMQ on the system
 
 # Dependencies:
 #
 # - ``functions`` file
 # - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used
-# - ``RPC_MESSAGING_PROTOCOL`` option for configuring the messaging protocol
 
 # ``stack.sh`` calls the entry points in this order:
 #
 # - check_rpc_backend
 # - install_rpc_backend
 # - restart_rpc_backend
-# - iniset_rpc_backend
+# - iniset_rpc_backend (stable interface)
+#
+# Note: if implementing an out-of-tree plugin for an RPC backend, you
+# should install all services through normal plugin methods, then
+# redefine ``iniset_rpc_backend`` in your code. That's the one portion
+# of this file which is a standard interface.
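+#
+# A minimal sketch of such an override (illustration only; "mymq" is a
+# hypothetical backend, not part of this change):
+#
+#   function iniset_rpc_backend {
+#       local package=$1
+#       local file=$2
+#       local section=${3:-DEFAULT}
+#       iniset $file $section rpc_backend "mymq"
+#       iniset $file $section mymq_host "$SERVICE_HOST"
+#   }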
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9}
-
-# TODO(sdague): RPC backend selection is super wonky because we treat
-# messaging server as a service, which it really isn't for multi host
-QPID_HOST=${QPID_HOST:-}
-
-
 # Functions
 # ---------
 
-# Make sure we only have one rpc backend enabled.
-# Also check the specified rpc backend is available on your platform.
-function check_rpc_backend {
-    local c svc
-
-    local rpc_needed=1
-    # We rely on the fact that filenames in lib/* match the service names
-    # that can be passed as arguments to is_service_enabled.
-    # We check for a call to iniset_rpc_backend in these files, meaning
-    # the service needs a backend.
-    rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}')
-    for c in ${rpc_candidates}; do
-        if is_service_enabled $c; then
-            rpc_needed=0
-            break
-        fi
-    done
-    local rpc_backend_cnt=0
-    for svc in qpid zeromq rabbit; do
-        is_service_enabled $svc &&
-        (( rpc_backend_cnt++ )) || true
-    done
-    if [ "$rpc_backend_cnt" -gt 1 ]; then
-        echo "ERROR: only one rpc backend may be enabled,"
-        echo "       set only one of 'rabbit', 'qpid', 'zeromq'"
-        echo "       via ENABLED_SERVICES."
-    elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then
-        echo "ERROR: at least one rpc backend must be enabled,"
-        echo "       set one of 'rabbit', 'qpid', 'zeromq'"
-        echo "       via ENABLED_SERVICES."
-    fi
-
-    if is_service_enabled qpid && ! qpid_is_supported; then
-        die $LINENO "Qpid support is not available for this version of your distribution."
-    fi
-}
-
 # clean up after rpc backend - eradicate all traces so changing backends
 # produces a clean switch
 function cleanup_rpc_backend {
@@ -79,110 +39,14 @@
             # And the Erlang runtime too
             apt_get purge -y erlang*
         fi
-    elif is_service_enabled qpid; then
-        if is_fedora; then
-            uninstall_package qpid-cpp-server
-        elif is_ubuntu; then
-            uninstall_package qpidd
-        else
-            exit_distro_not_supported "qpid installation"
-        fi
-    elif is_service_enabled zeromq; then
-        if is_fedora; then
-            uninstall_package zeromq python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis python-redis
-            fi
-        elif is_ubuntu; then
-            uninstall_package libzmq1 python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis-server python-redis
-            fi
-        elif is_suse; then
-            uninstall_package libzmq1 python-pyzmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis python-redis
-            fi
-        else
-            exit_distro_not_supported "zeromq installation"
-        fi
-    fi
-
-    # Remove the AMQP 1.0 messaging libraries
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        if is_fedora; then
-            uninstall_package qpid-proton-c-devel
-            uninstall_package python-qpid-proton
-        fi
-        # TODO(kgiusti) ubuntu cleanup
     fi
 }
 
 # install rpc backend
 function install_rpc_backend {
-    # Regardless of the broker used, if AMQP 1.0 is configured load
-    # the necessary messaging client libraries for oslo.messaging
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        if is_fedora; then
-            install_package qpid-proton-c-devel
-            install_package python-qpid-proton
-        elif is_ubuntu; then
-            # TODO(kgiusti) The QPID AMQP 1.0 protocol libraries
-            # are not yet in the ubuntu repos. Enable these installs
-            # once they are present:
-            #install_package libqpid-proton2-dev
-            #install_package python-qpid-proton
-            # Also add 'uninstall' directives in cleanup_rpc_backend()!
-            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
-        else
-            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
-        fi
-        # Install pyngus client API
-        # TODO(kgiusti) can remove once python qpid bindings are
-        # available on all supported platforms _and_ pyngus is added
-        # to the requirements.txt file in oslo.messaging
-        pip_install_gr pyngus
-    fi
-
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         install_package rabbitmq-server
-    elif is_service_enabled qpid; then
-        if is_fedora; then
-            install_package qpid-cpp-server
-        elif is_ubuntu; then
-            install_package qpidd
-        else
-            exit_distro_not_supported "qpid installation"
-        fi
-        _configure_qpid
-    elif is_service_enabled zeromq; then
-        if is_fedora; then
-            install_package zeromq python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis python-redis
-            fi
-        elif is_ubuntu; then
-            install_package libzmq1 python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis-server python-redis
-            fi
-        elif is_suse; then
-            install_package libzmq1 python-pyzmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis python-redis
-            fi
-        else
-            exit_distro_not_supported "zeromq installation"
-        fi
-        # Necessary directory for socket location.
-        sudo mkdir -p /var/run/openstack
-        sudo chown $STACK_USER /var/run/openstack
-    fi
-
-    # If using the QPID broker, install the QPID python client API
-    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        install_package python-qpid
     fi
 }
 
@@ -232,17 +96,12 @@
                 sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*"
             fi
         fi
-    elif is_service_enabled qpid; then
-        echo_summary "Starting qpid"
-        restart_service qpidd
     fi
 }
 
 # builds transport url string
 function get_transport_url {
-    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/"
-    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+    if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/"
     fi
 }
@@ -252,29 +111,7 @@
     local package=$1
     local file=$2
     local section=${3:-DEFAULT}
-    if is_service_enabled zeromq; then
-        iniset $file $section rpc_backend "zmq"
-        iniset $file $section rpc_zmq_host `hostname`
-        if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-            iniset $file $section rpc_zmq_matchmaker "redis"
-            MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
-            iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
-        else
-            die $LINENO "Other matchmaker drivers not supported"
-        fi
-    elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
-        if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-            iniset $file $section rpc_backend "amqp"
-        else
-            iniset $file $section rpc_backend "qpid"
-        fi
-        iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST}
-        if [ -n "$QPID_USERNAME" ]; then
-            iniset $file $section qpid_username $QPID_USERNAME
-            iniset $file $section qpid_password $QPID_PASSWORD
-        fi
-    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+    if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend "rabbit"
         iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST
         iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
@@ -288,17 +125,6 @@
     fi
 }
 
-# Check if qpid can be used on the current distro.
-# qpid_is_supported
-function qpid_is_supported {
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-
-    # Qpid is not in openSUSE
-    ( ! is_suse )
-}
-
 function rabbit_setuser {
     local user="$1" pass="$2" found="" out=""
     out=$(sudo rabbitmqctl list_users) ||
@@ -314,85 +140,6 @@
     sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*"
 }
 
-# Set up the various configuration files used by the qpidd broker
-function _configure_qpid {
-
-    # the location of the configuration files have changed since qpidd 0.14
-    local qpid_conf_file
-    if [ -e /etc/qpid/qpidd.conf ]; then
-        qpid_conf_file=/etc/qpid/qpidd.conf
-    elif [ -e /etc/qpidd.conf ]; then
-        qpid_conf_file=/etc/qpidd.conf
-    else
-        exit_distro_not_supported "qpidd.conf file not found!"
-    fi
-
-    # force the ACL file to a known location
-    local qpid_acl_file=/etc/qpid/qpidd.acl
-    if [ ! -e $qpid_acl_file ]; then
-        sudo mkdir -p -m 755 `dirname $qpid_acl_file`
-        sudo touch $qpid_acl_file
-        sudo chmod o+r $qpid_acl_file
-    fi
-    sudo sed -i.bak '/^acl-file=/d' $qpid_conf_file
-    echo "acl-file=$qpid_acl_file" | sudo tee --append $qpid_conf_file
-
-    sudo sed -i '/^auth=/d' $qpid_conf_file
-    if [ -z "$QPID_USERNAME" ]; then
-        # no QPID user configured, so disable authentication
-        # and access control
-        echo "auth=no" | sudo tee --append $qpid_conf_file
-        cat <<EOF | sudo tee $qpid_acl_file
-acl allow all all
-EOF
-    else
-        # Configure qpidd to use PLAIN authentication, and add
-        # QPID_USERNAME to the ACL:
-        echo "auth=yes" | sudo tee --append $qpid_conf_file
-        if [ -z "$QPID_PASSWORD" ]; then
-            read_password QPID_PASSWORD "ENTER A PASSWORD FOR QPID USER $QPID_USERNAME"
-        fi
-        # Create ACL to allow $QPID_USERNAME full access
-        cat <<EOF | sudo tee $qpid_acl_file
-group admin ${QPID_USERNAME}@QPID
-acl allow admin all
-acl deny all all
-EOF
-        # Add user to SASL database
-        if is_ubuntu; then
-            install_package sasl2-bin
-        elif is_fedora; then
-            install_package cyrus-sasl-lib
-            install_package cyrus-sasl-plain
-        fi
-        local sasl_conf_file=/etc/sasl2/qpidd.conf
-        sudo sed -i.bak '/PLAIN/!s/mech_list: /mech_list: PLAIN /' $sasl_conf_file
-        local sasl_db=`sudo grep sasldb_path $sasl_conf_file | cut -f 2 -d ":" | tr -d [:blank:]`
-        if [ ! -e $sasl_db ]; then
-            sudo mkdir -p -m 755 `dirname $sasl_db`
-        fi
-        echo $QPID_PASSWORD | sudo saslpasswd2 -c -p -f $sasl_db -u QPID $QPID_USERNAME
-        sudo chmod o+r $sasl_db
-    fi
-
-    # If AMQP 1.0 is specified, ensure that the version of the
-    # broker can support AMQP 1.0 and configure the queue and
-    # topic address patterns used by oslo.messaging.
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        QPIDD=$(type -p qpidd)
-        if ! $QPIDD --help | grep -q "queue-patterns"; then
-            exit_distro_not_supported "qpidd with AMQP 1.0 support"
-        fi
-        if ! grep -q "queue-patterns=exclusive" $qpid_conf_file; then
-            cat <<EOF | sudo tee --append $qpid_conf_file
-queue-patterns=exclusive
-queue-patterns=unicast
-topic-patterns=broadcast
-EOF
-        fi
-    fi
-}
-
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/sahara b/lib/sahara
deleted file mode 100644
index 51e431a..0000000
--- a/lib/sahara
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/bash
-#
-# lib/sahara
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_sahara
-# install_python_saharaclient
-# configure_sahara
-# sahara_register_images
-# start_sahara
-# stop_sahara
-# cleanup_sahara
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-
-# Set up default directories
-GITDIR["python-saharaclient"]=$DEST/python-saharaclient
-SAHARA_DIR=$DEST/sahara
-
-SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
-SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
-
-if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
-    SAHARA_SERVICE_PROTOCOL="https"
-fi
-SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
-SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
-SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386}
-SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
-
-SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake}
-
-# Support entry points installation of console scripts
-if [[ -d $SAHARA_DIR/bin ]]; then
-    SAHARA_BIN_DIR=$SAHARA_DIR/bin
-else
-    SAHARA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,sahara
-
-# Functions
-# ---------
-
-# create_sahara_accounts() - Set up common required sahara accounts
-#
-# Tenant      User       Roles
-# ------------------------------
-# service     sahara    admin
-function create_sahara_accounts {
-
-    create_service_user "sahara"
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-        # TODO: remove "data_processing" service when #1356053 will be fixed
-        local sahara_service_old=$(openstack service create \
-            "data_processing" \
-            --name "sahara" \
-            --description "Sahara Data Processing" \
-            -f value -c id
-        )
-        local sahara_service_new=$(openstack service create \
-            "data-processing" \
-            --name "sahara" \
-            --description "Sahara Data Processing" \
-            -f value -c id
-        )
-        get_or_create_endpoint $sahara_service_old \
-            "$REGION_NAME" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-        get_or_create_endpoint $sahara_service_new \
-            "$REGION_NAME" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-    fi
-}
-
-# cleanup_sahara() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_sahara {
-
-    # Cleanup auth cache dir
-    sudo rm -rf $SAHARA_AUTH_CACHE_DIR
-}
-
-# configure_sahara() - Set config files, create data dirs, etc
-function configure_sahara {
-    sudo install -d -o $STACK_USER $SAHARA_CONF_DIR
-
-    if [[ -f $SAHARA_DIR/etc/sahara/policy.json ]]; then
-        cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR
-    fi
-
-    # Create auth cache dir
-    sudo install -d -o $STACK_USER -m 700 $SAHARA_AUTH_CACHE_DIR
-    rm -rf $SAHARA_AUTH_CACHE_DIR/*
-
-    configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR
-
-    iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT
-
-    # Set configuration to send notifications
-
-    if is_service_enabled ceilometer; then
-        iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true"
-        iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging"
-    fi
-
-    iniset $SAHARA_CONF_FILE DEFAULT verbose True
-    iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
-    iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS
-
-    iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
-
-    if is_service_enabled neutron; then
-        iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
-
-        if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
-            iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE
-        fi
-    else
-        iniset $SAHARA_CONF_FILE DEFAULT use_neutron false
-    fi
-
-    if is_service_enabled heat; then
-        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
-
-        if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then
-            iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE
-        fi
-    else
-        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
-    fi
-
-    if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE
-    fi
-
-    # Register SSL certificates if provided
-    if is_ssl_enabled_service sahara; then
-        ensure_certificates SAHARA
-
-        iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT"
-        iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY"
-    fi
-
-    iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $SAHARA_CONF_FILE DEFAULT
-    fi
-
-    if is_service_enabled tls-proxy; then
-        # Set the service port for a proxy to take the original
-        iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT
-    fi
-
-    recreate_database sahara
-    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
-}
-
-# install_sahara() - Collect source and prepare
-function install_sahara {
-    git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
-    setup_develop $SAHARA_DIR
-}
-
-# install_python_saharaclient() - Collect source and prepare
-function install_python_saharaclient {
-    if use_library_from_git "python-saharaclient"; then
-        git_clone_by_name "python-saharaclient"
-        setup_dev_lib "python-saharaclient"
-    fi
-}
-
-# sahara_register_images() - Registers images in sahara image registry
-function sahara_register_images {
-    if is_service_enabled heat && [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
-        # Register heat image for Fake plugin
-        local fake_plugin_properties="--property _sahara_tag_0.1=True"
-        fake_plugin_properties+=" --property _sahara_tag_fake=True"
-        fake_plugin_properties+=" --property _sahara_username=fedora"
-        openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image set $(basename "$HEAT_CFN_IMAGE_URL" ".qcow2") $fake_plugin_properties
-    fi
-}
-
-# start_sahara() - Start running processes, including screen
-function start_sahara {
-    local service_port=$SAHARA_SERVICE_PORT
-    local service_protocol=$SAHARA_SERVICE_PROTOCOL
-    if is_service_enabled tls-proxy; then
-        service_port=$SAHARA_SERVICE_PORT_INT
-        service_protocol="http"
-    fi
-
-    run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
-    run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
-    run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE"
-
-    echo "Waiting for Sahara to start..."
-    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then
-        die $LINENO "Sahara did not start"
-    fi
-
-    # Start proxies if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT &
-    fi
-}
-
-# stop_sahara() - Stop running processes
-function stop_sahara {
-    # Kill the Sahara screen windows
-    stop_process sahara
-    stop_process sahara-api
-    stop_process sahara-eng
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/swift b/lib/swift
index 820042d..826f233 100644
--- a/lib/swift
+++ b/lib/swift
@@ -45,6 +45,7 @@
 
 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
+SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
 
 # TODO: add logging to different location.
 
@@ -607,29 +608,31 @@
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        local swift_service=$(get_or_create_service "swift" \
-            "object-store" "Swift Service")
-        get_or_create_endpoint $swift_service \
+        get_or_create_service "swift" "object-store" "Swift Service"
+        get_or_create_endpoint "object-store" \
             "$REGION_NAME" \
             "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
             "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \
             "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    local swift_tenant_test1=$(get_or_create_project swifttenanttest1)
+    local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
     die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
-    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password "test@example.com")
+    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
+                        "default" "test@example.com")
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
     get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
 
-    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password "test3@example.com")
+    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+                                "default" "test3@example.com")
     die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
     get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
 
-    local swift_tenant_test2=$(get_or_create_project swifttenanttest2)
+    local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
     die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password "test2@example.com")
+    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+                                "default" "test2@example.com")
     die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
     get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
 
@@ -639,7 +642,8 @@
     local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
     die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
 
-    local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password "test4@example.com" $swift_domain)
+    local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+                                $swift_domain "test4@example.com")
     die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
     get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
 }
@@ -664,9 +668,9 @@
         swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
 
         for node_number in ${SWIFT_REPLICAS_SEQ}; do
-            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
-            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
-            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
         done
         swift-ring-builder object.builder rebalance
         swift-ring-builder container.builder rebalance
@@ -693,6 +697,21 @@
     fi
 }
 
+# install_ceilometermiddleware() - Collect source and prepare
+#   note that this doesn't really have anything to do with ceilometer;
+#   though ceilometermiddleware has ceilometer in its name as an
+#   artifact of history, it is not a ceilometer-specific tool. It
+#   simply generates pycadf-based notifications about requests and
+#   responses on the swift proxy
+function install_ceilometermiddleware {
+    if use_library_from_git "ceilometermiddleware"; then
+        git_clone_by_name "ceilometermiddleware"
+        setup_dev_lib "ceilometermiddleware"
+    else
+        pip_install_gr ceilometermiddleware
+    fi
+}
+
 # start_swift() - Start running processes, including screen
 function start_swift {
     # (re)start memcached to make sure we have a clean memcache.
@@ -768,7 +787,7 @@
         stop_process s-${type}
     done
     # Blast out any stragglers
-    pkill -f swift-
+    pkill -f swift- || true
 }
 
 function swift_configure_tempurls {
diff --git a/lib/tempest b/lib/tempest
index 059709d..68ddd44 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -30,6 +30,7 @@
 # - ``DEFAULT_INSTANCE_TYPE``
 # - ``DEFAULT_INSTANCE_USER``
 # - ``CINDER_ENABLED_BACKENDS``
+# - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
 #
 # ``stack.sh`` calls the entry points in this order:
 #
@@ -270,11 +271,11 @@
         fi
     fi
 
-    EC2_URL=$(openstack endpoint show -f value -c publicurl ec2 || true)
+    EC2_URL=$(get_endpoint_url ec2 public || true)
     if [[ -z $EC2_URL ]]; then
         EC2_URL="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
     fi
-    S3_URL=$(openstack endpoint show -f value -c publicurl s3 || true)
+    S3_URL=$(get_endpoint_url s3 public || true)
     if [[ -z $S3_URL ]]; then
         S3_URL="http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     fi
@@ -329,6 +330,10 @@
     if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
         iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
     fi
+
+    # Image Features
+    iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
 
     # Auth
     TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
@@ -375,6 +380,11 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True
     # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
+    iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
+    # TODO(mriedem): Remove this when kilo-eol happens since the
+    # neutron.allow_duplicate_networks option was removed from nova in Liberty
+    # and is now the default behavior.
+    iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True}
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -435,6 +445,7 @@
     # Ceilometer API optimization happened in Juno that allows to run more tests in tempest.
     # Once Tempest retires support for icehouse this flag can be removed.
     iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False"
+    iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True"
 
     # Object Store
     local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
@@ -447,6 +458,9 @@
     iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions
 
     # Volume
+    # TODO(dkranz): Remove the bootable flag when Juno is end of life.
+    iniset $TEMPEST_CONFIG volume-feature-enabled bootable True
+
     local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
         # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
@@ -542,8 +556,8 @@
     if is_service_enabled tempest; then
         # Tempest has some tests that validate various authorization checks
         # between two regular users in separate tenants
-        get_or_create_project alt_demo
-        get_or_create_user alt_demo "$ADMIN_PASSWORD" "alt_demo@example.com"
+        get_or_create_project alt_demo default
+        get_or_create_user alt_demo "$ADMIN_PASSWORD" "default" "alt_demo@example.com"
         get_or_add_user_project_role Member alt_demo alt_demo
     fi
 }
diff --git a/lib/tls b/lib/tls
index 09f1c2d..8ff2027 100644
--- a/lib/tls
+++ b/lib/tls
@@ -202,6 +202,7 @@
 # Create root and intermediate CAs
 # init_CA
 function init_CA {
+    fix_system_ca_bundle_path
     # Ensure CAs are built
     make_root_CA $ROOT_CA_DIR
     make_int_CA $INT_CA_DIR $ROOT_CA_DIR
@@ -338,6 +339,29 @@
         -outform PEM
 }
 
+# If a non-system python-requests is installed then it will use the
+# built-in CA certificate store rather than the distro-specific
+# CA certificate store. Detect this and symlink to the correct
+# one. If the value for the CA is not rooted in /etc then we know
+# we need to change it.
+function fix_system_ca_bundle_path {
+    if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
+        local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
+
+        if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
+            if is_fedora; then
+                sudo rm -f $capath
+                sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath
+            elif is_ubuntu; then
+                sudo rm -f $capath
+                sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath
+            else
+                echo "Don't know how to set the CA bundle, expect the install to fail."
+            fi
+        fi
+    fi
+}
+
 
 # Certificate Input Configuration
 # ===============================
diff --git a/lib/zaqar b/lib/zaqar
index 8d51910..fdab3a2 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -128,10 +128,9 @@
         configure_redis
     fi
 
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $ZAQAR_CONF DEFAULT notification_driver messaging
-        iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
-    fi
+    iniset $ZAQAR_CONF DEFAULT notification_driver messaging
+    iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
+
     iniset_rpc_backend zaqar $ZAQAR_CONF
 
     cleanup_zaqar
@@ -211,9 +210,8 @@
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        local zaqar_service=$(get_or_create_service "zaqar" \
-            "messaging" "Zaqar Service")
-        get_or_create_endpoint $zaqar_service \
+        get_or_create_service "zaqar" "messaging" "Zaqar Service"
+        get_or_create_endpoint "messaging" \
             "$REGION_NAME" \
             "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
             "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
diff --git a/openrc b/openrc
index 64faa58..71ba5a6 100644
--- a/openrc
+++ b/openrc
@@ -56,18 +56,26 @@
 # Region
 export OS_REGION_NAME=${REGION_NAME:-RegionOne}
 
-# Set api HOST_IP endpoint.  SERVICE_HOST may also be used to specify the endpoint,
-# which is convenient for some localrc configurations.
-HOST_IP=${HOST_IP:-127.0.0.1}
-SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+# Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION
+# is 4, else HOST_IPV6 if it's 6. SERVICE_HOST may also be used to specify the
+# endpoint, which is convenient for some localrc configurations. Additionally,
+# some exercises call Glance directly. On a single-node installation, Glance
+# should be listening on a local IP address, depending on the setting of
+# SERVICE_IP_VERSION. If it's running elsewhere, it can be set here.
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+    HOST_IPV6=${HOST_IPV6:-::1}
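+    # Bracket the IPv6 address so it can be embedded directly in endpoint URLs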
+    SERVICE_HOST=${SERVICE_HOST:-[$HOST_IPV6]}
+    GLANCE_HOST=${GLANCE_HOST:-[$HOST_IPV6]}
+else
+    HOST_IP=${HOST_IP:-127.0.0.1}
+    SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+    GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
+fi
+
 SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
 KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
 KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
 
-# Some exercises call glance directly.  On a single-node installation, Glance
-# should be listening on HOST_IP.  If its running elsewhere, it can be set here
-GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
-
 # Identity API version
 export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
 
diff --git a/samples/local.conf b/samples/local.conf
index bd0cd9c..ce70073 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -32,14 +32,15 @@
 RABBIT_PASSWORD=stackqueue
 SERVICE_PASSWORD=$ADMIN_PASSWORD
 
-# ``HOST_IP`` should be set manually for best results if the NIC configuration
-# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the
-# public interface.  It is auto-detected in ``stack.sh`` but often is indeterminate
-# on later runs due to the IP moving from an Ethernet interface to a bridge on
-# the host. Setting it here also makes it available for ``openrc`` to include
-# when setting ``OS_AUTH_URL``.
-# ``HOST_IP`` is not set by default.
+# ``HOST_IP`` and ``HOST_IPV6`` should be set manually for best results if
+# the NIC configuration of the host is unusual, i.e. ``eth1`` has the default
+# route but ``eth0`` is the public interface.  They are auto-detected in
+# ``stack.sh`` but are often indeterminate on later runs due to the IP moving
+# from an Ethernet interface to a bridge on the host. Setting them here also
+# makes them available for ``openrc`` to include when setting ``OS_AUTH_URL``.
+# Neither is set by default.
 #HOST_IP=w.x.y.z
+#HOST_IPV6=2001:db8::7
 
 
 # Logging
diff --git a/stack.sh b/stack.sh
index dc79fa9..cc8bc8c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -173,7 +173,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f20|f21|f22|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -212,6 +212,15 @@
 sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
     echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers
 
+# Conditionally setup detailed logging for sudo
+if [[ -n "$LOG_SUDO" ]]; then
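+    # 'Defaults log_output' enables sudo I/O logging so the output of commands run via sudo is recorded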
+    TEMPFILE=`mktemp`
+    echo "Defaults log_output" > $TEMPFILE
+    chmod 0440 $TEMPFILE
+    sudo chown root:root $TEMPFILE
+    sudo mv $TEMPFILE /etc/sudoers.d/00_logging
+fi
+
 # Set up DevStack sudoers
 TEMPFILE=`mktemp`
 echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE
@@ -263,6 +272,7 @@
 EOF
     # Enable a bootstrap repo.  It is removed after finishing
     # the epel-release installation.
+    is_package_installed yum-utils || install_package yum-utils
     sudo yum-config-manager --enable epel-bootstrap
     yum_install epel-release || \
         die $LINENO "Error installing EPEL repo, cannot continue"
@@ -270,11 +280,10 @@
     sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
 
     # ... and also optional to be enabled
-    is_package_installed yum-utils || install_package yum-utils
     sudo yum-config-manager --enable rhel-7-server-optional-rpms
 
-    RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm"}
-    RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-juno"}
+    RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"}
+    RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"}
 
     if ! sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then
         echo "RDO repo not detected; installing"
@@ -500,12 +509,8 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
-# Make sure we only have one rpc backend enabled,
-# and the specified rpc backend is available on your platform.
-check_rpc_backend
-
 # Service to enable with SSL if ``USE_SSL`` is True
-SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara"
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
 
 if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
     die $LINENO "tls-proxy and SSL are mutually exclusive"
@@ -669,6 +674,9 @@
     fi
 fi
 
+# Save configuration values
+save_stackenv $LINENO
+
 
 # Install Packages
 # ================
@@ -680,6 +688,9 @@
 echo_summary "Installing package prerequisites"
 source $TOP_DIR/tools/install_prereqs.sh
 
+# Normalise USE_CONSTRAINTS
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
+
 # Configure an appropriate Python environment
 if [[ "$OFFLINE" != "True" ]]; then
     PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
@@ -739,6 +750,7 @@
 install_oslo
 
 # Install client libraries
+install_keystoneauth
 install_keystoneclient
 install_glanceclient
 install_cinderclient
@@ -950,6 +962,9 @@
 # Initialize the directory for service status check
 init_service_check
 
+# Save configuration values
+save_stackenv $LINENO
+
 
 # Start Services
 # ==============
@@ -1006,6 +1021,9 @@
     # Begone token auth
     unset OS_TOKEN OS_URL
 
+    # Force v2 identity authentication even when v3-capable commands are used
+    export OS_AUTH_TYPE=v2password
+
     # Set up password auth credentials now that Keystone is bootstrapped
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
@@ -1014,15 +1032,6 @@
     export OS_REGION_NAME=$REGION_NAME
 fi
 
-
-# ZeroMQ
-# ------
-if is_service_enabled zeromq; then
-    echo_summary "Starting zeromq receiver"
-    run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
-fi
-
-
 # Horizon
 # -------
 
@@ -1287,35 +1296,44 @@
 
 
 # Save some values we generated for later use
-CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
-echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
-for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
-    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
-    echo $i=${!i} >>$TOP_DIR/.stackenv
-done
+save_stackenv
 
-# Write out a clouds.yaml file
-# putting the location into a variable to allow for easier refactoring later
-# to make it overridable. There is current no usecase where doing so makes
-# sense, so I'm not actually doing it now.
+# Update/create user clouds.yaml file.
+# clouds.yaml will have
+# - A `devstack` entry for the `demo` user for the `demo` project.
+# - A `devstack-admin` entry for the `admin` user for the `admin` project.
+
+# The location is a variable to allow for easier refactoring later to make it
+# overridable. There is currently no use case where doing so makes sense, so
+# it's not configurable.
 CLOUDS_YAML=~/.config/openstack/clouds.yaml
-if [ ! -e $CLOUDS_YAML ]; then
-    mkdir -p $(dirname $CLOUDS_YAML)
-    cat >"$CLOUDS_YAML" <<EOF
-clouds:
-  devstack:
-    auth:
-      auth_url: $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION
-      username: demo
-      project_name: demo
-      password: $ADMIN_PASSWORD
-    region_name: $REGION_NAME
-    identity_api_version: $IDENTITY_API_VERSION
-EOF
-    if [ -f "$SSL_BUNDLE_FILE" ]; then
-        echo "    cacert: $SSL_BUNDLE_FILE" >>"$CLOUDS_YAML"
-    fi
+
+mkdir -p $(dirname $CLOUDS_YAML)
+
+CA_CERT_ARG=''
+if [ -f "$SSL_BUNDLE_FILE" ]; then
+    CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
 fi
+$TOP_DIR/tools/update_clouds_yaml.py \
+    --file $CLOUDS_YAML \
+    --os-cloud devstack \
+    --os-region-name $REGION_NAME \
+    --os-identity-api-version $IDENTITY_API_VERSION \
+    $CA_CERT_ARG \
+    --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
+    --os-username demo \
+    --os-password $ADMIN_PASSWORD \
+    --os-project-name demo
+$TOP_DIR/tools/update_clouds_yaml.py \
+    --file $CLOUDS_YAML \
+    --os-cloud devstack-admin \
+    --os-region-name $REGION_NAME \
+    --os-identity-api-version $IDENTITY_API_VERSION \
+    $CA_CERT_ARG \
+    --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
+    --os-username admin \
+    --os-password $ADMIN_PASSWORD \
+    --os-project-name admin
 
 
 # Wrapup configuration
@@ -1395,7 +1413,10 @@
 echo ""
 echo ""
 echo ""
-echo "This is your host ip: $HOST_IP"
+echo "This is your host IP address: $HOST_IP"
+if [ "$HOST_IPV6" != "" ]; then
+    echo "This is your host IPv6 address: $HOST_IPV6"
+fi
 
 # If you installed Horizon on this server you should be able
 # to access the site using your browser.
diff --git a/stackrc b/stackrc
index 09ba3e9..8beef96 100644
--- a/stackrc
+++ b/stackrc
@@ -149,17 +149,12 @@
 # Zero disables timeouts
 GIT_TIMEOUT=${GIT_TIMEOUT:-0}
 
-# Requirements enforcing mode
+# Constraints mode
+# - False (default) : update git projects' dependencies from global-requirements.
 #
-# - strict (default) : ensure all project requirements files match
-#   what's in global requirements.
-#
-# - soft : enforce requirements on everything in
-#   requirements/projects.txt, but do soft updates on all other
-#   repositories (i.e. sync versions for requirements that are in g-r,
-#   but pass through any extras)
-REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict}
-
+# - True : use upper-constraints.txt to constrain versions of packages installed
+#          and do not edit projects at all.
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
 
 # Repositories
 # ------------
@@ -236,10 +231,6 @@
 NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
 NOVA_BRANCH=${NOVA_BRANCH:-master}
 
-# data processing service
-SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
-SAHARA_BRANCH=${SAHARA_BRANCH:-master}
-
 # object storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
@@ -289,6 +280,10 @@
 GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
 GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
 
+# the base authentication plugins that clients use to authenticate
+GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
+GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master}
+
 # python keystone client library to nova that horizon uses
 GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
 GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master}
@@ -301,10 +296,6 @@
 GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
 GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master}
 
-# python saharaclient
-GITREPO["python-saharaclient"]=${SAHARACLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
-GITBRANCH["python-saharaclient"]=${SAHARACLIENT_BRANCH:-master}
-
 # python swift client library
 GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
 GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
@@ -326,10 +317,22 @@
 GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
 GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
 
+# async framework/helpers
+GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git}
+GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master}
+
 # debtcollector deprecation framework/helpers
 GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
 GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master}
 
+# helpful state machines
+GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git}
+GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master}
+
+# oslo.cache
+GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git}
+GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master}
+
 # oslo.concurrency
 GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
 GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
@@ -366,6 +369,10 @@
 GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
 GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
 
+# oslo.reports
+GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
+GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
+
 # oslo.rootwrap
 GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
 GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
@@ -374,6 +381,10 @@
 GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
 GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master}
 
+# oslo.service
+GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git}
+GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master}
+
 # oslo.utils
 GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
 GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master}
@@ -441,6 +452,10 @@
 GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
 GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master}
 
+# os-brick library to manage local volume attaches
+GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
+GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master}
+
 
 ##################
 #
@@ -564,8 +579,8 @@
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
         IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};;
     xenserver)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
-        IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"}
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk}
+        IMAGE_URLS=${IMAGE_URLS:-"http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"}
         IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
     ironic)
         # Ironic can do both partition and full disk images, depending on the driver
@@ -658,14 +673,54 @@
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
+HOST_IPV6=${HOST_IPV6:-}
 
-HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
+HOST_IP=$(get_default_host_ip "$FIXED_RANGE" "$FLOATING_RANGE" "$HOST_IP_IFACE" "$HOST_IP" "inet")
 if [ "$HOST_IP" == "" ]; then
     die $LINENO "Could not determine host ip address.  See local.conf for suggestions on setting HOST_IP."
 fi
 
-# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
-SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6")
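+# Unlike HOST_IP, an empty HOST_IPV6 is not fatal here; it is only required when SERVICE_IP_VERSION is 6 (checked below)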
+
+# SERVICE IP version
+# This is the IP version that services should be listening on, as well
+# as using to register their endpoints with keystone.
+SERVICE_IP_VERSION=${SERVICE_IP_VERSION:-4}
+
+# Validate SERVICE_IP_VERSION
+# It would be nice to support "4+6" here as well, but that will require
+# multiple calls into keystone to register endpoints, so for now let's
+# just support one or the other.
+if [[ $SERVICE_IP_VERSION != "4" ]] && [[ $SERVICE_IP_VERSION != "6" ]]; then
+    die $LINENO "SERVICE_IP_VERSION must be either 4 or 6"
+fi
+
+if [[ "$SERVICE_IP_VERSION" == 4 ]]; then
+    DEF_SERVICE_HOST=$HOST_IP
+    DEF_SERVICE_LOCAL_HOST=127.0.0.1
+    DEF_SERVICE_LISTEN_ADDRESS=0.0.0.0
+fi
+
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+    if [ "$HOST_IPV6" == "" ]; then
+        die $LINENO "Could not determine host IPv6 address.  See local.conf for suggestions on setting HOST_IPV6."
+    fi
+
+    DEF_SERVICE_HOST=[$HOST_IPV6]
+    DEF_SERVICE_LOCAL_HOST=::1
+    DEF_SERVICE_LISTEN_ADDRESS=::
+fi
+
+# This is either 0.0.0.0 for IPv4 or :: for IPv6
+SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}}
+
+# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for
+# service endpoints.  Default is dependent on SERVICE_IP_VERSION above.
+SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}}
+# This is either 127.0.0.1 for IPv4 or ::1 for IPv6
+SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}}
+
+REGION_NAME=${REGION_NAME:-RegionOne}
 
 # Configure services to use syslog instead of writing to individual log files
 SYSLOG=$(trueorfalse False SYSLOG)
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 8210d0a..d10cd0e 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -35,11 +35,13 @@
 ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
-ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth"
+ALL_LIBS+=" oslo.serialization django_openstack_auth"
 ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
 ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
-ALL_LIBS+=" debtcollector"
+ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
+ALL_LIBS+=" oslo.cache oslo.reports"
+ALL_LIBS+=" keystoneauth"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh
new file mode 100755
index 0000000..f407d40
--- /dev/null
+++ b/tests/test_worlddump.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Simple test of worlddump.py
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+source $TOP/tests/unittest.sh
+
+OUT_DIR=$(mktemp -d)
+
+$TOP/tools/worlddump.py -d $OUT_DIR
+
+if [[ $? -ne 0 ]]; then
+    fail "worlddump failed"
+else
+
+    # worlddump creates just one output file
+    OUT_FILE=($OUT_DIR/*.txt)
+
+    if [ ! -r $OUT_FILE ]; then
+        failed "worlddump output not seen"
+    else
+        passed "worlddump output $OUT_FILE"
+
+        if [[ $(stat -c %s $OUT_DIR/*.txt) -gt 0 ]]; then
+            passed "worlddump output is not zero sized"
+        fi
+
+        # put more extensive examination here, if required.
+    fi
+fi
+
+rm -rf $OUT_DIR
+
+report_results
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index f067ed1..c2dbe1a 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -16,45 +16,49 @@
 
 usage: $0 <options..>
 
-This script creates certificates and sourcable rc files per tenant/user.
+This script creates certificates and sourcable rc files per project/user.
 
 Target account directory hierarchy:
 target_dir-|
            |-cacert.pem
-           |-tenant1-name|
-           |             |- user1
-           |             |- user1-cert.pem
-           |             |- user1-pk.pem
-           |             |- user2
-           |             ..
-           |-tenant2-name..
+           |-project1-name|
+           |              |- user1
+           |              |- user1-cert.pem
+           |              |- user1-pk.pem
+           |              |- user2
+           |              ..
+           |-project2-name..
            ..
 
 Optional Arguments
 -P include password to the rc files; with -A it assume all users password is the same
 -A try with all user
 -u <username> create files just for the specified user
--C <tenant_name> create user and tenant, the specifid tenant will be the user's tenant
--r <name> when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member)
+-C <project_name> create user and project, the specified project will be the user's project
+-r <name> when combined with -C and the (-u) user exists it will be the user's role in the (-C) project (default: Member)
 -p <userpass> password for the user
 --heat-url <heat_url>
 --os-username <username>
 --os-password <admin password>
---os-tenant-name <tenant_name>
---os-tenant-id <tenant_id>
+--os-project-name <project_name>
+--os-project-id <project_id>
+--os-user-domain-id <user_domain_id>
+--os-user-domain-name <user_domain_name>
+--os-project-domain-id <project_domain_id>
+--os-project-domain-name <project_domain_name>
 --os-auth-url <auth_url>
 --os-cacert <cert file>
 --target-dir <target_directory>
---skip-tenant <tenant-name>
+--skip-project <project-name>
 --debug
 
 Example:
 $0 -AP
-$0 -P -C mytenant -u myuser -p mypass
+$0 -P -C myproject -u myuser -p mypass
 EOF
 }
 
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,heat-url:,skip-tenant:,os-cacert:,help,debug -- "$@"); then
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-project-domain-id:,os-project-domain-name:,os-user-domain-id:,os-user-domain-name:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then
     display_help
     exit 1
 fi
@@ -62,10 +66,10 @@
 ADDPASS=""
 HEAT_URL=""
 
-# The services users usually in the service tenant.
+# The services users usually in the service project.
 # rc files for service users, is out of scope.
-# Supporting different tenant for services is out of scope.
-SKIP_TENANT="service"
+# Supporting different project for services is out of scope.
+SKIP_PROJECT="service"
 MODE=""
 ROLE=Member
 USER_NAME=""
@@ -75,9 +79,16 @@
     -h|--help) display_help; exit 0 ;;
     --os-username) export OS_USERNAME=$2; shift ;;
     --os-password) export OS_PASSWORD=$2; shift ;;
-    --os-tenant-name) export OS_TENANT_NAME=$2; shift ;;
-    --os-tenant-id) export OS_TENANT_ID=$2; shift ;;
-    --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
+    --os-tenant-name) export OS_PROJECT_NAME=$2; shift ;;
+    --os-tenant-id) export OS_PROJECT_ID=$2; shift ;;
+    --os-project-name) export OS_PROJECT_NAME=$2; shift ;;
+    --os-project-id) export OS_PROJECT_ID=$2; shift ;;
+    --os-user-domain-id) export OS_USER_DOMAIN_ID=$2; shift ;;
+    --os-user-domain-name) export OS_USER_DOMAIN_NAME=$2; shift ;;
+    --os-project-domain-id) export OS_PROJECT_DOMAIN_ID=$2; shift ;;
+    --os-project-domain-name) export OS_PROJECT_DOMAIN_NAME=$2; shift ;;
+    --skip-tenant) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;;
+    --skip-project) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;;
     --os-auth-url) export OS_AUTH_URL=$2; shift ;;
     --os-cacert) export OS_CACERT=$2; shift ;;
     --target-dir) ACCOUNT_DIR=$2; shift ;;
@@ -87,7 +98,7 @@
     -p) USER_PASS=$2; shift ;;
     -A) MODE=all; ;;
     -P) ADDPASS="yes" ;;
-    -C) MODE=create; TENANT=$2; shift ;;
+    -C) MODE=create; PROJECT=$2; shift ;;
     -r) ROLE=$2; shift ;;
     (--) shift; break ;;
     (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;;
@@ -105,8 +116,16 @@
     fi
 fi
 
-if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then
-    export OS_TENANT_NAME=admin
+if [ -z "$OS_PROJECT_ID" -a "$OS_TENANT_ID" ]; then
+    export OS_PROJECT_ID=$OS_TENANT_ID
+fi
+
+if [ -z "$OS_PROJECT_NAME" -a "$OS_TENANT_NAME" ]; then
+    export OS_PROJECT_NAME=$OS_TENANT_NAME
+fi
+
+if [ -z "$OS_PROJECT_NAME" -a -z "$OS_PROJECT_ID" ]; then
+    export OS_PROJECT_NAME=admin
 fi
 
 if [ -z "$OS_USERNAME" ]; then
@@ -117,6 +136,16 @@
     export OS_AUTH_URL=http://localhost:5000/v2.0/
 fi
 
+if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then
+    # purposefully not exported as it would force v3 auth within this file.
+    OS_USER_DOMAIN_ID=default
+fi
+
+if [ -z "$OS_PROJECT_DOMAIN_ID" -a -z "$OS_PROJECT_DOMAIN_NAME" ]; then
+    # purposefully not exported as it would force v3 auth within this file.
+    OS_PROJECT_DOMAIN_ID=default
+fi
+
 USER_PASS=${USER_PASS:-$OS_PASSWORD}
 USER_NAME=${USER_NAME:-$OS_USERNAME}
 
@@ -156,21 +185,21 @@
 function add_entry {
     local user_id=$1
     local user_name=$2
-    local tenant_id=$3
-    local tenant_name=$4
+    local project_id=$3
+    local project_name=$4
     local user_passwd=$5
 
     # The admin user can see all user's secret AWS keys, it does not looks good
-    local line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "`
+    local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
     if [ -z "$line" ]; then
-        openstack ec2 credentials create --user $user_id --project $tenant_id 1>&2
-        line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "`
+        openstack ec2 credentials create --user $user_id --project $project_id 1>&2
+        line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
     fi
     local ec2_access_key ec2_secret_key
     read ec2_access_key ec2_secret_key <<<  `echo $line | awk '{print $2 " " $4 }'`
-    mkdir -p "$ACCOUNT_DIR/$tenant_name"
-    local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name"
-    # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN
+    mkdir -p "$ACCOUNT_DIR/$project_name"
+    local rcfile="$ACCOUNT_DIR/$project_name/$user_name"
+    # The cert's subject is the project ID "dash" user ID, but the CN should be the first part of the DN
     # Generally the subject DN parts should be in reverse order like the Issuer
     # The Serial does not seams correctly marked either
     local ec2_cert="$rcfile-cert.pem"
@@ -183,7 +212,7 @@
         mv -f "$ec2_cert" "$ec2_cert.old"
     fi
     # It will not create certs when the password is incorrect
-    if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
+    if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-project-name "$project_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
         if [ -e "$ec2_private_key.old" ]; then
             mv -f "$ec2_private_key.old" "$ec2_private_key"
         fi
@@ -199,8 +228,8 @@
 export S3_URL="$S3_URL"
 # OpenStack USER ID = $user_id
 export OS_USERNAME="$user_name"
-# OpenStack Tenant ID = $tenant_id
-export OS_TENANT_NAME="$tenant_name"
+# OpenStack project ID = $project_id
+export OS_PROJECT_NAME="$project_name"
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
 export EC2_CERT="$ec2_cert"
@@ -208,14 +237,22 @@
 export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
 export EUCALYPTUS_CERT="$ACCOUNT_DIR/cacert.pem"
 export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
+export OS_AUTH_TYPE=v2password
 EOF
     if [ -n "$ADDPASS" ]; then
         echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
     fi
     if [ -n "$HEAT_URL" ]; then
-        echo "export HEAT_URL=\"$HEAT_URL/$tenant_id\"" >>"$rcfile"
+        echo "export HEAT_URL=\"$HEAT_URL/$project_id\"" >>"$rcfile"
         echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile"
     fi
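+    # Write out any domain variables that are set and explicitly unset the rest so stale values do not leak into the generated rc file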
+    for v in OS_USER_DOMAIN_ID OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_ID OS_PROJECT_DOMAIN_NAME; do
+        if [ ${!v} ]; then
+            echo "export $v=${!v}" >>"$rcfile"
+        else
+            echo "unset $v" >>"$rcfile"
+        fi
+    done
 }
 
 #admin users expected
@@ -245,9 +282,9 @@
 }
 
 if [ $MODE != "create" ]; then
-    # looks like I can't ask for all tenant related to a specified user
-    openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_TENANT}" | while IFS=, read tenant_id tenant_name desc enabled; do
-        openstack user list --project $tenant_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do
+    # looks like I can't ask for all project related to a specified user
+    openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_PROJECT}" | while IFS=, read project_id project_name desc enabled; do
+        openstack user list --project $project_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do
             if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then
                 continue;
             fi
@@ -259,21 +296,21 @@
             if [ -n "$SPECIFIC_UPASSWORD" ]; then
                 USER_PASS=$SPECIFIC_UPASSWORD
             fi
-            add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+            add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS"
         done
     done
 else
-    tenant_name=$TENANT
-    tenant_id=$(create_or_get_project "$TENANT")
+    project_name=$PROJECT
+    project_id=$(create_or_get_project "$PROJECT")
     user_name=$USER_NAME
     user_id=`get_user_id $user_name`
     if [ -z "$user_id" ]; then
-        eval $(openstack user create "$user_name" --project "$tenant_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id)
+        eval $(openstack user create "$user_name" --project "$project_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id)
         user_id=$id
-        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+        add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS"
     else
         role_id=$(create_or_get_role "$ROLE")
-        openstack role add "$role_id" --user "$user_id" --project "$tenant_id"
-        add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
+        openstack role add "$role_id" --user "$user_id" --project "$project_id"
+        add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS"
     fi
 fi
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 31258d1..4fff57f 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -126,6 +126,9 @@
         # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html
         if is_package_installed firewalld; then
             sudo systemctl disable firewalld
+            # The iptables service files are no longer included by default,
+            # at least on a baremetal Fedora 21 Server install.
+            install_package iptables-services
             sudo systemctl enable iptables
             sudo systemctl stop firewalld
             sudo systemctl start iptables
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
new file mode 100755
index 0000000..3a364fe
--- /dev/null
+++ b/tools/update_clouds_yaml.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Update the clouds.yaml file.
+
+
+import argparse
+import os.path
+
+import yaml
+
+
+class UpdateCloudsYaml(object):
+    def __init__(self, args):
+        if args.file:
+            self._clouds_path = args.file
+            self._create_directory = False
+        else:
+            self._clouds_path = os.path.expanduser(
+                '~/.config/openstack/clouds.yaml')
+            self._create_directory = True
+        self._clouds = {}
+
+        self._cloud = args.os_cloud
+        self._cloud_data = {
+            'region_name': args.os_region_name,
+            'identity_api_version': args.os_identity_api_version,
+            'auth': {
+                'auth_url': args.os_auth_url,
+                'username': args.os_username,
+                'password': args.os_password,
+                'project_name': args.os_project_name,
+            },
+        }
+        if args.os_identity_api_version == '3':
+            self._cloud_data['auth']['user_domain_id'] = 'default'
+            self._cloud_data['auth']['project_domain_id'] = 'default'
+        if args.os_cacert:
+            self._cloud_data['cacert'] = args.os_cacert
+
+    def run(self):
+        self._read_clouds()
+        self._update_clouds()
+        self._write_clouds()
+
+    def _read_clouds(self):
+        try:
+            with open(self._clouds_path) as clouds_file:
+                self._clouds = yaml.load(clouds_file)
+        except IOError:
+            # The user doesn't have a clouds.yaml file.
+            print("The user clouds.yaml file didn't exist.")
+            self._clouds = {}
+
+    def _update_clouds(self):
+        self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data
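+        # Add or replace this cloud's entry while leaving any other clouds in the file untouched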
+
+    def _write_clouds(self):
+
+        if self._create_directory:
+            clouds_dir = os.path.dirname(self._clouds_path)
+            os.makedirs(clouds_dir)
+
+        with open(self._clouds_path, 'w') as clouds_file:
+            yaml.dump(self._clouds, clouds_file, default_flow_style=False)
+
+
+def main():
+    parser = argparse.ArgumentParser('Update clouds.yaml file.')
+    parser.add_argument('--file')
+    parser.add_argument('--os-cloud', required=True)
+    parser.add_argument('--os-region-name', default='RegionOne')
+    parser.add_argument('--os-identity-api-version', default='3')
+    parser.add_argument('--os-cacert')
+    parser.add_argument('--os-auth-url', required=True)
+    parser.add_argument('--os-username', required=True)
+    parser.add_argument('--os-password', required=True)
+    parser.add_argument('--os-project-name', required=True)
+
+    args = parser.parse_args()
+
+    update_clouds_yaml = UpdateCloudsYaml(args)
+    update_clouds_yaml.run()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/worlddump.py b/tools/worlddump.py
index d846f10..e4ba02b 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -21,6 +21,7 @@
 import fnmatch
 import os
 import os.path
+import subprocess
 import sys
 
 
@@ -46,7 +47,10 @@
     print cmd
     print "-" * len(cmd)
     print
-    print os.popen(cmd).read()
+    try:
+        subprocess.check_call(cmd, shell=True)
+    except subprocess.CalledProcessError:
+        print "*** Failed to run: %s" % cmd
 
 
 def _header(name):
@@ -106,6 +110,19 @@
             _dump_cmd("sudo cat %s" % fullpath)
 
 
+def guru_meditation_report():
+    _header("nova-compute Guru Meditation Report")
+
+    try:
+        subprocess.check_call(["pgrep", "nova-compute"])
+    except subprocess.CalledProcessError:
+        print "Skipping as nova-compute does not appear to be running"
+        return
+
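+    # Sending SIGUSR1 makes nova-compute write a Guru Meditation Report to its log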
+    _dump_cmd("kill -s USR1 `pgrep nova-compute`")
+    print "guru meditation report in nova-compute log"
+
+
 def main():
     opts = get_options()
     fname = filename(opts.dir)
@@ -118,6 +135,7 @@
         network_dump()
         iptables_dump()
         compute_consoles()
+        guru_meditation_report()
 
 
 if __name__ == '__main__':
diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh
deleted file mode 100755
index 0eb2077..0000000
--- a/tools/xen/build_domU_multi.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-
-# Echo commands
-set -o xtrace
-
-# Head node host, which runs glance, api, keystone
-HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
-HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}
-
-COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
-COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}
-
-# Networking params
-FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
-
-# Variables common amongst all hosts in the cluster
-COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"
-
-# Helper to launch containers
-function build_xva {
-    GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_xva.sh
-}
-
-# Launch the head node - headnode uses a non-ip domain name,
-# because rabbit won't launch with an ip addr hostname :(
-build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
-
-# Build the HA compute host
-build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"
diff --git a/tools/xen/files/fstab b/tools/xen/files/fstab
deleted file mode 100644
index 6c9b981..0000000
--- a/tools/xen/files/fstab
+++ /dev/null
@@ -1,5 +0,0 @@
-LABEL=vpxroot           /                       ext3    defaults        1 1
-tmpfs                   /dev/shm                tmpfs   defaults        0 0
-devpts                  /dev/pts                devpts  gid=5,mode=620  0 0
-sysfs                   /sys                    sysfs   defaults        0 0
-proc                    /proc                   proc    defaults        0 0
diff --git a/tools/xen/files/hvc0.conf b/tools/xen/files/hvc0.conf
deleted file mode 100644
index 4eedaf6..0000000
--- a/tools/xen/files/hvc0.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# hvc0 - getty
-#
-# This service maintains a getty on hvc0 from the point the system is
-# started until it is shut down again.
-
-start on stopped rc RUNLEVEL=[2345]
-stop on runlevel [!2345]
-
-respawn
-exec /sbin/getty -8 9600 hvc0
diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva
deleted file mode 100755
index 392c05b..0000000
--- a/tools/xen/scripts/mkxva
+++ /dev/null
@@ -1,365 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-
-set -eu
-
-set -o xtrace
-
-VBOX_IMG=/output/packages/vbox-img
-
-usage() {
-    cat >&2 <<EOF
-$0 -o <output filenames> -t <types> -x <xml files> <fs-staging-dir> <fs-size-MiB> <tmpdir>
-  -o: Colon-separated list of output filenames (one for each type).
-  -p: Create a disk label and partition within the output image
-  -t: Colon-separated list of types of output file.  xva and ovf supported.
-  -x: XML filenames (one for each type)
-
-EOF
-    exit 1
-}
-
-# parse cmdline
-
-OPT_USE_PARTITION=
-OPT_TYPES=
-OPT_OUTPUT_FILES=
-OPT_XML_FILES=
-
-while getopts o:pt:x: o
-do case "$o" in
-    o)    OPT_OUTPUT_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g')
-        ;;
-    p)    OPT_USE_PARTITION=1
-        ;;
-    t)    OPT_TYPES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g')
-        ;;
-    x)    OPT_XML_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g')
-        ;;
-    [?])  usage
-        ;;
-    esac
-done
-shift $((OPTIND-1))
-
-[ $# -ne 3 ] && usage
-FS_STAGING="$1"
-FS_SIZE_MIB="$2"
-TMPDIR="$3"
-
-if [ "$UID" = "0" ]
-then
-  SUDO=
-else
-  SUDO=sudo
-fi
-
-if [ "$FS_SIZE_MIB" = "0" ]
-then
-    # Just create a dummy file.  This allows developers to bypass bits of
-    # the build by setting the size to 0.
-    touch $OPT_OUTPUT_FILES
-    exit 0
-fi
-
-# create temporary files and dirs
-FS_TMPFILE=$(mktemp "$TMPDIR/mkxva-fsimg-XXXXX")
-XVA_TARBALL_STAGING=$(mktemp -d "$TMPDIR/mkxva-tarball-staging-XXXXX")
-OVF_STAGING=$(mktemp -d "$TMPDIR/mkxva-ovf-staging-XXXXX")
-
-# Find udevsettle and udevtrigger on this installation
-if [ -x "/sbin/udevsettle" ] ; then
-    UDEVSETTLE="/sbin/udevsettle --timeout=30"
-elif [ -x "/sbin/udevadm" ] ; then
-    UDEVSETTLE='/sbin/udevadm settle'
-else
-    UDEVSETTLE='/bin/true'
-fi
-
-if [ -x "/sbin/udevtrigger" ] ; then
-    UDEVTRIGGER=/sbin/udevtrigger
-elif [ -x "/sbin/udevadm" ] ; then
-    UDEVTRIGGER='/sbin/udevadm trigger'
-else
-    UDEVTRIGGER=
-fi
-
-# CLEAN_ variables track devices and mounts that must be taken down
-# no matter how the script exits.  Loop devices are vulnerable to
-# exhaustion so we make every effort to remove them
-
-CLEAN_KPARTX=
-CLEAN_LOSETUP=
-CLEAN_MOUNTPOINT=
-
-cleanup_devices () {
-    if [ -n "$CLEAN_MOUNTPOINT" ] ; then
-        echo "Mountpoint $CLEAN_MOUNTPOINT removed on abnormal exit"
-        $SUDO umount "$CLEAN_MOUNTPOINT" || echo "umount failed"
-        rmdir "$CLEAN_MOUNTPOINT" || echo "rmdir failed"
-    fi
-    if [ -n "$CLEAN_KPARTX" ] ; then
-        echo "kpartx devices for $CLEAN_KPARTX removed on abnormal exit"
-        $SUDO kpartx -d "$CLEAN_KPARTX" || echo "kpartx -d failed"
-    fi
-    if [ -n "$CLEAN_LOSETUP" ] ; then
-        echo "Loop device $CLEAN_LOSETUP removed on abnormal exit"
-        $SUDO losetup -d "$CLEAN_LOSETUP" # Allow losetup errors to propagate
-    fi
-}
-
-trap "cleanup_devices" EXIT
-
-make_fs_inner () {
-    local staging="$1"
-    local output="$2"
-    local options="$3"
-    CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX")
-
-    # copy staging dir contents to fs image
-    $SUDO mount $options "$output" "$CLEAN_MOUNTPOINT"
-    $SUDO tar -C "$staging" -c . | tar -C "$CLEAN_MOUNTPOINT" -x
-    $SUDO umount "$CLEAN_MOUNTPOINT"
-    rmdir "$CLEAN_MOUNTPOINT"
-    CLEAN_MOUNTPOINT=
-}
-
-# Turn a staging dir into an ext3 filesystem within a partition
-make_fs_in_partition () {
-    local staging="$1"
-    local output="$2"
-
-    # create new empty disk
-    dd if=/dev/zero of="$output" bs=1M count=$FS_SIZE_MIB
-    # Set up a loop device on the empty disk image
-    local loopdevice=$($SUDO losetup -f)
-    $SUDO losetup "$loopdevice" "$output"
-    CLEAN_LOSETUP="$loopdevice"
-    # Create a partition table and single partition.
-    # Start partition at sector 63 to allow space for grub
-    cat <<EOF
-Errors from sfdisk below are expected because the new disk is uninitialised
-  Expecting: sfdisk: ERROR: sector 0 does not have an msdos signature
-  Expecting: /dev/loop0: unrecognized partition table type
-EOF
-    $SUDO sfdisk -uS "$CLEAN_LOSETUP" <<EOF
-63 - - *
-EOF
-
-    # kpartx creates a device for the new partition
-    # in the form /dev/mapper/loop1p1
-    $SUDO kpartx -av "$CLEAN_LOSETUP"
-    CLEAN_KPARTX="$CLEAN_LOSETUP"
-    # Wait for the device to appear
-    $UDEVTRIGGER
-    $UDEVSETTLE  || echo "udev settle command return code non-zero"
-    # Infer the name of the partition device
-    local partition="${CLEAN_LOSETUP/dev/dev/mapper}p1"
-    # Set permissive privileges on the device
-    $SUDO chmod 0777 "$partition"
-    # Make an ext3 filesystem on the partition
-    /sbin/mkfs.ext3 -I 128 -m0 -F "$partition"
-    /sbin/e2label "$partition" vpxroot
-    make_fs_inner "$staging" "$partition" ""
-
-    # Now run grub on the image we've created
-    CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX")
-
-    # copy Set up[ grub files prior to installing grub within the image
-    $SUDO mount "$partition" "$CLEAN_MOUNTPOINT"
-    $SUDO cp $CLEAN_MOUNTPOINT/usr/share/grub/i386-redhat/* "$CLEAN_MOUNTPOINT/boot/grub"
-    kernel_version=$($SUDO chroot "$CLEAN_MOUNTPOINT" rpm -qv kernel | sed -e 's/kernel-//')
-    kernel_version_xen=$($SUDO chroot "$CLEAN_MOUNTPOINT" rpm -qv kernel-xen | sed -e 's/kernel-xen-//')
-    $SUDO cat > "$CLEAN_MOUNTPOINT/boot/grub/grub.conf" <<EOF
-default 0
-timeout 2
-
-title vmlinuz-$kernel_version (HVM)
-        root (hd0,0)
-        kernel /boot/vmlinuz-$kernel_version ro root=LABEL=vpxroot
-        initrd /boot/initrd-$kernel_version.img
-
-title vmlinuz-${kernel_version_xen}xen (PV)
-        root (hd0,0)
-        kernel /boot/vmlinuz-${kernel_version_xen}xen ro root=LABEL=vpxroot console=xvc0
-        initrd /boot/initrd-${kernel_version_xen}xen.img
-EOF
-
-    $SUDO umount "$CLEAN_MOUNTPOINT"
-    CLEAN_MOUNTPOINT=
-
-    # Grub expects a disk with name /dev/xxxx with a first partition
-    # named /dev/xxxx1, so we give it what it wants using symlinks
-    # Note: /dev is linked to the real /dev of the build machine, so
-    # must be cleaned up
-    local disk_name="/dev/osxva$$bld"
-    local disk_part1_name="${disk_name}1"
-    rm -f "$disk_name"
-    rm -f "$disk_part1_name"
-    ln -s "$CLEAN_LOSETUP" "$disk_name"
-    ln -s "$partition" "$disk_part1_name"
-
-    # Feed commands into the grub shell to setup the disk
-    grub --no-curses --device-map=/dev/null <<EOF
-device (hd0) $disk_name
-setup (hd0) (hd0,0)
-quit
-EOF
-
-    # Cleanup
-    rm -f "$disk_name"
-    rm -f "$disk_part1_name"
-    $SUDO kpartx -dv "$CLEAN_KPARTX"
-    CLEAN_KPARTX=
-    $SUDO losetup -d "$CLEAN_LOSETUP"
-    CLEAN_LOSETUP=
-}
-
-# turn a staging dir into an ext3 filesystem image
-make_fs () {
-    local staging="$1"
-    local output="$2"
-
-    # create new empty fs
-    dd if=/dev/zero of="$output" bs=1M count=0 seek=$FS_SIZE_MIB
-    /sbin/mkfs.ext3 -m0 -F "$output"
-    /sbin/e2label "$output" vpxroot
-    make_fs_inner "$staging" "$output" "-oloop"
-}
-
-
-# split a virtual disk image into the format expected inside an xva file
-splitvdi () {
-    local diskimg="$1"
-    local outputdir="$2"
-    local rio="$3"
-
-    local n_bytes=$(stat --printf=%s "$diskimg")
-    local n_meg=$((($n_bytes+$((1024*1024 -1)))/$((1024*1024))))
-    local i=0
-    while [ $i -lt $n_meg ] ; do
-	if [ $rio -eq 0 ] ; then
-		local file="$outputdir"/chunk-$(printf "%08d" $i)
-		dd if="$diskimg" of="$file" skip=$i bs=1M count=1 2>/dev/null
-		gzip "$file"
-	else
-		local file="$outputdir"/$(printf "%08d" $i)
-	        dd if="$diskimg" of="$file" skip=$i bs=1M count=1 2>/dev/null
-		local chksum=$(sha1sum -b "$file")
-		echo -n "${chksum/ */}" > "$file.checksum"
-	fi
-	i=$(($i + 1))
-    done
-}
-
-if [ -n "$OPT_USE_PARTITION" ] ; then
-    make_fs_in_partition "$FS_STAGING" "$FS_TMPFILE"
-else
-    make_fs "$FS_STAGING" "$FS_TMPFILE"
-fi
-
-VDI_SIZE=$(stat --format=%s "$FS_TMPFILE")
-
-make_xva () {
-    local output_file="$1"
-    local xml_file="$2"
-    local subdir
-    local rio
-
-    if [[ `cat $xml_file` =~ "<member>\s*<name>class</name>\s*<value>VDI</value>\s*</member>\s*<member>\s*<name>id</name>\s*<value>(Ref:[0-9]+)</value>" ]]
-    then
-        # it's a rio style xva
-        subdir="${BASH_REMATCH[1]}";
-        rio=1
-    else
-        # it's a geneva style xva
-        subdir="xvda"
-        rio=0
-    fi
-
-    cp "$xml_file" "$XVA_TARBALL_STAGING"/ova.xml
-    sed -i -e "s/@VDI_SIZE@/$VDI_SIZE/" "$XVA_TARBALL_STAGING"/ova.xml
-    mkdir "$XVA_TARBALL_STAGING/$subdir"
-    splitvdi "$FS_TMPFILE" "$XVA_TARBALL_STAGING/$subdir" "$rio"
-    TARFILE_MEMBERS=$(cd "$XVA_TARBALL_STAGING" && echo ova.xml $subdir/*)
-    tar -C "$XVA_TARBALL_STAGING" --format=v7 -c $TARFILE_MEMBERS -f "$output_file.tmp"
-    mv "$output_file.tmp" "$output_file"
-}
-
-make_ovf () {
-    local output_dir="$1"
-    local xml_file="$2"
-    local output_base=$(basename "$output_dir")
-    local disk="$output_dir/${output_base}.vmdk"
-    local manifest="$output_dir/${output_base}.mf"
-    local ovf="$output_dir/${output_base}.ovf"
-
-    mkdir -p "$output_dir"
-    rm -f "$disk"
-    $VBOX_IMG convert --srcfilename="$FS_TMPFILE" --dstfilename="$disk" \
-        --srcformat RAW --dstformat VMDK --variant Stream
-    chmod 0644 "$disk"
-
-    local n_bytes=$(stat --printf=%s "$disk")
-    cp "$xml_file" "$ovf"
-    sed -i -e "s/@MKXVA_DISK_FULLSIZE@/$VDI_SIZE/" "$ovf"
-    sed -i -e "s/@MKXVA_DISK_SIZE@/$n_bytes/" "$ovf"
-    sed -i -e "s/@MKXVA_DISK_MIB_SIZE@/$FS_SIZE_MIB/" "$ovf"
-    sed -i -e "s/@MKXVA_DISK_FILENAME@/${output_base}.vmdk/" "$ovf"
-
-    for to_sign in "$ovf" "$disk"
-    do
-	local sha1_sum=$(sha1sum "$to_sign" | cut -d' ' -f1)
-	echo "SHA1($(basename "$to_sign"))= $sha1_sum" >> $manifest
-    done
-}
-
-output_files="$OPT_OUTPUT_FILES"
-xml_files="$OPT_XML_FILES"
-# Iterate through the type list creating the relevant VMs
-for create_type in $OPT_TYPES
-do
-    # Shift one parameter from the front of the lists
-    create_output_file="${output_files%% *}"
-    output_files="${output_files#* }"
-    create_xml_file="${xml_files%% *}"
-    xml_files="${xml_files#* }"
-    echo "Creating $create_type appliance $create_output_file using metadata file $create_xml_file"
-
-    case "$create_type" in
-	xva)
-	    make_xva "$create_output_file" "$create_xml_file"
-	    ;;
-	ovf)
-	    make_ovf "$create_output_file" "$create_xml_file"
-	    ;;
-	*)
-	    echo "Unknown VM type '$create_type'"
-	    exit 1
-	    ;;
-    esac
-
-done
-
-
-# cleanup
-if [ -z "${DO_NOT_CLEANUP:-}" ] ; then
-    rm -rf "$XVA_TARBALL_STAGING"
-    rm -f "$FS_TMPFILE"
-fi
diff --git a/tools/xen/scripts/templatedelete.sh b/tools/xen/scripts/templatedelete.sh
deleted file mode 100755
index 66765b2..0000000
--- a/tools/xen/scripts/templatedelete.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-#Usage: ./templatedelete.sh <template-uuid>
-
-templateuuid="$1"
-
-xe template-param-set other-config:default_template=false uuid="$templateuuid"
-xe template-param-set is-a-template=false uuid="$templateuuid"
-xe vm-destroy uuid="$templateuuid"
diff --git a/tools/xen/templates/hosts.in b/tools/xen/templates/hosts.in
deleted file mode 100644
index 8ab4c3e..0000000
--- a/tools/xen/templates/hosts.in
+++ /dev/null
@@ -1,8 +0,0 @@
-127.0.0.1   localhost
-127.0.0.1   %HOSTNAME%
-::1     localhost ip6-localhost ip6-loopback
-fe00::0     ip6-localnet
-ff00::0     ip6-mcastprefix
-ff02::1     ip6-allnodes
-ff02::2     ip6-allrouters
-
diff --git a/tools/xen/templates/menu.lst.in b/tools/xen/templates/menu.lst.in
deleted file mode 100644
index 8bc6426..0000000
--- a/tools/xen/templates/menu.lst.in
+++ /dev/null
@@ -1,6 +0,0 @@
-default 0
-
-title default
-        root (hd0,0)
-        kernel /boot/vmlinuz-@KERNEL_VERSION@ ro root=LABEL=vpxroot console=xvc0
-        initrd /boot/initrd.img-@KERNEL_VERSION@
diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in
deleted file mode 100644
index 01041e2..0000000
--- a/tools/xen/templates/ova.xml.in
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" ?>
-<appliance version="0.1">
-        <vm name="vm">
-                <label>
-                        @PRODUCT_BRAND@ @PRODUCT_VERSION@-@BUILD_NUMBER@
-                </label>
-                <shortdesc></shortdesc>
-                <config mem_set="1073741824" vcpus="1"/>
-                <hacks is_hvm="false"/>
-                <vbd device="xvda" function="root" mode="w" vdi="vdi_xvda"/>
-        </vm>
-        <vdi name="vdi_xvda" size="@VDI_SIZE@" source="file://xvda" type="dir-gzipped-chunks" variety="system"/>
-</appliance>
-
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 43a6ce8..be6c5ca 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -14,12 +14,12 @@
 # Size of image
 VDI_MB=${VDI_MB:-5000}
 
-# Devstack now contains many components.  3GB ram is not enough to prevent
+# Devstack now contains many components.  4GB ram is not enough to prevent
 # swapping and memory fragmentation - the latter of which can cause failures
 # such as blkfront failing to plug a VBD and lead to random test fails.
 #
-# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs
-OSDOMU_MEM_MB=4096
+# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs
+OSDOMU_MEM_MB=6144
 OSDOMU_VDI_GB=8
 
 # Network mapping. Specify bridge names or network names. Network names may
diff --git a/tox.ini b/tox.ini
index e3d19ce..788fea9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -33,6 +33,10 @@
    sphinx>=1.1.2,<1.2
    pbr>=0.6,!=0.7,<1.0
    oslosphinx
+   nwdiag
+   blockdiag
+   sphinxcontrib-blockdiag
+   sphinxcontrib-nwdiag
 whitelist_externals = bash
 setenv =
   TOP_DIR={toxinidir}
diff --git a/unstack.sh b/unstack.sh
index f0da971..10e5958 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -187,5 +187,10 @@
 fi
 
 # BUG: maybe it doesn't exist? We should isolate this further down.
-clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
-clean_lvm_filter
+# NOTE: Cinder automatically installs the lvm2 package, independently of the
+# enabled backends. So if Cinder is enabled, we can be sure that lvm (lvremove,
+# /etc/lvm/lvm.conf, etc.) is available.
+if is_service_enabled cinder; then
+    clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
+    clean_lvm_filter
+fi