Merge "Fixed detection of a project in projects.txt"
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index eeb1f21..d4968a6 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -63,11 +63,6 @@
 * YAMAMOTO Takashi <yamamoto@valinux.co.jp>
 * Fumihiko Kakuma <kakuma@valinux.co.jp>
 
-Sahara
-~~~~~~
-
-* Sergey Lukjanov <slukjanov@mirantis.com>
-
 Swift
 ~~~~~
 
diff --git a/README.md b/README.md
index 455e1c6..750190b 100644
--- a/README.md
+++ b/README.md
@@ -117,19 +117,13 @@
 
 # RPC Backend
 
-Multiple RPC backends are available. Currently, this
-includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
-choice may be selected via the `localrc` section.
+Support for a RabbitMQ RPC backend is included. Additional RPC backends may
+be available via external plugins.  Enabling or disabling RabbitMQ is handled
+via the usual service functions and ``ENABLED_SERVICES``.
 
-Note that selecting more than one RPC backend will result in a failure.
+An example of disabling RabbitMQ in ``local.conf``:
 
-Example (ZeroMQ):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
-
-Example (Qpid):
-
-    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
+    disable_service rabbit
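+
+Additional backends supplied by external plugins are typically enabled with
+``enable_plugin`` in ``local.conf`` (the plugin name and URL below are
+placeholders, not a real plugin):
+
+    enable_plugin devstack-plugin-foo git://git.openstack.org/openstack/devstack-plugin-foo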
 
 # Apache Frontend
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 8e2e7ff..e91012f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -201,7 +201,7 @@
 
     | *Defaults: ``LOGFILE="" LOGDAYS=7 LOG_COLOR=True``*
     |  By default ``stack.sh`` output is only written to the console
-       where is runs. It can be sent to a file in addition to the console
+       where it runs. It can be sent to a file in addition to the console
        by setting ``LOGFILE`` to the fully-qualified name of the
        destination log file. A timestamp will be appended to the given
        filename for each run of ``stack.sh``.
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index b09d386..f61002b 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -122,7 +122,7 @@
 
     ::
 
-        enable_service qpid
+        enable_service q-svc
 
 How do I run a specific OpenStack milestone?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f15c306..2dd0241 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -170,7 +170,6 @@
 * `lib/nova <lib/nova.html>`__
 * `lib/oslo <lib/oslo.html>`__
 * `lib/rpc\_backend <lib/rpc_backend.html>`__
-* `lib/sahara <lib/sahara.html>`__
 * `lib/swift <lib/swift.html>`__
 * `lib/tempest <lib/tempest.html>`__
 * `lib/tls <lib/tls.html>`__
@@ -181,7 +180,6 @@
 
 * `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
 * `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
-* `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
 * `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
 * `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
 * `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
@@ -238,7 +236,6 @@
 * `exercises/floating\_ips.sh <exercises/floating_ips.sh.html>`__
 * `exercises/horizon.sh <exercises/horizon.sh.html>`__
 * `exercises/neutron-adv-test.sh <exercises/neutron-adv-test.sh.html>`__
-* `exercises/sahara.sh <exercises/sahara.sh.html>`__
 * `exercises/sec\_groups.sh <exercises/sec_groups.sh.html>`__
 * `exercises/swift.sh <exercises/swift.sh.html>`__
 * `exercises/volumes.sh <exercises/volumes.sh.html>`__
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 2dd70d8..c5c4e1e 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -22,6 +22,8 @@
 +--------------------+-------------------------------------------+--------------------+
 |magnum              |git://git.openstack.org/openstack/magnum   |                    |
 +--------------------+-------------------------------------------+--------------------+
+|sahara              |git://git.openstack.org/openstack/sahara   |                    |
++--------------------+-------------------------------------------+--------------------+
 |trove               |git://git.openstack.org/openstack/trove    |                    |
 +--------------------+-------------------------------------------+--------------------+
 |zaqar               |git://git.openstack.org/openstack/zaqar    |
diff --git a/exercises/sahara.sh b/exercises/sahara.sh
deleted file mode 100755
index 8cad945..0000000
--- a/exercises/sahara.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-
-# **sahara.sh**
-
-# Sanity check that Sahara started if enabled
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-is_service_enabled sahara || exit 55
-
-if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
-    SAHARA_SERVICE_PROTOCOL="https"
-fi
-
-SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-$CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
deleted file mode 100644
index f177766..0000000
--- a/extras.d/70-sahara.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# sahara.sh - DevStack extras script to install Sahara
-
-if is_service_enabled sahara; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/sahara
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing sahara"
-        install_sahara
-        install_python_saharaclient
-        cleanup_sahara
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring sahara"
-        configure_sahara
-        create_sahara_accounts
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing sahara"
-        sahara_register_images
-        start_sahara
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_sahara
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_sahara
-    fi
-fi
diff --git a/files/debs/neutron b/files/debs/neutron
index 2d69a71..b5a457e 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -9,11 +9,9 @@
 postgresql-server-dev-all
 python-mysqldb
 python-mysql.connector
-python-qpid # NOPRIME
 dnsmasq-base
 dnsmasq-utils # for dhcp_release only available in dist:precise
 rabbitmq-server # NOPRIME
-qpidd # NOPRIME
 sqlite3
 vlan
 radvd # NOPRIME
diff --git a/files/debs/nova b/files/debs/nova
index 9d9acde..346b8b3 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -24,10 +24,8 @@
 curl
 genisoimage # required for config_drive
 rabbitmq-server # NOPRIME
-qpidd # NOPRIME
 socat # used by ajaxterm
 python-libvirt # NOPRIME
 python-libxml2
 python-numpy # used by websockify for spice console
 python-m2crypto
-python-qpid # NOPRIME
diff --git a/files/debs/qpid b/files/debs/qpid
deleted file mode 100644
index e3bbf09..0000000
--- a/files/debs/qpid
+++ /dev/null
@@ -1 +0,0 @@
-sasl2-bin # NOPRIME
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index e75db89..1339799 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -11,6 +11,3 @@
 sudo
 vlan
 radvd # NOPRIME
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-qpidd # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 6f8aef1..039456f 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -22,7 +22,3 @@
 sqlite3
 sudo
 vlan
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-python-qpid # NOPRIME
-qpidd # NOPRIME
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 8292e7b..29851be 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,7 +11,6 @@
 openvswitch # NOPRIME
 postgresql-devel
 rabbitmq-server # NOPRIME
-qpid-cpp-server        # NOPRIME
 sqlite
 sudo
 radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index ebd6674..6eeb623 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -10,6 +10,7 @@
 iputils
 kpartx
 kvm # NOPRIME
+qemu-kvm # NOPRIME
 libvirt-bin # NOPRIME
 libvirt-devel # NOPRIME
 libvirt-python # NOPRIME
@@ -22,6 +23,5 @@
 parted
 polkit
 rabbitmq-server # NOPRIME
-qpid-cpp-server # NOPRIME
 sqlite
 sudo
diff --git a/files/rpms/qpid b/files/rpms/qpid
deleted file mode 100644
index 41dd2f6..0000000
--- a/files/rpms/qpid
+++ /dev/null
@@ -1,3 +0,0 @@
-qpid-proton-c-devel # NOPRIME
-cyrus-sasl-lib # NOPRIME
-cyrus-sasl-plain # NOPRIME
diff --git a/functions-common b/functions-common
index c85052d..483b1fa 100644
--- a/functions-common
+++ b/functions-common
@@ -695,9 +695,8 @@
 }
 
 # Gets or creates group
-# Usage: get_or_create_group <groupname> [<domain> <description>]
+# Usage: get_or_create_group <groupname> <domain> [<description>]
 function get_or_create_group {
-    local domain=${2:+--domain ${2}}
     local desc="${3:-}"
     local os_url="$KEYSTONE_SERVICE_URI_V3"
     # Gets group id
@@ -705,34 +704,30 @@
         # Creates new group with --or-show
         openstack --os-token=$OS_TOKEN --os-url=$os_url \
             --os-identity-api-version=3 group create $1 \
-            $domain --description "$desc" --or-show \
+            --domain $2 --description "$desc" --or-show \
             -f value -c id
     )
     echo $group_id
 }
 
 # Gets or creates user
-# Usage: get_or_create_user <username> <password> [<email> [<domain>]]
+# Usage: get_or_create_user <username> <password> <domain> [<email>]
 function get_or_create_user {
-    if [[ ! -z "$3" ]]; then
-        local email="--email=$3"
+    if [[ ! -z "$4" ]]; then
+        local email="--email=$4"
     else
         local email=""
     fi
-    local os_cmd="openstack"
-    local domain=""
-    if [[ ! -z "$4" ]]; then
-        domain="--domain=$4"
-        os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
-    fi
     # Gets user id
     local user_id=$(
         # Creates new user with --or-show
-        $os_cmd user create \
+        openstack user create \
             $1 \
             --password "$2" \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            --domain=$3 \
             $email \
-            $domain \
             --or-show \
             -f value -c id
     )
@@ -740,18 +735,15 @@
 }
 
 # Gets or creates project
-# Usage: get_or_create_project <name> [<domain>]
+# Usage: get_or_create_project <name> <domain>
 function get_or_create_project {
-    # Gets project id
-    local os_cmd="openstack"
-    local domain=""
-    if [[ ! -z "$2" ]]; then
-        domain="--domain=$2"
-        os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
-    fi
     local project_id=$(
         # Creates new project with --or-show
-        $os_cmd project create $1 $domain --or-show -f value -c id
+        openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            project create $1 \
+            --domain=$2 \
+            --or-show -f value -c id
     )
     echo $project_id
 }
@@ -1350,7 +1342,7 @@
 
     if is_service_enabled $service; then
         # Clean up the screen window
-        screen -S $SCREEN_NAME -p $service -X kill
+        screen -S $SCREEN_NAME -p $service -X kill || true
     fi
 }
 
@@ -1691,7 +1683,7 @@
 # ``ENABLED_SERVICES`` list, if they are not already present.
 #
 # For example:
-#   enable_service qpid
+#   enable_service q-svc
 #
 # This function does not know about the special cases
 # for nova, glance, and neutron built into is_service_enabled().
@@ -1754,7 +1746,6 @@
         [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
         [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
-        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
         [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
@@ -1967,6 +1958,19 @@
     fi
 }
 
+# Test with a finite retry loop.
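+# Usage: test_with_retry <testcmd> <failmsg> [<timeout>] [<sleep>]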
+#
+function test_with_retry {
+    local testcmd=$1
+    local failmsg=$2
+    local until=${3:-10}
+    local sleep=${4:-0.5}
+
+    if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then
+        die $LINENO "$failmsg"
+    fi
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/inc/python b/inc/python
index 9a7cea0..54e19a7 100644
--- a/inc/python
+++ b/inc/python
@@ -66,7 +66,8 @@
 
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``
+# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``USE_CONSTRAINTS``
 # pip_install package [package ...]
 function pip_install {
     local xtrace=$(set +o | grep xtrace)
@@ -103,6 +104,13 @@
         fi
     fi
 
+    cmd_pip="$cmd_pip install"
+
+    # Handle a constraints file, if needed.
+    if [[ "$USE_CONSTRAINTS" == "True" ]]; then
+        cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
+    fi
+
     local pip_version=$(python -c "import pip; \
                         print(pip.__version__.strip('.')[0])")
     if (( pip_version<6 )); then
@@ -116,7 +124,7 @@
         https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
-        $cmd_pip install $upgrade \
+        $cmd_pip $upgrade \
         $@
 
     # Also install test requirements
@@ -128,7 +136,7 @@
             https_proxy=${https_proxy:-} \
             no_proxy=${no_proxy:-} \
             PIP_FIND_LINKS=$PIP_FIND_LINKS \
-            $cmd_pip install $upgrade \
+            $cmd_pip $upgrade \
             -r $test_req
     fi
 }
@@ -215,7 +223,7 @@
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements != "changed" ]]; then
+    if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then
         if is_in_projects_txt $project_dir; then
             (cd $REQUIREMENTS_DIR; \
                 ./.venv/bin/python update.py $project_dir)
@@ -227,6 +235,14 @@
         fi
     fi
 
+    if [ -n "$REQUIREMENTS_DIR" ]; then
+        # Constrain this package to this project directory from here on out.
+        local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+            $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
+            "$flags file://$project_dir#egg=$name"
+    fi
+
     setup_package $project_dir $flags
 
     # We've just gone and possibly modified the user's source tree in an
diff --git a/lib/ceilometer b/lib/ceilometer
index d7888d9..ed9b933 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -341,7 +341,7 @@
         fi
 
         if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
-            pip_instal_gr oslo.vmware
+            pip_install_gr oslo.vmware
         fi
     fi
 
diff --git a/lib/ceph b/lib/ceph
index cbdc3b8..16dcda2 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -264,10 +264,6 @@
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
     sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
 
-    # NOTE(eharney): When Glance has fully migrated to Glance store,
-    # default_store can be removed from [DEFAULT].  (See lib/glance.)
-    iniset $GLANCE_API_CONF DEFAULT default_store rbd
-    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
     iniset $GLANCE_API_CONF glance_store default_store rbd
     iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
     iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
diff --git a/lib/databases/mysql b/lib/databases/mysql
index f097fb2..0e477ca 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -95,7 +95,10 @@
     sudo bash -c "source $TOP_DIR/functions && \
         iniset $my_conf mysqld bind-address 0.0.0.0 && \
         iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \
-        iniset $my_conf mysqld default-storage-engine InnoDB"
+        iniset $my_conf mysqld default-storage-engine InnoDB && \
+        iniset $my_conf mysqld max_connections 1024 && \
+        iniset $my_conf mysqld query_cache_type OFF && \
+        iniset $my_conf mysqld query_cache_size 0"
 
 
     if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
diff --git a/lib/glance b/lib/glance
index 47bad0e..4dbce9f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -113,9 +113,7 @@
     iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
     configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
-    fi
+    iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
     iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
 
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
@@ -126,9 +124,7 @@
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
     configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
-    fi
+    iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
     iniset_rpc_backend glance $GLANCE_API_CONF
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
@@ -264,7 +260,7 @@
         if is_service_enabled s-proxy; then
 
             local glance_swift_user=$(get_or_create_user "glance-swift" \
-                "$SERVICE_PASSWORD" "glance-swift@example.com")
+                "$SERVICE_PASSWORD" "default" "glance-swift@example.com")
             get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
 
diff --git a/lib/ironic b/lib/ironic
index 4984be1..cff20c9 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -366,7 +366,7 @@
         fi
         iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
         iniset $IRONIC_CONF_FILE glance swift_api_version v1
-        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME)
+        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
         iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
         iniset $IRONIC_CONF_FILE glance swift_container glance
         iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
@@ -658,6 +658,10 @@
         # agent ramdisk gets instance image from swift
         sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
     fi
+
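+    # When iPXE is enabled the deploy images are served over HTTP on
+    # $IRONIC_HTTP_PORT, so open that port as well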
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
+    fi
 }
 
 function configure_tftpd {
diff --git a/lib/keystone b/lib/keystone
index 7a949cf..c33d466 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -357,13 +357,13 @@
 function create_keystone_accounts {
 
     # admin
-    local admin_tenant=$(get_or_create_project "admin")
-    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD")
+    local admin_tenant=$(get_or_create_project "admin" default)
+    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
     local admin_role=$(get_or_create_role "admin")
     get_or_add_user_project_role $admin_role $admin_user $admin_tenant
 
     # Create service project/role
-    get_or_create_project "$SERVICE_TENANT_NAME"
+    get_or_create_project "$SERVICE_TENANT_NAME" default
 
     # Service role, so service users do not have to be admins
     get_or_create_role service
@@ -382,12 +382,12 @@
     local another_role=$(get_or_create_role "anotherrole")
 
     # invisible tenant - admin can't see this one
-    local invis_tenant=$(get_or_create_project "invisible_to_admin")
+    local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
 
     # demo
-    local demo_tenant=$(get_or_create_project "demo")
+    local demo_tenant=$(get_or_create_project "demo" default)
     local demo_user=$(get_or_create_user "demo" \
-        "$ADMIN_PASSWORD" "demo@example.com")
+        "$ADMIN_PASSWORD" "default" "demo@example.com")
 
     get_or_add_user_project_role $member_role $demo_user $demo_tenant
     get_or_add_user_project_role $admin_role $admin_user $demo_tenant
@@ -426,7 +426,7 @@
 function create_service_user {
     local role=${2:-service}
 
-    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD")
+    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
     get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
 }
 
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 3ac76a2..acc2851 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -696,9 +696,10 @@
     if is_ssl_enabled_service "neutron"; then
         ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
     fi
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then
-        die $LINENO "Neutron did not start"
-    fi
+
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port"
+    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
+
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
         start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
@@ -721,7 +722,7 @@
                 sudo ip addr del $IP dev $PUBLIC_INTERFACE
                 sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
             done
-            sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
         fi
     fi
 
@@ -1266,16 +1267,26 @@
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3; then
         # Configure and enable public bridge
+        local ext_gw_interface="none"
         if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-            local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+            ext_gw_interface=$(_neutron_get_ext_gw_interface)
+        elif [[ "$Q_AGENT" = "linuxbridge" ]]; then
+            # Find the brq bridge device that the neutron router and network
+            # for $FIXED_RANGE will be using,
+            # e.g. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102
+            ext_gw_interface=brq${EXT_NET_ID:0:11}
+        fi
+        if [[ "$ext_gw_interface" != "none" ]]; then
             local cidr_len=${FLOATING_RANGE#*/}
+            local testcmd="ip -o link | grep -q $ext_gw_interface"
+            test_with_retry "$testcmd" "$ext_gw_interface creation failed"
             if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
             ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'`
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
-            sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
+            sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP
         fi
         _neutron_set_router_id
     fi
@@ -1310,7 +1321,7 @@
 
             # Configure interface for public bridge
             sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-            sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+            sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
         fi
         _neutron_set_router_id
     fi
@@ -1380,9 +1391,8 @@
     local timeout_sec=$5
     local probe_cmd=""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
-    if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then
-        die $LINENO "server didn't become ssh-able!"
-    fi
+    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
 }
 
 # Neutron 3rd party programs
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
old mode 100644
new mode 100755
index b348af9..fefc1c3
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -9,6 +9,20 @@
 
 function neutron_lb_cleanup {
     sudo brctl delbr $PUBLIC_BRIDGE
+
+    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then
+        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
+            sudo ip link delete $port
+        done
+    elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then
+        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
+            sudo ip link delete $port
+        done
+    fi
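+    # Delete any remaining brq bridges created by the linuxbridge agent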
+    for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do
+        sudo ip link set $bridge down
+        sudo brctl delbr $bridge
+    done
 }
 
 function is_neutron_ovs_base_plugin {
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index f465cc9..34190f9 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -42,7 +42,7 @@
 
 function neutron_lbaas_stop {
     pids=$(ps aux | awk '/haproxy/ { print $2 }')
-    [ ! -z "$pids" ] && sudo kill $pids
+    [ ! -z "$pids" ] && sudo kill $pids || true
 }
 
 # Restore xtrace
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 22b58e0..5525cfd 100755
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -34,6 +34,13 @@
         #pip_install_gr <there-si-no-guestfs-in-pypi>
     elif is_fedora || is_suse; then
         install_package kvm
+        # there is a dependency issue with kvm (which is really just a
+        # wrapper to qemu-system-x86) that leaves some bios files out,
+        # so install qemu-kvm (which shouldn't strictly be needed, as
+        # everything has been merged into qemu-system-x86) to bring in
+        # the right packages. see
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1235890
+        install_package qemu-kvm
         install_package libvirt libvirt-devel
         pip_install_gr libvirt-python
     fi
diff --git a/lib/oslo b/lib/oslo
index 554bec8..123572c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -26,6 +26,7 @@
 GITDIR["cliff"]=$DEST/cliff
 GITDIR["debtcollector"]=$DEST/debtcollector
 GITDIR["futurist"]=$DEST/futurist
+GITDIR["oslo.cache"]=$DEST/oslo.cache
 GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
 GITDIR["oslo.config"]=$DEST/oslo.config
 GITDIR["oslo.context"]=$DEST/oslo.context
@@ -35,6 +36,7 @@
 GITDIR["oslo.messaging"]=$DEST/oslo.messaging
 GITDIR["oslo.middleware"]=$DEST/oslo.middleware
 GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.reports"]=$DEST/oslo.reports
 GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
 GITDIR["oslo.serialization"]=$DEST/oslo.serialization
 GITDIR["oslo.service"]=$DEST/oslo.service
@@ -63,9 +65,11 @@
 
 # install_oslo() - Collect source and prepare
 function install_oslo {
+    _do_install_oslo_lib "automaton"
     _do_install_oslo_lib "cliff"
     _do_install_oslo_lib "debtcollector"
     _do_install_oslo_lib "futurist"
+    _do_install_oslo_lib "oslo.cache"
     _do_install_oslo_lib "oslo.concurrency"
     _do_install_oslo_lib "oslo.config"
     _do_install_oslo_lib "oslo.context"
@@ -75,6 +79,7 @@
     _do_install_oslo_lib "oslo.messaging"
     _do_install_oslo_lib "oslo.middleware"
     _do_install_oslo_lib "oslo.policy"
+    _do_install_oslo_lib "oslo.reports"
     _do_install_oslo_lib "oslo.rootwrap"
     _do_install_oslo_lib "oslo.serialization"
     _do_install_oslo_lib "oslo.service"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 33ab03d..03eacd8 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -1,72 +1,32 @@
 #!/bin/bash
 #
 # lib/rpc_backend
-# Interface for interactig with different RPC backends
+# Interface for installing RabbitMQ on the system
 
 # Dependencies:
 #
 # - ``functions`` file
 # - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used
-# - ``RPC_MESSAGING_PROTOCOL`` option for configuring the messaging protocol
 
 # ``stack.sh`` calls the entry points in this order:
 #
 # - check_rpc_backend
 # - install_rpc_backend
 # - restart_rpc_backend
-# - iniset_rpc_backend
+# - iniset_rpc_backend (stable interface)
+#
+# Note: if implementing an out of tree plugin for an RPC backend, you
+# should install all services through normal plugin methods, then
+# redefine ``iniset_rpc_backend`` in your code. That's the one portion
+# of this file which is a standard interface.
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9}
-
-# TODO(sdague): RPC backend selection is super wonky because we treat
-# messaging server as a service, which it really isn't for multi host
-QPID_HOST=${QPID_HOST:-}
-
-
 # Functions
 # ---------
 
-# Make sure we only have one rpc backend enabled.
-# Also check the specified rpc backend is available on your platform.
-function check_rpc_backend {
-    local c svc
-
-    local rpc_needed=1
-    # We rely on the fact that filenames in lib/* match the service names
-    # that can be passed as arguments to is_service_enabled.
-    # We check for a call to iniset_rpc_backend in these files, meaning
-    # the service needs a backend.
-    rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}')
-    for c in ${rpc_candidates}; do
-        if is_service_enabled $c; then
-            rpc_needed=0
-            break
-        fi
-    done
-    local rpc_backend_cnt=0
-    for svc in qpid zeromq rabbit; do
-        is_service_enabled $svc &&
-        (( rpc_backend_cnt++ )) || true
-    done
-    if [ "$rpc_backend_cnt" -gt 1 ]; then
-        echo "ERROR: only one rpc backend may be enabled,"
-        echo "       set only one of 'rabbit', 'qpid', 'zeromq'"
-        echo "       via ENABLED_SERVICES."
-    elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then
-        echo "ERROR: at least one rpc backend must be enabled,"
-        echo "       set one of 'rabbit', 'qpid', 'zeromq'"
-        echo "       via ENABLED_SERVICES."
-    fi
-
-    if is_service_enabled qpid && ! qpid_is_supported; then
-        die $LINENO "Qpid support is not available for this version of your distribution."
-    fi
-}
-
 # clean up after rpc backend - eradicate all traces so changing backends
 # produces a clean switch
 function cleanup_rpc_backend {
@@ -79,110 +39,14 @@
             # And the Erlang runtime too
             apt_get purge -y erlang*
         fi
-    elif is_service_enabled qpid; then
-        if is_fedora; then
-            uninstall_package qpid-cpp-server
-        elif is_ubuntu; then
-            uninstall_package qpidd
-        else
-            exit_distro_not_supported "qpid installation"
-        fi
-    elif is_service_enabled zeromq; then
-        if is_fedora; then
-            uninstall_package zeromq python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis python-redis
-            fi
-        elif is_ubuntu; then
-            uninstall_package libzmq1 python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis-server python-redis
-            fi
-        elif is_suse; then
-            uninstall_package libzmq1 python-pyzmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                uninstall_package redis python-redis
-            fi
-        else
-            exit_distro_not_supported "zeromq installation"
-        fi
-    fi
-
-    # Remove the AMQP 1.0 messaging libraries
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        if is_fedora; then
-            uninstall_package qpid-proton-c-devel
-            uninstall_package python-qpid-proton
-        fi
-        # TODO(kgiusti) ubuntu cleanup
     fi
 }
 
 # install rpc backend
 function install_rpc_backend {
-    # Regardless of the broker used, if AMQP 1.0 is configured load
-    # the necessary messaging client libraries for oslo.messaging
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        if is_fedora; then
-            install_package qpid-proton-c-devel
-            install_package python-qpid-proton
-        elif is_ubuntu; then
-            # TODO(kgiusti) The QPID AMQP 1.0 protocol libraries
-            # are not yet in the ubuntu repos. Enable these installs
-            # once they are present:
-            #install_package libqpid-proton2-dev
-            #install_package python-qpid-proton
-            # Also add 'uninstall' directives in cleanup_rpc_backend()!
-            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
-        else
-            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
-        fi
-        # Install pyngus client API
-        # TODO(kgiusti) can remove once python qpid bindings are
-        # available on all supported platforms _and_ pyngus is added
-        # to the requirements.txt file in oslo.messaging
-        pip_install_gr pyngus
-    fi
-
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         install_package rabbitmq-server
-    elif is_service_enabled qpid; then
-        if is_fedora; then
-            install_package qpid-cpp-server
-        elif is_ubuntu; then
-            install_package qpidd
-        else
-            exit_distro_not_supported "qpid installation"
-        fi
-        _configure_qpid
-    elif is_service_enabled zeromq; then
-        if is_fedora; then
-            install_package zeromq python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis python-redis
-            fi
-        elif is_ubuntu; then
-            install_package libzmq1 python-zmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis-server python-redis
-            fi
-        elif is_suse; then
-            install_package libzmq1 python-pyzmq
-            if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-                install_package redis python-redis
-            fi
-        else
-            exit_distro_not_supported "zeromq installation"
-        fi
-        # Necessary directory for socket location.
-        sudo mkdir -p /var/run/openstack
-        sudo chown $STACK_USER /var/run/openstack
-    fi
-
-    # If using the QPID broker, install the QPID python client API
-    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        install_package python-qpid
     fi
 }
 
@@ -232,17 +96,12 @@
                 sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*"
             fi
         fi
-    elif is_service_enabled qpid; then
-        echo_summary "Starting qpid"
-        restart_service qpidd
     fi
 }
 
 # builds transport url string
 function get_transport_url {
-    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/"
-    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+    if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/"
     fi
 }
@@ -252,29 +111,7 @@
     local package=$1
     local file=$2
     local section=${3:-DEFAULT}
-    if is_service_enabled zeromq; then
-        iniset $file $section rpc_backend "zmq"
-        iniset $file $section rpc_zmq_host `hostname`
-        if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
-            iniset $file $section rpc_zmq_matchmaker "redis"
-            MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
-            iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
-        else
-            die $LINENO "Other matchmaker drivers not supported"
-        fi
-    elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
-        if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-            iniset $file $section rpc_backend "amqp"
-        else
-            iniset $file $section rpc_backend "qpid"
-        fi
-        iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST}
-        if [ -n "$QPID_USERNAME" ]; then
-            iniset $file $section qpid_username $QPID_USERNAME
-            iniset $file $section qpid_password $QPID_PASSWORD
-        fi
-    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+    if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend "rabbit"
         iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST
         iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
@@ -288,17 +125,6 @@
     fi
 }
 
-# Check if qpid can be used on the current distro.
-# qpid_is_supported
-function qpid_is_supported {
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-
-    # Qpid is not in openSUSE
-    ( ! is_suse )
-}
-
 function rabbit_setuser {
     local user="$1" pass="$2" found="" out=""
     out=$(sudo rabbitmqctl list_users) ||
@@ -314,85 +140,6 @@
     sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*"
 }
 
-# Set up the various configuration files used by the qpidd broker
-function _configure_qpid {
-
-    # the location of the configuration files have changed since qpidd 0.14
-    local qpid_conf_file
-    if [ -e /etc/qpid/qpidd.conf ]; then
-        qpid_conf_file=/etc/qpid/qpidd.conf
-    elif [ -e /etc/qpidd.conf ]; then
-        qpid_conf_file=/etc/qpidd.conf
-    else
-        exit_distro_not_supported "qpidd.conf file not found!"
-    fi
-
-    # force the ACL file to a known location
-    local qpid_acl_file=/etc/qpid/qpidd.acl
-    if [ ! -e $qpid_acl_file ]; then
-        sudo mkdir -p -m 755 `dirname $qpid_acl_file`
-        sudo touch $qpid_acl_file
-        sudo chmod o+r $qpid_acl_file
-    fi
-    sudo sed -i.bak '/^acl-file=/d' $qpid_conf_file
-    echo "acl-file=$qpid_acl_file" | sudo tee --append $qpid_conf_file
-
-    sudo sed -i '/^auth=/d' $qpid_conf_file
-    if [ -z "$QPID_USERNAME" ]; then
-        # no QPID user configured, so disable authentication
-        # and access control
-        echo "auth=no" | sudo tee --append $qpid_conf_file
-        cat <<EOF | sudo tee $qpid_acl_file
-acl allow all all
-EOF
-    else
-        # Configure qpidd to use PLAIN authentication, and add
-        # QPID_USERNAME to the ACL:
-        echo "auth=yes" | sudo tee --append $qpid_conf_file
-        if [ -z "$QPID_PASSWORD" ]; then
-            read_password QPID_PASSWORD "ENTER A PASSWORD FOR QPID USER $QPID_USERNAME"
-        fi
-        # Create ACL to allow $QPID_USERNAME full access
-        cat <<EOF | sudo tee $qpid_acl_file
-group admin ${QPID_USERNAME}@QPID
-acl allow admin all
-acl deny all all
-EOF
-        # Add user to SASL database
-        if is_ubuntu; then
-            install_package sasl2-bin
-        elif is_fedora; then
-            install_package cyrus-sasl-lib
-            install_package cyrus-sasl-plain
-        fi
-        local sasl_conf_file=/etc/sasl2/qpidd.conf
-        sudo sed -i.bak '/PLAIN/!s/mech_list: /mech_list: PLAIN /' $sasl_conf_file
-        local sasl_db=`sudo grep sasldb_path $sasl_conf_file | cut -f 2 -d ":" | tr -d [:blank:]`
-        if [ ! -e $sasl_db ]; then
-            sudo mkdir -p -m 755 `dirname $sasl_db`
-        fi
-        echo $QPID_PASSWORD | sudo saslpasswd2 -c -p -f $sasl_db -u QPID $QPID_USERNAME
-        sudo chmod o+r $sasl_db
-    fi
-
-    # If AMQP 1.0 is specified, ensure that the version of the
-    # broker can support AMQP 1.0 and configure the queue and
-    # topic address patterns used by oslo.messaging.
-    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
-        QPIDD=$(type -p qpidd)
-        if ! $QPIDD --help | grep -q "queue-patterns"; then
-            exit_distro_not_supported "qpidd with AMQP 1.0 support"
-        fi
-        if ! grep -q "queue-patterns=exclusive" $qpid_conf_file; then
-            cat <<EOF | sudo tee --append $qpid_conf_file
-queue-patterns=exclusive
-queue-patterns=unicast
-topic-patterns=broadcast
-EOF
-        fi
-    fi
-}
-
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/sahara b/lib/sahara
deleted file mode 100644
index 51e431a..0000000
--- a/lib/sahara
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/bash
-#
-# lib/sahara
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_sahara
-# install_python_saharaclient
-# configure_sahara
-# sahara_register_images
-# start_sahara
-# stop_sahara
-# cleanup_sahara
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-
-# Set up default directories
-GITDIR["python-saharaclient"]=$DEST/python-saharaclient
-SAHARA_DIR=$DEST/sahara
-
-SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
-SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
-
-if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
-    SAHARA_SERVICE_PROTOCOL="https"
-fi
-SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
-SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
-SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386}
-SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
-
-SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake}
-
-# Support entry points installation of console scripts
-if [[ -d $SAHARA_DIR/bin ]]; then
-    SAHARA_BIN_DIR=$SAHARA_DIR/bin
-else
-    SAHARA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,sahara
-
-# Functions
-# ---------
-
-# create_sahara_accounts() - Set up common required sahara accounts
-#
-# Tenant      User       Roles
-# ------------------------------
-# service     sahara    admin
-function create_sahara_accounts {
-
-    create_service_user "sahara"
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-        # TODO: remove "data_processing" service when #1356053 will be fixed
-        local sahara_service_old=$(openstack service create \
-            "data_processing" \
-            --name "sahara" \
-            --description "Sahara Data Processing" \
-            -f value -c id
-        )
-        local sahara_service_new=$(openstack service create \
-            "data-processing" \
-            --name "sahara" \
-            --description "Sahara Data Processing" \
-            -f value -c id
-        )
-        get_or_create_endpoint $sahara_service_old \
-            "$REGION_NAME" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-        get_or_create_endpoint $sahara_service_new \
-            "$REGION_NAME" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-    fi
-}
-
-# cleanup_sahara() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_sahara {
-
-    # Cleanup auth cache dir
-    sudo rm -rf $SAHARA_AUTH_CACHE_DIR
-}
-
-# configure_sahara() - Set config files, create data dirs, etc
-function configure_sahara {
-    sudo install -d -o $STACK_USER $SAHARA_CONF_DIR
-
-    if [[ -f $SAHARA_DIR/etc/sahara/policy.json ]]; then
-        cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR
-    fi
-
-    # Create auth cache dir
-    sudo install -d -o $STACK_USER -m 700 $SAHARA_AUTH_CACHE_DIR
-    rm -rf $SAHARA_AUTH_CACHE_DIR/*
-
-    configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR
-
-    iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT
-
-    # Set configuration to send notifications
-
-    if is_service_enabled ceilometer; then
-        iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true"
-        iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging"
-    fi
-
-    iniset $SAHARA_CONF_FILE DEFAULT verbose True
-    iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
-    iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS
-
-    iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
-
-    if is_service_enabled neutron; then
-        iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
-
-        if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
-            iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE
-        fi
-    else
-        iniset $SAHARA_CONF_FILE DEFAULT use_neutron false
-    fi
-
-    if is_service_enabled heat; then
-        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
-
-        if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then
-            iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE
-        fi
-    else
-        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
-    fi
-
-    if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-        iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE
-    fi
-
-    # Register SSL certificates if provided
-    if is_ssl_enabled_service sahara; then
-        ensure_certificates SAHARA
-
-        iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT"
-        iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY"
-    fi
-
-    iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $SAHARA_CONF_FILE DEFAULT
-    fi
-
-    if is_service_enabled tls-proxy; then
-        # Set the service port for a proxy to take the original
-        iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT
-    fi
-
-    recreate_database sahara
-    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
-}
-
-# install_sahara() - Collect source and prepare
-function install_sahara {
-    git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
-    setup_develop $SAHARA_DIR
-}
-
-# install_python_saharaclient() - Collect source and prepare
-function install_python_saharaclient {
-    if use_library_from_git "python-saharaclient"; then
-        git_clone_by_name "python-saharaclient"
-        setup_dev_lib "python-saharaclient"
-    fi
-}
-
-# sahara_register_images() - Registers images in sahara image registry
-function sahara_register_images {
-    if is_service_enabled heat && [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
-        # Register heat image for Fake plugin
-        local fake_plugin_properties="--property _sahara_tag_0.1=True"
-        fake_plugin_properties+=" --property _sahara_tag_fake=True"
-        fake_plugin_properties+=" --property _sahara_username=fedora"
-        openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image set $(basename "$HEAT_CFN_IMAGE_URL" ".qcow2") $fake_plugin_properties
-    fi
-}
-
-# start_sahara() - Start running processes, including screen
-function start_sahara {
-    local service_port=$SAHARA_SERVICE_PORT
-    local service_protocol=$SAHARA_SERVICE_PROTOCOL
-    if is_service_enabled tls-proxy; then
-        service_port=$SAHARA_SERVICE_PORT_INT
-        service_protocol="http"
-    fi
-
-    run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
-    run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
-    run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE"
-
-    echo "Waiting for Sahara to start..."
-    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then
-        die $LINENO "Sahara did not start"
-    fi
-
-    # Start proxies if enabled
-    if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT &
-    fi
-}
-
-# stop_sahara() - Stop running processes
-function stop_sahara {
-    # Kill the Sahara screen windows
-    stop_process sahara
-    stop_process sahara-api
-    stop_process sahara-eng
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/swift b/lib/swift
index 820042d..5b73981 100644
--- a/lib/swift
+++ b/lib/swift
@@ -616,20 +616,23 @@
             "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    local swift_tenant_test1=$(get_or_create_project swifttenanttest1)
+    local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
     die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
-    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password "test@example.com")
+    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
+                        "default" "test@example.com")
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
     get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
 
-    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password "test3@example.com")
+    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+                                "default" "test3@example.com")
     die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
     get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
 
-    local swift_tenant_test2=$(get_or_create_project swifttenanttest2)
+    local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
     die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password "test2@example.com")
+    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+                                "default" "test2@example.com")
     die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
     get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
 
@@ -639,7 +642,8 @@
     local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
     die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
 
-    local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password "test4@example.com" $swift_domain)
+    local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+                                $swift_domain "test4@example.com")
     die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
     get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
 }
@@ -768,7 +772,7 @@
         stop_process s-${type}
     done
     # Blast out any stragglers
-    pkill -f swift-
+    pkill -f swift- || true
 }
 
 function swift_configure_tempurls {
diff --git a/lib/tempest b/lib/tempest
index 9fba0aa..a84ade2 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -379,6 +379,7 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True
     # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
+    iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -550,8 +551,8 @@
     if is_service_enabled tempest; then
         # Tempest has some tests that validate various authorization checks
         # between two regular users in separate tenants
-        get_or_create_project alt_demo
-        get_or_create_user alt_demo "$ADMIN_PASSWORD" "alt_demo@example.com"
+        get_or_create_project alt_demo default
+        get_or_create_user alt_demo "$ADMIN_PASSWORD" "default" "alt_demo@example.com"
         get_or_add_user_project_role Member alt_demo alt_demo
     fi
 }
diff --git a/lib/zaqar b/lib/zaqar
index 8d51910..891b0ea 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -128,10 +128,9 @@
         configure_redis
     fi
 
-    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $ZAQAR_CONF DEFAULT notification_driver messaging
-        iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
-    fi
+    iniset $ZAQAR_CONF DEFAULT notification_driver messaging
+    iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
+
     iniset_rpc_backend zaqar $ZAQAR_CONF
 
     cleanup_zaqar
diff --git a/stack.sh b/stack.sh
index 7a5ed04..17cbe75 100755
--- a/stack.sh
+++ b/stack.sh
@@ -500,12 +500,8 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
-# Make sure we only have one rpc backend enabled,
-# and the specified rpc backend is available on your platform.
-check_rpc_backend
-
 # Service to enable with SSL if ``USE_SSL`` is True
-SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara"
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
 
 if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
     die $LINENO "tls-proxy and SSL are mutually exclusive"
@@ -683,6 +679,9 @@
 echo_summary "Installing package prerequisites"
 source $TOP_DIR/tools/install_prereqs.sh
 
+# Normalise USE_CONSTRAINTS
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
+
 # Configure an appropriate Python environment
 if [[ "$OFFLINE" != "True" ]]; then
     PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
@@ -1012,6 +1011,9 @@
     # Begone token auth
     unset OS_TOKEN OS_URL
 
+    # Force the use of v2 identity authentication even with v3-capable commands
+    export OS_AUTH_TYPE=v2password
+
     # Set up password auth credentials now that Keystone is bootstrapped
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
@@ -1020,15 +1022,6 @@
     export OS_REGION_NAME=$REGION_NAME
 fi
 
-
-# ZeroMQ
-# ------
-if is_service_enabled zeromq; then
-    echo_summary "Starting zeromq receiver"
-    run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
-fi
-
-
 # Horizon
 # -------
 
diff --git a/stackrc b/stackrc
index 9cd9c05..342f9bf 100644
--- a/stackrc
+++ b/stackrc
@@ -149,6 +149,12 @@
 # Zero disables timeouts
 GIT_TIMEOUT=${GIT_TIMEOUT:-0}
 
+# Constraints mode
+# - False (default) : update git project dependencies from global-requirements.
+#
+# - True : use upper-constraints.txt to constrain the versions of packages
+#          installed and do not edit projects at all.
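+#
+# Set USE_CONSTRAINTS=True in local.conf to opt in.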
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
 
 # Repositories
 # ------------
@@ -225,10 +231,6 @@
 NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
 NOVA_BRANCH=${NOVA_BRANCH:-master}
 
-# data processing service
-SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
-SAHARA_BRANCH=${SAHARA_BRANCH:-master}
-
 # object storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
@@ -290,10 +292,6 @@
 GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
 GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master}
 
-# python saharaclient
-GITREPO["python-saharaclient"]=${SAHARACLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
-GITBRANCH["python-saharaclient"]=${SAHARACLIENT_BRANCH:-master}
-
 # python swift client library
 GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
 GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
@@ -327,6 +325,10 @@
 GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git}
 GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master}
 
+# oslo.cache
+GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git}
+GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master}
+
 # oslo.concurrency
 GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
 GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
@@ -363,6 +365,10 @@
 GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
 GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
 
+# oslo.reports
+GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
+GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
+
 # oslo.rootwrap
 GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
 GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index fc65967..8dc3ba3 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -35,11 +35,12 @@
 ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
-ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth"
+ALL_LIBS+=" oslo.serialization django_openstack_auth"
 ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
 ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
+ALL_LIBS+=" oslo.cache oslo.reports"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 0862135..3a364fe 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -43,6 +43,9 @@
                 'project_name': args.os_project_name,
             },
         }
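+        # Keystone v3 password auth needs domain scoping; assume the 'default' domain.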
+        if args.os_identity_api_version == '3':
+            self._cloud_data['auth']['user_domain_id'] = 'default'
+            self._cloud_data['auth']['project_domain_id'] = 'default'
         if args.os_cacert:
             self._cloud_data['cacert'] = args.os_cacert
 
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 7acfb5e..628a69f 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -23,6 +23,7 @@
 import os.path
 import sys
 
+from subprocess import Popen
 
 def get_options():
     parser = argparse.ArgumentParser(
@@ -46,7 +47,7 @@
     print cmd
     print "-" * len(cmd)
     print
-    print os.popen(cmd).read()
+    Popen(cmd, shell=True).wait()
 
 
 def _header(name):