Merge "Adding ENABLE_IDENTITY_V2 on docs"
diff --git a/clean.sh b/clean.sh
index fc6f80d..c8b8223 100755
--- a/clean.sh
+++ b/clean.sh
@@ -104,7 +104,7 @@
 fi
 
 # Clean out /etc
-sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron
+sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron /etc/openstack/
 
 # Clean out tgt
 sudo rm -f /etc/tgt/conf.d/*
@@ -136,7 +136,8 @@
 FILES_TO_CLEAN=".localrc.auto .localrc.password "
 FILES_TO_CLEAN+="docs/files docs/html shocco/ "
 FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* "
-FILES_TO_CLEAN+=".stackenv .prereqs"
+FILES_TO_CLEAN+=".stackenv .prereqs "
+FILES_TO_CLEAN+="~/.config/openstack"
 
 for file in $FILES_TO_CLEAN; do
     rm -rf $TOP_DIR/$file
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 48c08dd..6f45c1c 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -108,7 +108,6 @@
     DATABASE_PASSWORD=$ADMIN_PASSWORD
     RABBIT_PASSWORD=$ADMIN_PASSWORD
     SERVICE_PASSWORD=$ADMIN_PASSWORD
-    SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50
     #FIXED_RANGE=172.31.1.0/24
     #FLOATING_RANGE=192.168.20.0/25
     #HOST_IP=10.3.4.5
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index f3bd2fe..0c439ad 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -34,7 +34,6 @@
     DATABASE_PASSWORD=password
     ADMIN_PASSWORD=password
     SERVICE_PASSWORD=password
-    SERVICE_TOKEN=password
     RABBIT_PASSWORD=password
     # Enable Logging
     LOGFILE=$DEST/logs/stack.sh.log
@@ -46,7 +45,7 @@
     # Horizon
     ENABLED_SERVICES+=,horizon
     # Nova
-    ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch
+    ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch
     # Glance
     ENABLED_SERVICES+=,g-api,g-reg
     # Neutron
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 5660bc5..392bb1b 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -131,7 +131,6 @@
     DATABASE_PASSWORD=supersecret
     RABBIT_PASSWORD=supersecrete
     SERVICE_PASSWORD=supersecrete
-    SERVICE_TOKEN=xyzpdqlazydog
 
 In the multi-node configuration the first 10 or so IPs in the private
 subnet are usually reserved. Add this to ``local.sh`` to have it run
@@ -172,7 +171,6 @@
     DATABASE_PASSWORD=supersecret
     RABBIT_PASSWORD=supersecrete
     SERVICE_PASSWORD=supersecrete
-    SERVICE_TOKEN=xyzpdqlazydog
     DATABASE_TYPE=mysql
     SERVICE_HOST=192.168.42.11
     MYSQL_HOST=$SERVICE_HOST
@@ -375,3 +373,43 @@
 ::
 
     mysqladmin -u root -pnova password 'supersecret'
+
+Live Migration
+--------------
+
+In order for live migration to work with the default live migration URI::
+
+    [libvirt]
+    live_migration_uri = qemu+ssh://stack@%s/system
+
+SSH keys need to be exchanged between all compute nodes:
+
+1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub)
+   needs to be in the DESTINATION stack user's authorized_keys file
+   (~stack/.ssh/authorized_keys).  This can be accomplished by manually
+   copying the contents from the file on the SOURCE to the DESTINATION.  If
+   you have a password configured for the stack user, then you can use the
+   following command to accomplish the same thing::
+
+        ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION
+
+2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub)
+   needs to be in the SOURCE root user's known_hosts file
+   (/root/.ssh/known_hosts).  This can be accomplished by running the
+   following on the SOURCE machine (hostname must be used)::
+
+        ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
+
+In essence, this means that every compute node's root user's public RSA key
+must exist in every other compute node's stack user's authorized_keys file,
+and every compute node's public ECDSA key needs to be in every other compute
+node's root user's known_hosts file.  Please note that if the root or stack
+user does not have an SSH key, one can be generated using::
+
+    ssh-keygen -t rsa
+
+The above steps are necessary because libvirtd runs as root when the
+live_migration_uri uses the "qemu:///system" family of URIs.  For more
+information, see the `libvirt documentation`_.
+
+.. _libvirt documentation: https://libvirt.org/drvqemu.html#securitydriver
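
The two manual steps above can also be scripted. The following is a minimal
sketch, not part of the patch, that would be run on each compute node in turn;
the host names compute1 and compute2 are assumptions for illustration::

    # Hypothetical helper: push this node's root public RSA key to the stack
    # user on every other node, and trust every other node's host key as root.
    NODES="compute1 compute2"
    for dest in $NODES; do
        [ "$dest" = "$(hostname)" ] && continue
        # Step 1: root's id_rsa.pub -> ~stack/.ssh/authorized_keys on $dest
        sudo ssh-copy-id -i /root/.ssh/id_rsa.pub stack@$dest
        # Step 2: $dest's host key -> this node's /root/.ssh/known_hosts
        ssh-keyscan -H $dest | sudo tee -a /root/.ssh/known_hosts
    done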
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index a72b6f9..1e20d7f 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -6,6 +6,8 @@
 plugin and the Open vSwitch mechanism driver.
 
 
+.. _single-interface-ovs:
+
 Using Neutron with a Single Interface
 =====================================
 
@@ -23,7 +25,9 @@
 In most cases where DevStack is being deployed with a single
 interface, there is a hardware router that is being used for external
 connectivity and DHCP. The developer machine is connected to this
-network and is on a shared subnet with other machines.
+network and is on a shared subnet with other machines.  The
+``local.conf`` shown here assumes that 1500 is a reasonable MTU to
+use on that network.
 
 .. nwdiag::
 
@@ -59,7 +63,6 @@
         DATABASE_PASSWORD=secrete
         RABBIT_PASSWORD=secrete
         SERVICE_PASSWORD=secrete
-        SERVICE_TOKEN=secrete
 
         # Do not use Nova-Network
         disable_service n-net
@@ -75,6 +78,8 @@
         PUBLIC_NETWORK_GATEWAY="172.18.161.1"
         Q_L3_ENABLED=True
         PUBLIC_INTERFACE=eth0
+
+        # Open vSwitch provider networking configuration
         Q_USE_PROVIDERNET_FOR_PUBLIC=True
         OVS_PHYSICAL_BRIDGE=br-ex
         PUBLIC_BRIDGE=br-ex
@@ -231,7 +236,6 @@
     MYSQL_PASSWORD=secrete
     RABBIT_PASSWORD=secrete
     SERVICE_PASSWORD=secrete
-    SERVICE_TOKEN=secrete
 
     ## Neutron options
     PUBLIC_INTERFACE=eth0
@@ -362,7 +366,6 @@
         MYSQL_PASSWORD=secrete
         RABBIT_PASSWORD=secrete
         SERVICE_PASSWORD=secrete
-        SERVICE_TOKEN=secrete
 
         ## Neutron options
         Q_USE_SECGROUP=True
@@ -410,12 +413,11 @@
         MYSQL_PASSWORD=secrete
         RABBIT_PASSWORD=secrete
         SERVICE_PASSWORD=secrete
-        SERVICE_TOKEN=secrete
 
         # Services that a compute node runs
         ENABLED_SERVICES=n-cpu,rabbit,q-agt
 
-        ## Neutron options
+        ## Open vSwitch provider networking options
         PHYSICAL_NETWORK=default
         OVS_PHYSICAL_BRIDGE=br-ex
         PUBLIC_INTERFACE=eth1
@@ -438,6 +440,16 @@
 Miscellaneous Tips
 ==================
 
+Non-Standard MTU on the Physical Network
+----------------------------------------
+
+DevStack assumes by default that the MTU on the physical network
+is 1500.  A different MTU can be specified by adding the following to
+the ``localrc`` part of ``local.conf`` on each machine.
+
+::
+
+    Q_ML2_PLUGIN_PATH_MTU=1500
 
 Disabling Next Generation Firewall Tools
 ----------------------------------------
@@ -478,3 +490,48 @@
 by default. If you want to remove all the extension drivers (even
 'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank.
 
+
+Using Linux Bridge instead of Open vSwitch
+------------------------------------------
+
+The configuration for using the Linux Bridge ML2 driver is fairly
+straight forward. The Linux Bridge configuration for DevStack is similar
+to the :ref:`Open vSwitch based single interface <single-interface-ovs>`
+setup, with small modifications for the interface mappings.
+
+
+::
+
+    [[local|localrc]]
+    HOST_IP=172.18.161.6
+    SERVICE_HOST=172.18.161.6
+    MYSQL_HOST=172.18.161.6
+    RABBIT_HOST=172.18.161.6
+    GLANCE_HOSTPORT=172.18.161.6:9292
+    ADMIN_PASSWORD=secrete
+    DATABASE_PASSWORD=secrete
+    RABBIT_PASSWORD=secrete
+    SERVICE_PASSWORD=secrete
+
+    # Do not use Nova-Network
+    disable_service n-net
+    # Enable Neutron
+    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+
+
+    ## Neutron options
+    Q_USE_SECGROUP=True
+    FLOATING_RANGE="172.18.161.0/24"
+    FIXED_RANGE="10.0.0.0/24"
+    Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
+    PUBLIC_NETWORK_GATEWAY="172.18.161.1"
+    Q_L3_ENABLED=True
+    PUBLIC_INTERFACE=eth0
+
+    Q_USE_PROVIDERNET_FOR_PUBLIC=True
+
+    # Linuxbridge Settings
+    Q_AGENT=linuxbridge
+    LB_PHYSICAL_INTERFACE=eth0
+    PUBLIC_PHYSICAL_NETWORK=default
+    LB_INTERFACE_MAPPINGS=default:eth0
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index a01c368..011c41f 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -108,7 +108,6 @@
     DATABASE_PASSWORD=iheartdatabases
     RABBIT_PASSWORD=flopsymopsy
     SERVICE_PASSWORD=iheartksl
-    SERVICE_TOKEN=xyzpdqlazydog
 
 Run DevStack:
 
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 53c3fa9..45b8f2d 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -67,7 +67,6 @@
             echo DATABASE_PASSWORD=password >> local.conf
             echo RABBIT_PASSWORD=password >> local.conf
             echo SERVICE_PASSWORD=password >> local.conf
-            echo SERVICE_TOKEN=tokentoken >> local.conf
             ./stack.sh
         path: /home/stack/start.sh
         permissions: 0755
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 2622436..4a1d93d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -20,7 +20,7 @@
 
 #. Select a Linux Distribution
 
-   Only Ubuntu 14.04 (Trusty), Fedora 21 (or Fedora 22) and CentOS/RHEL
+   Only Ubuntu 14.04 (Trusty), Fedora 22 (or Fedora 23) and CentOS/RHEL
    7 are documented here. OpenStack also runs and is packaged on other
    flavors of Linux such as OpenSUSE and Debian.
 
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 8396d2f..b96883a 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -82,16 +82,30 @@
 Additional Services
 ===================
 
-+----------------+--------------------------------------------------+------------+
-| Plugin Name    | URL                                              | Comments   |
-|                |                                                  |            |
-+----------------+--------------------------------------------------+------------+
-|ec2-api         |git://git.openstack.org/openstack/ec2-api         |[as1]_      |
-+----------------+--------------------------------------------------+------------+
-|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector|            |
-+----------------+--------------------------------------------------+------------+
-|                |                                                  |            |
-+----------------+--------------------------------------------------+------------+
++-----------------+------------------------------------------------------------+------------+
+| Plugin Name     | URL                                                        | Comments   |
+|                 |                                                            |            |
++-----------------+------------------------------------------------------------+------------+
+|amqp1            |git://git.openstack.org/openstack/devstack-plugin-amqp1     |            |
++-----------------+------------------------------------------------------------+------------+
+|bdd              |git://git.openstack.org/openstack/devstack-plugin-bdd       |            |
++-----------------+------------------------------------------------------------+------------+
+|ec2-api          |git://git.openstack.org/openstack/ec2-api                   |[as1]_      |
++-----------------+------------------------------------------------------------+------------+
+|glusterfs        |git://git.openstack.org/openstack/devstack-plugin-glusterfs |            |
++-----------------+------------------------------------------------------------+------------+
+|hdfs             |git://git.openstack.org/openstack/devstack-plugin-hdfs      |            |
++-----------------+------------------------------------------------------------+------------+
+|ironic-inspector |git://git.openstack.org/openstack/ironic-inspector          |            |
++-----------------+------------------------------------------------------------+------------+
+|pika             |git://git.openstack.org/openstack/devstack-plugin-pika      |            |
++-----------------+------------------------------------------------------------+------------+
+|sheepdog         |git://git.openstack.org/openstack/devstack-plugin-sheepdog  |            |
++-----------------+------------------------------------------------------------+------------+
+|zmq              |git://git.openstack.org/openstack/devstack-plugin-zmq       |            |
++-----------------+------------------------------------------------------------+------------+
+|                 |                                                            |            |
++-----------------+------------------------------------------------------------+------------+
 
 .. [as1] first functional devstack plugin, hence why used in most of
          the examples.
diff --git a/doc/source/stackrc.rst b/doc/source/stackrc.rst
index b21f74f..81d4b80 100644
--- a/doc/source/stackrc.rst
+++ b/doc/source/stackrc.rst
@@ -20,7 +20,7 @@
 
     ::
 
-        ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE
+        ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-cpu,n-net,n-cond,c-sch,c-api,c-vol,n-sch,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE
 
     Other services that are not enabled by default can be enabled in
     ``localrc``. For example, to add Swift, use the following service
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
index 38b901b..cc90128 100644
--- a/extras.d/60-ceph.sh
+++ b/extras.d/60-ceph.sh
@@ -32,7 +32,7 @@
             echo_summary "Configuring Cinder for Ceph"
             configure_ceph_cinder
         fi
-        if is_service_enabled cinder || is_service_enabled nova; then
+        if is_service_enabled n-cpu; then
             # NOTE (leseb): the part below is a requirement to attach Ceph block devices
             echo_summary "Configuring libvirt secret"
             import_libvirt_secret_ceph
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 74f4c60..5e8da99 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -13,7 +13,6 @@
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
-        init_tempest
     elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
         # local.conf Tempest option overrides
         :
diff --git a/files/debs/keystone b/files/debs/keystone
index 370e4aa..fd0317b 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -1,5 +1,6 @@
 libkrb5-dev
 libldap2-dev
 libsasl2-dev
+memcached
 python-mysqldb
 sqlite3
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
index 46832c7..66cfc23 100644
--- a/files/rpms-suse/keystone
+++ b/files/rpms-suse/keystone
@@ -1,3 +1,4 @@
 cyrus-sasl-devel
+memcached
 openldap2-devel
 sqlite3
diff --git a/files/rpms/general b/files/rpms/general
index 2804682..5bc87b6 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -8,9 +8,9 @@
 gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
-iptables-services  # NOPRIME f21,f22,f23
+iptables-services  # NOPRIME f22,f23
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f21,f22,f23
+java-1.8.0-openjdk-headless  # NOPRIME f22,f23
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
@@ -26,7 +26,7 @@
 psmisc
 pyOpenSSL # version in pip uses too much memory
 python-devel
-redhat-rpm-config # MySQL-python rhbz-1195207 f21
+redhat-rpm-config # MySQL-python rhbz-1195207
 screen
 tar
 tcpdump
diff --git a/files/rpms/keystone b/files/rpms/keystone
index c01c261..1703083 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,3 +1,4 @@
+memcached
 mod_ssl
 MySQL-python
 sqlite
diff --git a/files/rpms/nova b/files/rpms/nova
index 4db9a06..0312e85 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,7 +7,7 @@
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f21,f22,f23
+kernel-modules # dist:f22,f23
 kpartx
 kvm # NOPRIME
 libvirt-bin # NOPRIME
diff --git a/functions b/functions
index 762fc47..9495710 100644
--- a/functions
+++ b/functions
@@ -357,7 +357,9 @@
 function wait_for_service {
     local timeout=$1
     local url=$2
+    time_start "wait_for_service"
     timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+    time_stop "wait_for_service"
 }
 
 
diff --git a/functions-common b/functions-common
index 4a9db34..12c925b 100644
--- a/functions-common
+++ b/functions-common
@@ -106,6 +106,9 @@
         --os-username admin \
         --os-password $ADMIN_PASSWORD \
         --os-project-name admin
+
+    # Clean up any old clouds.yaml files we had lying around
+    rm -f ~$STACK_USER/.config/openstack/clouds.yaml
 }
 
 # trueorfalse <True|False> <VAR>
@@ -410,6 +413,8 @@
         DISTRO="rhel${os_RELEASE::1}"
     elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
         DISTRO="xs$os_RELEASE"
+    elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then
+        DISTRO="${os_VENDOR}${os_RELEASE::1}"
     else
         # Catch-all for now is Vendor + Release + Update
         DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
@@ -444,7 +449,7 @@
 
     [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
         [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] || \
-        [ "$os_VENDOR" = "CloudLinux" ]
+        [ "$os_VENDOR" = "CloudLinux" ] || [ "$os_VENDOR" = "kvmibm" ]
 }
 
 
@@ -597,6 +602,7 @@
         timeout=${GIT_TIMEOUT}
     fi
 
+    time_start "git_timed"
     until timeout -s SIGINT ${timeout} git "$@"; do
         # 124 is timeout(1)'s special return code when it reached the
         # timeout; otherwise assume fatal failure
@@ -611,6 +617,7 @@
         fi
         sleep 5
     done
+    time_stop "git_timed"
 }
 
 # git update using reference as a branch.
@@ -892,6 +899,38 @@
     echo $user_role_id
 }
 
+# Gets or adds user role to domain
+# Usage: get_or_add_user_domain_role <role> <user> <domain>
+function get_or_add_user_domain_role {
+    local user_role_id
+    # Gets user role id
+    user_role_id=$(openstack role list \
+        --user $2 \
+        --os-url=$KEYSTONE_SERVICE_URI_V3 \
+        --os-identity-api-version=3 \
+        --column "ID" \
+        --domain $3 \
+        --column "Name" \
+        | grep " $1 " | get_field 1)
+    if [[ -z "$user_role_id" ]]; then
+        # Adds role to user and get it
+        openstack role add $1 \
+            --user $2 \
+            --domain $3 \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3
+        user_role_id=$(openstack role list \
+            --user $2 \
+            --os-url=$KEYSTONE_SERVICE_URI_V3 \
+            --os-identity-api-version=3 \
+            --column "ID" \
+            --domain $3 \
+            --column "Name" \
+            | grep " $1 " | get_field 1)
+    fi
+    echo $user_role_id
+}
+
 # Gets or adds group role to project
 # Usage: get_or_add_group_project_role <role> <group> <project>
 function get_or_add_group_project_role {
@@ -975,8 +1014,6 @@
 function get_endpoint_url {
     echo $(openstack endpoint list \
             --service $1 --interface $2 \
-            --os-url $KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             -c URL -f value)
 }
 
@@ -1045,7 +1082,7 @@
 # Uses globals ``OFFLINE``, ``*_proxy``
 # apt_get operation package [package ...]
 function apt_get {
-    local xtrace
+    local xtrace result
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
@@ -1061,10 +1098,12 @@
     $sudo DEBIAN_FRONTEND=noninteractive \
         http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
         no_proxy=${no_proxy:-} \
-        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" < /dev/null
+    result=$?
 
     # stop the clock
     time_stop "apt-get"
+    return $result
 }
 
 function _parse_package_files {
@@ -1241,7 +1280,9 @@
 # install_package package [package ...]
 function install_package {
     update_package_repo
-    real_install_package $@ || RETRY_UPDATE=True update_package_repo && real_install_package $@
+    if ! real_install_package "$@"; then
+        RETRY_UPDATE=True update_package_repo && real_install_package "$@"
+    fi
 }
 
 # Distro-agnostic function to tell if a package is installed
@@ -1344,10 +1385,11 @@
     exec 3>&-
     exec 6>&-
 
-    local real_logfile="${LOGDIR}/${service}.log.${CURRENT_LOG_TIME}"
+    local logfile="${service}.log.${CURRENT_LOG_TIME}"
+    local real_logfile="${LOGDIR}/${logfile}"
     if [[ -n ${LOGDIR} ]]; then
         exec 1>&"$real_logfile" 2>&1
-        ln -sf "$real_logfile" ${LOGDIR}/${service}.log
+        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
         if [[ -n ${SCREEN_LOGDIR} ]]; then
             # Drop the backward-compat symlink
             ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
@@ -1408,6 +1450,7 @@
     local command="$2"
     local group=$3
 
+    time_start "run_process"
     if is_service_enabled $service; then
         if [[ "$USE_SCREEN" = "True" ]]; then
             screen_process "$service" "$command" "$group"
@@ -1416,6 +1459,7 @@
             _run_process "$service" "$command" "$group" &
         fi
     fi
+    time_stop "run_process"
 }
 
 # Helper to launch a process in a named screen
@@ -1434,7 +1478,8 @@
 
     screen -S $SCREEN_NAME -X screen -t $name
 
-    local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}"
+    local logfile="${name}.log.${CURRENT_LOG_TIME}"
+    local real_logfile="${LOGDIR}/${logfile}"
     echo "LOGDIR: $LOGDIR"
     echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
     echo "log: $real_logfile"
@@ -1445,7 +1490,7 @@
         fi
         # If logging isn't active then avoid a broken symlink
         touch "$real_logfile"
-        ln -sf "$real_logfile" ${LOGDIR}/${name}.log
+        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log"
         if [[ -n ${SCREEN_LOGDIR} ]]; then
             # Drop the backward-compat symlink
             ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
@@ -2230,9 +2275,11 @@
     local until=${3:-10}
     local sleep=${4:-0.5}
 
+    time_start "test_with_retry"
     if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then
         die $LINENO "$failmsg"
     fi
+    time_stop "test_with_retry"
 }
 
 # Timing infrastructure - figure out where large blocks of time are
diff --git a/inc/ini-config b/inc/ini-config
index d2830d7..e99b088 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -261,6 +261,18 @@
     $xtrace
 }
 
+# Get list of sections from an INI file
+# iniget_sections config-file
+function iniget_sections {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+
+    echo $(sed -ne "s/^\[\(.*\)\]/\1/p" "$file")
+    $xtrace
+}
+
 # Restore xtrace
 $INC_CONF_TRACE
 
diff --git a/inc/meta-config b/inc/meta-config
index b6fe437..6eb7a00 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -195,6 +195,25 @@
     done
 }
 
+function extract_localrc_section {
+    local configfile=$1    # top_dir/local.conf
+    local localrcfile=$2   # top_dir/localrc
+    local localautofile=$3 # top_dir/.localrc.auto
+
+    if [[ -r $configfile ]]; then
+        LRC=$(get_meta_section_files $configfile local)
+        for lfile in $LRC; do
+            if [[ "$lfile" == "localrc" ]]; then
+                if [[ -r $localrcfile ]]; then
+                    echo "localrc and local.conf:[[local]] both exist, using localrc"
+                else
+                    echo "# Generated file, do not edit" >$localautofile
+                    get_meta_section $configfile local $lfile >>$localautofile
+                fi
+            fi
+        done
+    fi
+}
 
 # Restore xtrace
 $_XTRACE_INC_META
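
A hedged sketch of how ``extract_localrc_section`` would typically be called;
the arguments mirror the parameter comments inside the function and assume the
usual DevStack ``TOP_DIR`` layout::

    # Generate .localrc.auto from the [[local|localrc]] meta-section of
    # local.conf, unless a hand-written localrc already exists.
    extract_localrc_section $TOP_DIR/local.conf \
        $TOP_DIR/localrc \
        $TOP_DIR/.localrc.auto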
diff --git a/inc/python b/inc/python
index c157604..35bab6f 100644
--- a/inc/python
+++ b/inc/python
@@ -81,7 +81,7 @@
 # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
 # pip_install package [package ...]
 function pip_install {
-    local xtrace
+    local xtrace result
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local upgrade=""
@@ -155,10 +155,11 @@
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
         $cmd_pip $upgrade \
         $@
+    result=$?
 
     # Also install test requirements
     local test_req="${!#}/test-requirements.txt"
-    if [[ -e "$test_req" ]]; then
+    if [[ $result == 0 ]] && [[ -e "$test_req" ]]; then
         echo "Installing test-requirements for $test_req"
         $sudo_pip \
             http_proxy=${http_proxy:-} \
@@ -167,9 +168,11 @@
             PIP_FIND_LINKS=$PIP_FIND_LINKS \
             $cmd_pip $upgrade \
             -r $test_req
+        result=$?
     fi
 
     time_stop "pip_install"
+    return $result
 }
 
 # get version of a package from global requirements file
@@ -239,15 +242,31 @@
 
 # this should be used if you want to install globally, all libraries should
 # use this, especially *oslo* ones
+#
+# setup_install project_dir [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <project_dir>[<extras>]"
 function setup_install {
     local project_dir=$1
-    setup_package_with_constraints_edit $project_dir
+    local extras=$2
+    _setup_package_with_constraints_edit $project_dir "" $extras
 }
 
 # this should be used for projects which run services, like all services
+#
+# setup_develop project_dir [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install -e <project_dir>[<extras>]"
 function setup_develop {
     local project_dir=$1
-    setup_package_with_constraints_edit $project_dir -e
+    local extras=$2
+    _setup_package_with_constraints_edit $project_dir -e $extras
 }
 
 # determine if a project as specified by directory is in
@@ -269,10 +288,17 @@
 # install this package we get the from source version.
 #
 # Uses globals ``REQUIREMENTS_DIR``
-# setup_develop directory
-function setup_package_with_constraints_edit {
+# _setup_package_with_constraints_edit project_dir flags [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# flags: pip CLI options/flags
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <flags> <project_dir>[<extras>]"
+function _setup_package_with_constraints_edit {
     local project_dir=$1
     local flags=$2
+    local extras=$3
 
     if [ -n "$REQUIREMENTS_DIR" ]; then
         # Constrain this package to this project directory from here on out.
@@ -283,19 +309,38 @@
             "$flags file://$project_dir#egg=$name"
     fi
 
-    setup_package $project_dir $flags
+    setup_package $project_dir "$flags" $extras
 
 }
 
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
+#
 # Uses globals ``STACK_USER``
-# setup_develop_no_requirements_update directory
+# setup_package project_dir [flags] [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# flags: pip CLI options/flags
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <flags> <project_dir>[<extras>]"
 function setup_package {
     local project_dir=$1
     local flags=$2
+    local extras=$3
 
-    pip_install $flags $project_dir
+    # if the flags variable exists, and it doesn't look like a flag,
+    # assume it's actually the extras list.
+    if [[ -n "$flags" && -z "$extras" && ! "$flags" =~ ^-.* ]]; then
+        extras=$flags
+        flags=""
+    fi
+
+    if [[ ! -z "$extras" ]]; then
+        extras="[$extras]"
+    fi
+
+    pip_install $flags "$project_dir$extras"
     # ensure that further actions can do things like setup.py sdist
     if [[ "$flags" == "-e" ]]; then
         safe_chown -R $STACK_USER $1/*.egg-info
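
To make the new extras plumbing concrete, a hedged illustration of what the
optional argument expands to; the keystone path and the ldap extra are taken
from the lib/keystone hunk below, and the pip commands in the comments are
approximations::

    # Editable install of keystone plus its optional ldap dependencies,
    # roughly equivalent to: pip install -e /opt/stack/keystone[ldap]
    setup_develop /opt/stack/keystone ldap

    # Multiple extras are comma separated, for example:
    # setup_develop /opt/stack/keystone ldap,memcache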
diff --git a/lib/apache b/lib/apache
index c9e02a2..2c84c7a 100644
--- a/lib/apache
+++ b/lib/apache
@@ -185,9 +185,11 @@
     # Apache can be slow to stop, doing an explicit stop, sleep, start helps
     # to mitigate issues where apache will claim a port it's listening on is
     # still in use and fail to start.
+    time_start "restart_apache_server"
     stop_service $APACHE_NAME
     sleep 3
     start_service $APACHE_NAME
+    time_stop "restart_apache_server"
 }
 
 # Restore xtrace
diff --git a/lib/ceph b/lib/ceph
index 4ac498a..3e0839a 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -116,7 +116,7 @@
 
 # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
 function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f21|f22|f23) ]]; then
+    if [[ ! ${DISTRO} =~ (trusty|f22|f23) ]]; then
         echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
         if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
             die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
diff --git a/lib/cinder b/lib/cinder
index 5bd940b..3aea050 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -259,7 +259,6 @@
 
     iniset $CINDER_CONF DEFAULT auth_strategy keystone
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $CINDER_CONF DEFAULT verbose True
 
     iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER"
     iniset $CINDER_CONF database connection `database_connection_url cinder`
@@ -299,7 +298,7 @@
     fi
 
     if is_service_enabled ceilometer; then
-        iniset $CINDER_CONF DEFAULT notification_driver "messaging"
+        iniset $CINDER_CONF oslo_messaging_notifications driver "messaging"
     fi
 
     if is_service_enabled tls-proxy; then
@@ -467,6 +466,8 @@
 function _configure_tgt_for_config_d {
     if [[ ! -d /etc/tgt/stack.d/ ]]; then
         sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d
+    fi
+    if ! grep -q "include /etc/tgt/stack.d/*" /etc/tgt/targets.conf; then
         echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf
     fi
 }
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index c21350b..9bff5be 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -62,7 +62,7 @@
                 sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
             fi
         fi
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
         iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 1bbbd62..f6cc922 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -154,6 +154,7 @@
         fi
     elif is_fedora; then
         install_package mariadb-server
+        sudo systemctl enable mariadb
     elif is_ubuntu; then
         install_package mysql-server
     else
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 913e8ff..204c257 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -101,6 +101,9 @@
         install_package postgresql
     elif is_fedora || is_suse; then
         install_package postgresql-server
+        if is_fedora; then
+            sudo systemctl enable postgresql-server
+        fi
     else
         exit_distro_not_supported "postgresql installation"
     fi
diff --git a/lib/glance b/lib/glance
index 19e7937..0431bba 100644
--- a/lib/glance
+++ b/lib/glance
@@ -113,7 +113,7 @@
     iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
     configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
-    iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
+    iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messaging
     iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
@@ -126,7 +126,7 @@
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
     configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
-    iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
+    iniset $GLANCE_API_CONF oslo_messaging_notifications driver messaging
     iniset_rpc_backend glance $GLANCE_API_CONF
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
diff --git a/lib/horizon b/lib/horizon
index 67181fc..dca3111 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -83,10 +83,7 @@
     # Message catalog compilation is handled by Django admin script,
     # so compiling them after the installation avoids Django installation twice.
     (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages)
-}
 
-# init_horizon() - Initialize databases, etc.
-function init_horizon {
     # ``local_settings.py`` is used to override horizon default settings.
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
@@ -113,6 +110,7 @@
     horizon_conf=$(apache_site_config_for horizon)
 
     # Configure apache to run horizon
+    # Set up the django horizon application to serve via apache/wsgi
     sudo sh -c "sed -e \"
         s,%USER%,$APACHE_USER,g;
         s,%GROUP%,$APACHE_GROUP,g;
@@ -133,7 +131,10 @@
         exit_distro_not_supported "horizon apache configuration"
     fi
     enable_apache_site horizon
+}
 
+# init_horizon() - Initialize databases, etc.
+function init_horizon {
     # Remove old log files that could mess with how DevStack detects whether Horizon
     # has been successfully started (see start_horizon() and functions::screen_it())
     # and run_process
@@ -147,6 +148,7 @@
         django_admin=django-admin.py
     fi
 
+    # These need to be run after horizon plugins are configured.
     DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin collectstatic --noinput
     DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin compress --force
 
diff --git a/lib/keystone b/lib/keystone
index b4b7df9..d60a4ba 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -12,7 +12,6 @@
 # - ``IDENTITY_API_VERSION``
 # - ``BASE_SQL_CONN``
 # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
-# - ``SERVICE_TOKEN``
 # - ``S3_SERVICE_PORT`` (template backend only)
 
 # ``stack.sh`` calls the entry points in this order:
@@ -22,6 +21,7 @@
 # - _config_keystone_apache_wsgi
 # - init_keystone
 # - start_keystone
+# - bootstrap_keystone
 # - create_keystone_accounts
 # - stop_keystone
 # - cleanup_keystone
@@ -68,6 +68,12 @@
 # Select the Assignment backend driver
 KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql}
 
+# Select the Role backend driver
+KEYSTONE_ROLE_BACKEND=${KEYSTONE_ROLE_BACKEND:-sql}
+
+# Select the Resource backend driver
+KEYSTONE_RESOURCE_BACKEND=${KEYSTONE_RESOURCE_BACKEND:-sql}
+
 # Select Keystone's token provider (and format)
 # Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
 KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
@@ -209,6 +215,16 @@
 
     iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND"
     iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND"
+    iniset $KEYSTONE_CONF role driver "$KEYSTONE_ROLE_BACKEND"
+    iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND"
+
+    # Enable caching
+    iniset $KEYSTONE_CONF cache enabled "True"
+    iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool"
+    iniset $KEYSTONE_CONF cache memcache_servers $SERVICE_HOST:11211
+
+    # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617
+    iniset $KEYSTONE_CONF catalog caching "False"
 
     iniset_rpc_backend keystone $KEYSTONE_CONF
 
@@ -230,8 +246,6 @@
         iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
     fi
 
-    iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
-
     if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
         iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
     fi
@@ -324,14 +338,17 @@
 # Migrated from keystone_data.sh
 function create_keystone_accounts {
 
-    # admin
+    # The keystone bootstrapping process (performed via keystone-manage bootstrap)
+    # creates an admin user, admin role and admin project. As a sanity check
+    # we exercise the CLI to retrieve the IDs for these values.
     local admin_tenant
-    admin_tenant=$(get_or_create_project "admin" default)
+    admin_tenant=$(openstack project show "admin" -f value -c id)
     local admin_user
-    admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
+    admin_user=$(openstack user show "admin" -f value -c id)
     local admin_role
-    admin_role=$(get_or_create_role "admin")
-    get_or_add_user_project_role $admin_role $admin_user $admin_tenant
+    admin_role=$(openstack role show "admin" -f value -c id)
+
+    get_or_add_user_domain_role $admin_role $admin_user default
 
     # Create service project/role
     get_or_create_project "$SERVICE_TENANT_NAME" default
@@ -380,17 +397,6 @@
     get_or_add_group_project_role $member_role $non_admin_group $demo_tenant
     get_or_add_group_project_role $another_role $non_admin_group $demo_tenant
     get_or_add_group_project_role $admin_role $admin_group $admin_tenant
-
-    # Keystone
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-        get_or_create_service "keystone" "identity" "Keystone Identity Service"
-        get_or_create_endpoint "identity" \
-            "$REGION_NAME" \
-            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
-            "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \
-            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
-    fi
 }
 
 # Create a user that is capable of verifying keystone tokens for use with auth_token middleware.
@@ -421,7 +427,7 @@
     local signing_dir=$3
     local section=${4:-keystone_authtoken}
 
-    iniset $conf_file $section auth_plugin password
+    iniset $conf_file $section auth_type password
     iniset $conf_file $section auth_url $KEYSTONE_AUTH_URI
     iniset $conf_file $section username $admin_user
     iniset $conf_file $section password $SERVICE_PASSWORD
@@ -432,6 +438,7 @@
     iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_URI
     iniset $conf_file $section cafile $SSL_BUNDLE_FILE
     iniset $conf_file $section signing_dir $signing_dir
+    iniset $conf_file $section memcache_servers $SERVICE_HOST:11211
 }
 
 # init_keystone() - Initialize databases, etc.
@@ -485,6 +492,9 @@
         # When not installing from repo, keystonemiddleware is still needed...
         pip_install_gr keystonemiddleware
     fi
+    # Install the memcache library so keystonemiddleware can cache tokens in a
+    # shared location.
+    pip_install_gr python-memcached
 }
 
 # install_keystone() - Collect source and prepare
@@ -493,19 +503,14 @@
     if is_service_enabled ldap; then
         install_ldap
     fi
-    if [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
-        # Install memcached and the memcache Python library that keystone uses.
-        # Unfortunately the Python library goes by different names in the .deb
-        # and .rpm circles.
-        install_package memcached
-        if is_ubuntu; then
-            install_package python-memcache
-        else
-            install_package python-memcached
-        fi
-    fi
+
     git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
     setup_develop $KEYSTONE_DIR
+
+    if is_service_enabled ldap; then
+        setup_develop $KEYSTONE_DIR ldap
+    fi
+
     if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
         install_apache_wsgi
         if is_ssl_enabled_service "key"; then
@@ -547,6 +552,9 @@
         start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT &
         start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT &
     fi
+
+    # (re)start memcached to make sure we have a clean memcache.
+    restart_service memcached
 }
 
 # stop_keystone() - Stop running processes
@@ -559,6 +567,55 @@
     stop_process key
 }
 
+# bootstrap_keystone() - Initialize user, role and project
+# This function uses the following GLOBAL variables:
+# - ``KEYSTONE_BIN_DIR``
+# - ``ADMIN_PASSWORD``
+# - ``IDENTITY_API_VERSION``
+# - ``KEYSTONE_CATALOG_BACKEND``
+# - ``KEYSTONE_AUTH_URI``
+# - ``REGION_NAME``
+# - ``KEYSTONE_SERVICE_PROTOCOL``
+# - ``KEYSTONE_SERVICE_HOST``
+# - ``KEYSTONE_SERVICE_PORT``
+function bootstrap_keystone {
+
+    # Initialize keystone; this will create an 'admin' user, 'admin' project,
+    # 'admin' role, and assign the user the role on the project. These resources
+    # are created only if they do not already exist.
+    $KEYSTONE_BIN_DIR/keystone-manage bootstrap --bootstrap-password $ADMIN_PASSWORD
+
+    # Create the keystone service and endpoints. To do this with the new
+    # bootstrapping process, we need to get a token and use that token to
+    # interact with the new APIs. The token will only be used to create services
+    # and endpoints, thus creating a minimal service catalog.
+    # The OS_* variables set below are unset immediately afterwards.
+    # TODO(stevemar): OpenStackClient and KeystoneClient do not support
+    # interactions that do not return service catalogs. Eventually remove
+    # this section when the support is in place. Use token based auth for now.
+    local token_id
+    token_id=$(openstack token issue -c id -f value \
+        --os-username admin --os-project-name admin \
+        --os-user-domain-id default --os-project-domain-id default \
+        --os-identity-api-version 3 --os-auth-url $KEYSTONE_AUTH_URI \
+        --os-password $ADMIN_PASSWORD)
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+
+        export OS_TOKEN=$token_id
+        export OS_URL=$KEYSTONE_AUTH_URI/v3
+        export OS_IDENTITY_API_VERSION=3
+
+        get_or_create_service "keystone" "identity" "Keystone Identity Service"
+        get_or_create_endpoint "identity" \
+            "$REGION_NAME" \
+            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
+            "$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION" \
+            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
+    fi
+
+    unset OS_TOKEN OS_URL OS_IDENTITY_API_VERSION
+}
 
 # Restore xtrace
 $_XTRACE_KEYSTONE
diff --git a/lib/ldap b/lib/ldap
index 65056ae..4cea812 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -145,8 +145,6 @@
         sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
     fi
 
-    pip_install_gr ldappool
-
     rm -rf $tmp_ldap_dir
 }
 
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index cc5b75e..78eb55d 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -802,7 +802,8 @@
     local from_intf=$1
     local to_intf=$2
     local add_ovs_port=$3
-    local af=$4
+    local del_ovs_port=$4
+    local af=$5
 
     if [[ -n "$from_intf" && -n "$to_intf" ]]; then
         # Remove the primary IP address from $from_intf and add it to $to_intf,
@@ -816,6 +817,7 @@
         local DEFAULT_ROUTE_GW
         DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }")
         local ADD_OVS_PORT=""
+        local DEL_OVS_PORT=""
 
         IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
 
@@ -827,13 +829,19 @@
             ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
         fi
 
+        if [[ "$del_ovs_port" == "True" ]]; then
+            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+        fi
+
         if [[ "$IP_BRD" != "" ]]; then
             IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
             IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
             IP_UP="sudo ip link set $to_intf up"
         fi
 
-        $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
+        # The del-port call has to happen before the address is moved, and
+        # the add-port call after, so the address is never left orphaned.
+        $DEL_OVS_PORT; $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
     fi
 }
 
@@ -842,14 +850,14 @@
 function cleanup_neutron {
 
     if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
-        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
 
         if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
             # ip(8) wants the prefix length when deleting
             local v6_gateway
             v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
             sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
-            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
+            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
         fi
 
         if is_provider_network && is_ironic_hardware; then
@@ -955,7 +963,7 @@
         setup_colorized_logging $NEUTRON_CONF DEFAULT project_id
     else
         # Show user_name and project_name by default like in nova
-        iniset $NEUTRON_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+        iniset $NEUTRON_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s"
     fi
 
     if is_service_enabled tls-proxy; then
@@ -985,7 +993,6 @@
 
     cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_TEST_CONFIG_FILE
 
-    iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
     iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
@@ -1001,7 +1008,6 @@
 
     cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
 
-    iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
     iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
@@ -1033,7 +1039,6 @@
 
     cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE
 
-    iniset $Q_L3_CONF_FILE DEFAULT verbose True
     iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
@@ -1044,32 +1049,26 @@
 
     neutron_plugin_configure_l3_agent
 
-    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
+    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
 
     if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6"
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
     fi
 }
 
 function _configure_neutron_metadata_agent {
     cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
 
-    iniset $Q_META_CONF_FILE DEFAULT verbose True
     iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
     iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
     fi
-
-    # Configures keystone for metadata_agent
-    # The third argument "True" sets auth_url needed to communicate with keystone
-    _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True
-
 }
 
 function _configure_neutron_ceilometer_notifications {
-    iniset $NEUTRON_CONF DEFAULT notification_driver messaging
+    iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging
 }
 
 function _configure_neutron_lbaas {
@@ -1123,7 +1122,6 @@
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE  agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
     fi
-    iniset $NEUTRON_CONF DEFAULT verbose True
     iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
     # Configure agent for plugin
@@ -1143,7 +1141,6 @@
         iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
     fi
 
-    iniset $NEUTRON_CONF DEFAULT verbose True
     iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
     iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
@@ -1233,17 +1230,10 @@
     fi
 }
 
-# Configures keystone integration for neutron service and agents
+# Configures keystone integration for neutron service
 function _neutron_setup_keystone {
     local conf_file=$1
     local section=$2
-    local use_auth_url=$3
-
-    # Configures keystone for metadata_agent
-    # metadata_agent needs auth_url to communicate with keystone
-    if [[ "$use_auth_url" == "True" ]]; then
-        iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0
-    fi
 
     create_neutron_cache_dir
     configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 30e1b03..0483ef1 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -40,6 +40,12 @@
 # L3 Plugin to load for ML2
 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
 
+# Underlying path MTU for the physical network carrying br-tun traffic; use
+# '-' instead of ':-' to allow people to explicitly override this to blank,
+# which disables automatic MTU calculation for tunnelled tenant networks.
+Q_ML2_PLUGIN_PATH_MTU=${Q_ML2_PLUGIN_PATH_MTU-1500}
+
+
 function populate_ml2_config {
     CONF=$1
     SECTION=$2
@@ -77,6 +83,12 @@
         echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts."
     fi
 
+    # Enable the ml2 MTU calculation mechanism for networks by providing the
+    # path MTU value of the physical devices that carry br-tun traffic.
+    if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]] && [[ "$Q_ML2_PLUGIN_PATH_MTU" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ml2 path_mtu "$Q_ML2_PLUGIN_PATH_MTU"
+    fi
+
     # Allow for overrding VLAN configuration (for example, to configure provider
     # VLANs) by first checking if Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is set.
     if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
deleted file mode 100644
index 9ea7338..0000000
--- a/lib/neutron_plugins/nec
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# This file is needed so Q_PLUGIN=nec will work.
-
-# FIXME(amotoki): This function should not be here, but unfortunately
-# devstack calls it before the external plugins are fetched
-function has_neutron_plugin_security_group {
-    # 0 means True here
-    return 0
-}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 91aff33..59c7737 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -67,6 +67,7 @@
         restart_service openvswitch-switch
     elif is_fedora; then
         restart_service openvswitch
+        sudo systemctl enable openvswitch
     elif is_suse; then
         restart_service openvswitch-switch
     fi
diff --git a/lib/nova b/lib/nova
index c75623f..79bef9b 100644
--- a/lib/nova
+++ b/lib/nova
@@ -98,6 +98,10 @@
 # should work in most cases.
 SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 
+# The following FILTERS list adds SameHostFilter and DifferentHostFilter to
+# the default scheduler filters.
+FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 # Set default defaults here as some hypervisor drivers override these
@@ -446,7 +450,7 @@
     fi
 
     # S3
-    if is_service_enabled n-obj swift3; then
+    if is_service_enabled swift3; then
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
             get_or_create_service "s3" "s3" "S3"
@@ -473,7 +477,7 @@
     iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
-    iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
+    iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS"
     iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
@@ -542,7 +546,7 @@
         setup_colorized_logging $NOVA_CONF DEFAULT
     else
         # Show user_name and project_name instead of user_id and project_id
-        iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+        iniset $NOVA_CONF DEFAULT logging_user_identity_format "%(user_name)s %(project_name)s"
     fi
     if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
         _config_nova_apache_wsgi
@@ -552,16 +556,16 @@
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
         iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
-        iniset $NOVA_CONF DEFAULT notification_driver "messaging"
+        iniset $NOVA_CONF oslo_messaging_notifications driver "messaging"
     fi
 
     # All nova-compute workers need to know the vnc configuration options
     # These settings don't hurt anything if n-xvnc and n-novnc are disabled
     if is_service_enabled n-cpu; then
         NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
-        iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+        iniset $NOVA_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
         XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
-        iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+        iniset $NOVA_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
         SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
         iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
     fi
@@ -571,13 +575,13 @@
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF DEFAULT vnc_enabled true
-        iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
-        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+        iniset $NOVA_CONF vnc enabled true
+        iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN"
+        iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF DEFAULT novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $NOVA_CONF DEFAULT xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
     else
-        iniset $NOVA_CONF DEFAULT vnc_enabled false
+        iniset $NOVA_CONF vnc enabled false
     fi
 
     if is_service_enabled n-spice; then
@@ -628,6 +632,23 @@
         iniset $NOVA_CONF serial_console enabled True
     fi
     iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
+
+    # Set up logging for the nova-dhcpbridge command line
+    sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
+
+    local service="n-dhcp"
+    local logfile="${service}.log.${CURRENT_LOG_TIME}"
+    local real_logfile="${LOGDIR}/${logfile}"
+    if [[ -n ${LOGDIR} ]]; then
+        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
+        iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            # Also create the backward-compat symlink in SCREEN_LOGDIR
+            ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
+        fi
+    fi
+
+    iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
 }
 
 function init_nova_cells {
@@ -668,6 +689,8 @@
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
+    # Force the DHCP lease time down to 5 minutes so stale leases expire faster
+    iniset $NOVA_CONF DEFAULT dhcp_lease_time 300
     if [ -n "$FLAT_INTERFACE" ]; then
         iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
     fi
@@ -864,11 +887,6 @@
     run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
     run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
 
-    # Starting the nova-objectstore only if swift3 service is not enabled.
-    # Swift will act as s3 objectstore.
-    is_service_enabled swift3 || \
-        run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
-
     export PATH=$old_path
 }
 
@@ -902,7 +920,7 @@
     # Kill the nova screen windows
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
-    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj n-sproxy; do
+    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-sproxy; do
         stop_process $serv
     done
 }
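
With SameHostFilter and DifferentHostFilter now in the default filter list,
the same_host and different_host scheduler hints work out of the box. A
minimal sketch (the image, flavor and instance UUID are illustrative)::

    # Ask the scheduler to land the new server on the same host as an
    # existing instance; replace the UUID with a real instance ID.
    nova boot --image cirros-0.3.4-x86_64-uec --flavor m1.tiny \
        --hint same_host=8c19174f-4220-44f0-824a-cd1eeef10287 same-host-server
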
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index dae55c6..dbb4d4f 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -38,7 +38,10 @@
         fi
         #pip_install_gr <there-si-no-guestfs-in-pypi>
     elif is_fedora || is_suse; then
-        install_package kvm
+        # On "KVM for IBM z Systems", kvm does not have its own package
+        if [[ ! ${DISTRO} =~ "kvmibm1" ]]; then
+            install_package kvm
+        fi
         # there is a dependency issue with kvm (which is really just a
         # wrapper to qemu-system-x86) that leaves some bios files out,
         # so install qemu-kvm (which shouldn't strictly be needed, as
@@ -110,10 +113,10 @@
             local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor"
         fi
         local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+        if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
             echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
         fi
-        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+        if ! sudo grep -q "^log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
             echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
         fi
     fi
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 8bbaa21..1b4f7ae 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -48,13 +48,13 @@
     iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
     # Power architecture currently does not support graphical consoles.
     if is_arch "ppc64"; then
-        iniset $NOVA_CONF DEFAULT vnc_enabled "false"
+        iniset $NOVA_CONF vnc enabled "false"
     fi
 
     # arm64-specific configuration
     if is_arch "aarch64"; then
         # arm64 architecture currently does not support graphical consoles.
-        iniset $NOVA_CONF DEFAULT vnc_enabled "false"
+        iniset $NOVA_CONF vnc enabled "false"
     fi
 
     # File injection is being disabled by default in the near future -
@@ -65,9 +65,9 @@
         iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
         iniset $NOVA_CONF libvirt images_type "ploop"
         iniset $NOVA_CONF DEFAULT force_raw_images  "False"
-        iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address  $HOST_IP
-        iniset $NOVA_CONF DEFAULT vncserver_listen $HOST_IP
-        iniset $NOVA_CONF DEFAULT vnc_keymap
+        iniset $NOVA_CONF vnc vncserver_proxyclient_address  $HOST_IP
+        iniset $NOVA_CONF vnc vncserver_listen $HOST_IP
+        iniset $NOVA_CONF vnc keymap
     fi
 }
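
Moving these options out of ``[DEFAULT]`` means the generated nova.conf ends
up with a ``[vnc]`` section roughly like the following (addresses and URLs
are illustrative)::

    [vnc]
    enabled = true
    novncproxy_base_url = http://10.0.0.10:6080/vnc_auto.html
    xvpvncproxy_base_url = http://10.0.0.10:6081/console
    vncserver_listen = 10.0.0.10
    vncserver_proxyclient_address = 10.0.0.10
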
 
diff --git a/lib/oslo b/lib/oslo
index 3d6fbb3..6f5c7d1 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -44,6 +44,7 @@
 GITDIR["oslo.utils"]=$DEST/oslo.utils
 GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects
 GITDIR["oslo.vmware"]=$DEST/oslo.vmware
+GITDIR["osprofiler"]=$DEST/osprofiler
 GITDIR["pycadf"]=$DEST/pycadf
 GITDIR["stevedore"]=$DEST/stevedore
 GITDIR["taskflow"]=$DEST/taskflow
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3864ade..05e303e 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -47,6 +47,9 @@
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         install_package rabbitmq-server
+        if is_fedora; then
+            sudo systemctl enable rabbitmq-server
+        fi
     fi
 }
 
diff --git a/lib/swift b/lib/swift
index b596142..9edeb0a 100644
--- a/lib/swift
+++ b/lib/swift
@@ -817,7 +817,6 @@
     OS_USERNAME=swift \
     OS_PASSWORD=$SERVICE_PASSWORD \
     OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
-    OS_AUTH_URL=$SERVICE_ENDPOINT \
     openstack object store account \
         set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
 }
diff --git a/lib/tempest b/lib/tempest
index c510984..5c771f9 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -35,7 +35,6 @@
 #
 # - install_tempest
 # - configure_tempest
-# - init_tempest
 
 # Save trace setting
 _XTRACE_TEMPEST=$(set +o | grep xtrace)
@@ -67,9 +66,6 @@
 # have tempest installed in DevStack by default.
 INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"}
 
-BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
-BOTO_CONF=/etc/boto.cfg
-
 # Cinder/Volume variables
 TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default}
 TEMPEST_DEFAULT_VOLUME_VENDOR="Open Source"
@@ -131,8 +127,7 @@
     local flavor_lines
     local public_network_id
     local public_router_id
-    local boto_instance_type="m1.tiny"
-    local ssh_connect_method="fixed"
+    local ssh_connect_method="floating"
 
     # Save IFS
     ifs=$IFS
@@ -202,14 +197,12 @@
                 nova flavor-create m1.nano 42 64 0 1
             fi
             flavor_ref=42
-            boto_instance_type=m1.nano
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
                 nova flavor-create m1.micro 84 128 0 1
             fi
             flavor_ref_alt=84
         else
             # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it.
-            boto_instance_type=$DEFAULT_INSTANCE_TYPE
             IFS=$'\r\n'
             flavors=""
             for line in $available_flavors; do
@@ -243,10 +236,6 @@
         fi
     fi
 
-    if ! is_service_enabled n-net; then
-        ssh_connect_method="floating"
-    fi
-
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
 
     if [ "$Q_L3_ENABLED" = "True" ]; then
@@ -254,15 +243,6 @@
             awk '{print $2}')
     fi
 
-    EC2_URL=$(get_endpoint_url ec2 public || true)
-    if [[ -z $EC2_URL ]]; then
-        EC2_URL="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
-    fi
-    S3_URL=$(get_endpoint_url s3 public || true)
-    if [[ -z $S3_URL ]]; then
-        S3_URL="http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
-    fi
-
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
 
     # Oslo
@@ -275,8 +255,6 @@
     # Timeouts
     iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG boto http_socket_timeout 5
 
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
@@ -307,6 +285,9 @@
     if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
         iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
     fi
+    if [ "$VIRT_DRIVER" = "xenserver" ]; then
+        iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
+    fi
 
     # Image Features
     iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
@@ -340,7 +321,7 @@
     local tmp_cfg_file
     tmp_cfg_file=$(mktemp)
     cd $TEMPEST_DIR
-    tox -revenv -- verify-tempest-config -uro $tmp_cfg_file
+    tox -revenv -- tempest verify-config -uro $tmp_cfg_file
 
     local compute_api_extensions=${COMPUTE_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_COMPUTE_API_EXTENSIONS" ]]; then
@@ -422,16 +403,6 @@
     fi
     iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
 
-    # boto
-    iniset $TEMPEST_CONFIG boto ec2_url "$EC2_URL"
-    iniset $TEMPEST_CONFIG boto s3_url "$S3_URL"
-    iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
-    iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml
-    iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img.manifest.xml
-    iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml
-    iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
-    iniset $TEMPEST_CONFIG boto http_socket_timeout 30
-
     # Orchestration Tests
     if is_service_enabled heat; then
         if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
@@ -485,6 +456,8 @@
     iniset $TEMPEST_CONFIG volume-feature-enabled extend_with_snapshot True
     # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life.
     iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True
+    # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo work correctly with host info.
+    iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True
 
     local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
@@ -574,12 +547,6 @@
         iniset $TEMPEST_CONFIG service_available cinder "False"
     fi
 
-    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-        # Use the ``BOTO_CONFIG`` environment variable to point to this file
-        iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
-        sudo chown $STACK_USER $BOTO_CONF
-    fi
-
     # Auth
     iniset $TEMPEST_CONFIG auth tempest_roles "Member"
     if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
@@ -641,35 +608,6 @@
     popd
 }
 
-# init_tempest() - Initialize EC2 images
-function init_tempest {
-    local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}
-    # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec
-    local image_dir="$FILES/images/${base_image_name}-uec"
-    local kernel="$image_dir/${base_image_name}-vmlinuz"
-    local ramdisk="$image_dir/${base_image_name}-initrd"
-    local disk_image="$image_dir/${base_image_name}-blank.img"
-    if is_service_enabled nova; then
-        # If the CirrOS uec downloaded and the system is UEC capable
-        if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a  "$VIRT_DRIVER" != "openvz" \
-            -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
-            echo "Prepare aki/ari/ami Images"
-            mkdir -p $BOTO_MATERIALS_PATH
-            ( #new namespace
-                # euca2ools should be installed to call euca-* commands
-                is_package_installed euca2ools || install_package euca2ools
-                # tenant:demo ; user: demo
-                source $TOP_DIR/accrc/demo/demo
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
-            ) 2>&1 </dev/null | cat
-        else
-            echo "Boto materials are not prepared"
-        fi
-    fi
-}
-
 # Restore xtrace
 $_XTRACE_TEMPEST
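
Since the SSH connection method now defaults to floating instead of being
derived from whether n-net is enabled, deployments that still want
fixed-network SSH can override it through the existing variable; a local.conf
sketch::

    [[local|localrc]]
    TEMPEST_SSH_CONNECT_METHOD=fixed
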
 
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 14d13cf..9c4f6f7 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -88,11 +88,7 @@
         sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
         sudo update-rc.d elasticsearch defaults 95 10
     elif is_fedora; then
-        if [[ "$os_RELEASE" -ge "21" ]]; then
-            is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
-        else
-            is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
-        fi
+        is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
         yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
         sudo /bin/systemctl daemon-reload
         sudo /bin/systemctl enable elasticsearch.service
diff --git a/samples/local.conf b/samples/local.conf
index 34c9e8b..ea68dc0 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -23,10 +23,8 @@
 # While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
 # there are a few minimal variables set:
 
-# If the ``SERVICE_TOKEN`` and ``*_PASSWORD`` variables are not set
-# here you will be prompted to enter values for them by ``stack.sh``
-# and they will be added to ``local.conf``.
-SERVICE_TOKEN=azertytoken
+# If the ``*_PASSWORD`` variables are not set here you will be prompted to
+# enter values for them by ``stack.sh`` and they will be added to ``local.conf``.
 ADMIN_PASSWORD=nomoresecrete
 DATABASE_PASSWORD=stackdb
 RABBIT_PASSWORD=stackqueue
@@ -86,10 +84,9 @@
 # Swift
 # -----
 
-# Swift is now used as the back-end for the S3-like object store. If Nova's
-# objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will NOT
-# run if Swift is enabled. Setting the hash value is required and you will
-# be prompted for it if Swift is enabled so just set it to something already:
+# Swift is now used as the back-end for the S3-like object store. Setting the
+# hash value is required and you will be prompted for it if Swift is enabled
+# so just set it to something already:
 SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
 
 # For development purposes the default of 3 replicas is usually not required.
diff --git a/stack.sh b/stack.sh
index bc67ce0..c56024f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -42,6 +42,8 @@
     set -o nounset
 fi
 
+# Record the start time of the devstack run
+DEVSTACK_START_TIME=$(date +%s)
 
 # Configuration
 # =============
@@ -149,19 +151,7 @@
 
 # Phase: local
 rm -f $TOP_DIR/.localrc.auto
-if [[ -r $TOP_DIR/local.conf ]]; then
-    LRC=$(get_meta_section_files $TOP_DIR/local.conf local)
-    for lfile in $LRC; do
-        if [[ "$lfile" == "localrc" ]]; then
-            if [[ -r $TOP_DIR/localrc ]]; then
-                warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc"
-            else
-                echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto
-                get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto
-            fi
-        fi
-    done
-fi
+extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto
 
 # ``stack.sh`` is customizable by setting environment variables.  Override a
 # default setting via export::
@@ -195,7 +185,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|f23|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f22|f23|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -302,23 +292,13 @@
     sudo yum-config-manager --enable rhel-7-server-optional-rpms
 
     # install the lastest RDO
-    sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm
+    is_package_installed rdo-release || yum_install https://rdoproject.org/repos/rdo-release.rpm
 
     if is_oraclelinux; then
         sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
     fi
 }
 
-# If you have all the repos installed above already setup (e.g. a CI
-# situation where they are on your image) you may choose to skip this
-# to speed things up
-SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
-
-if is_fedora && [[ $DISTRO == "rhel7" ]] && \
-        [[ ${SKIP_EPEL_INSTALL} != True ]]; then
-    _install_epel_and_rdo
-fi
-
 
 # Configure Target Directories
 # ----------------------------
@@ -332,6 +312,11 @@
 safe_chown -R $STACK_USER $DEST
 safe_chmod 0755 $DEST
 
+# Destination path for devstack logs
+if [[ -n ${LOGDIR:-} ]]; then
+    mkdir -p $LOGDIR
+fi
+
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 sudo mkdir -p $DATA_DIR
@@ -345,6 +330,16 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
+# If you have all the repos installed above already setup (e.g. a CI
+# situation where they are on your image) you may choose to skip this
+# to speed things up
+SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
+
+if is_fedora && [[ $DISTRO == "rhel7" ]] && \
+        [[ ${SKIP_EPEL_INSTALL} != True ]]; then
+    _install_epel_and_rdo
+fi
+
 # Ensure python is installed
 # --------------------------
 is_package_installed python || install_package python
@@ -404,10 +399,6 @@
 LOGDAYS=${LOGDAYS:-7}
 CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
 
-if [[ -n ${LOGDIR:-} ]]; then
-    mkdir -p $LOGDIR
-fi
-
 if [[ -n "$LOGFILE" ]]; then
     # Clean up old log files.  Append '.*' to the user-specified
     # ``LOGFILE`` to match the date in the search template.
@@ -493,11 +484,14 @@
 
     if [[ $r -ne 0 ]]; then
         echo "Error on exit"
+        generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
         if [[ -z $LOGDIR ]]; then
             $TOP_DIR/tools/worlddump.py
         else
             $TOP_DIR/tools/worlddump.py -d $LOGDIR
         fi
+    else
+        generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT}
     fi
 
     exit $r
@@ -664,9 +658,6 @@
 # --------
 
 if is_service_enabled keystone; then
-    # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
-    # just a string and is not a 'real' Keystone token.
-    read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
     # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
     read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
     # Horizon currently truncates usernames and passwords at 20 characters
@@ -725,6 +716,9 @@
     PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
 fi
 
+# Install os-testr, which provides generate-subunit for the subunit output stream
+pip_install -U os-testr
+
 TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 
 # Install Python packages into a virtualenv so that we can track them
@@ -850,7 +844,6 @@
     install_django_openstack_auth
     # dashboard
     stack_install_service horizon
-    configure_horizon
 fi
 
 if is_service_enabled heat; then
@@ -1007,22 +1000,34 @@
     if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
         init_keystone
         start_keystone
+        bootstrap_keystone
     fi
 
-    export OS_IDENTITY_API_VERSION=3
-
-    # Set up a temporary admin URI for Keystone
-    SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3
-
     if is_service_enabled tls-proxy; then
         export OS_CACERT=$INT_CA_DIR/ca-chain.pem
-        # Until the client support is fixed, just use the internal endpoint
-        SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3
     fi
 
-    # Setup OpenStackClient token-endpoint auth
-    export OS_TOKEN=$SERVICE_TOKEN
-    export OS_URL=$SERVICE_ENDPOINT
+    # Rather than just export these, we write them out to an
+    # intermediate userrc file that can also be used to debug if
+    # something goes wrong between here and running
+    # tools/create_userrc.sh (this script relies on services other
+    # than keystone being available, so we can't call it right now)
+    cat > $TOP_DIR/userrc_early <<EOF
+# Use this for debugging issues before files in accrc are created
+
+# Set up password auth credentials now that Keystone is bootstrapped
+export OS_IDENTITY_API_VERSION=3
+export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_USERNAME=admin
+export OS_USER_DOMAIN_ID=default
+export OS_PASSWORD=$ADMIN_PASSWORD
+export OS_PROJECT_NAME=admin
+export OS_PROJECT_DOMAIN_ID=default
+export OS_REGION_NAME=$REGION_NAME
+
+EOF
+
+    source $TOP_DIR/userrc_early
 
     create_keystone_accounts
     create_nova_accounts
@@ -1038,30 +1043,6 @@
         create_heat_accounts
     fi
 
-    # Begone token auth
-    unset OS_TOKEN OS_URL
-
-    # Rather than just export these, we write them out to a
-    # intermediate userrc file that can also be used to debug if
-    # something goes wrong between here and running
-    # tools/create_userrc.sh (this script relies on services other
-    # than keystone being available, so we can't call it right now)
-    cat > $TOP_DIR/userrc_early <<EOF
-# Use this for debugging issues before files in accrc are created
-
-# Set up password auth credentials now that Keystone is bootstrapped
-export OS_AUTH_URL=$KEYSTONE_AUTH_URI
-export OS_USERNAME=admin
-export OS_USER_DOMAIN_ID=default
-export OS_PASSWORD=$ADMIN_PASSWORD
-export OS_PROJECT_NAME=admin
-export OS_PROJECT_DOMAIN_ID=default
-export OS_REGION_NAME=$REGION_NAME
-
-EOF
-
-    source $TOP_DIR/userrc_early
-
 fi
 
 # Write a clouds.yaml file
@@ -1070,12 +1051,9 @@
 # Horizon
 # -------
 
-# Set up the django horizon application to serve via apache/wsgi
-
 if is_service_enabled horizon; then
-    echo_summary "Configuring and starting Horizon"
-    init_horizon
-    start_horizon
+    echo_summary "Configuring Horizon"
+    configure_horizon
 fi
 
 
@@ -1300,6 +1278,12 @@
     fi
 fi
 
+if is_service_enabled horizon; then
+    echo_summary "Starting Horizon"
+    init_horizon
+    start_horizon
+fi
+
 
 # Create account rc files
 # =======================
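
Because the admin credentials are now written to userrc_early as soon as
Keystone is bootstrapped, a run that fails later can be debugged with plain
password auth instead of the old token-endpoint auth. A sketch, run from the
devstack checkout::

    # Reuse the early admin credentials written by stack.sh
    source ./userrc_early
    openstack service list
    openstack endpoint list
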
diff --git a/stackrc b/stackrc
index 16621f1..58146a4 100644
--- a/stackrc
+++ b/stackrc
@@ -29,6 +29,9 @@
 # Destination for status files
 SERVICE_DIR=${DEST}/status
 
+# Path for subunit output file
+SUBUNIT_OUTPUT=${DEST}/devstack.subunit
+
 # Determine stack user
 if [[ $EUID -eq 0 ]]; then
     STACK_USER=stack
@@ -407,6 +410,10 @@
 GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
 GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master}
 
+# osprofiler
+GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git}
+GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-master}
+
 # pycadf auditing library
 GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
 GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master}
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index d9cb8d8..a5e1107 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -80,6 +80,11 @@
     sudo chown -R root:root ${INI_TMP_ETC_DIR}
 fi
 
+# test iniget_sections
+VAL=$(iniget_sections "${TEST_INI}")
+assert_equal "$VAL" "default aaa bbb ccc ddd eee del_separate_options \
+del_same_option del_missing_option del_missing_option_multi del_no_options"
+
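
For reference, iniget_sections just lists the section names of an ini file.
A minimal sketch, assuming the helper lives in inc/ini-config alongside
iniget and iniset::

    source ./inc/ini-config
    cat > /tmp/sample.ini <<EOF
    [alpha]
    a = 1
    [beta]
    b = 2
    EOF
    iniget_sections /tmp/sample.ini    # expected to print: alpha beta
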
 # Test with missing arguments
 BEFORE=$(cat ${TEST_INI})
 
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index f31560a..326241d 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -40,7 +40,7 @@
 ALL_LIBS+=" oslo.utils python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
-ALL_LIBS+=" oslo.cache oslo.reports"
+ALL_LIBS+=" oslo.cache oslo.reports osprofiler"
 ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep"
 
 # Generate the above list with
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
new file mode 100644
index 0000000..1a6f80c
--- /dev/null
+++ b/tools/cap-pip.txt
@@ -0,0 +1 @@
+pip<8
diff --git a/tools/cpu_map_update.py b/tools/cpu_map_update.py
index 1938793..92b7b8f 100755
--- a/tools/cpu_map_update.py
+++ b/tools/cpu_map_update.py
@@ -30,7 +30,8 @@
             x86 = arch
             break
     if x86 is not None:
-        # Create a gate64 cpu model that is core2duo less monitor and pse36
+        # Create a gate64 cpu model that is core2duo less monitor, pse36,
+        # vme, and ssse3.
         gate64 = ET.SubElement(x86, "model")
         gate64.set("name", "gate64")
         ET.SubElement(gate64, "vendor").set("name", "Intel")
@@ -51,13 +52,11 @@
         ET.SubElement(gate64, "feature").set("name", "fxsr")
         ET.SubElement(gate64, "feature").set("name", "sse")
         ET.SubElement(gate64, "feature").set("name", "sse2")
-        ET.SubElement(gate64, "feature").set("name", "vme")
         ET.SubElement(gate64, "feature").set("name", "mtrr")
         ET.SubElement(gate64, "feature").set("name", "mca")
         ET.SubElement(gate64, "feature").set("name", "clflush")
         ET.SubElement(gate64, "feature").set("name", "pni")
         ET.SubElement(gate64, "feature").set("name", "nx")
-        ET.SubElement(gate64, "feature").set("name", "ssse3")
         ET.SubElement(gate64, "feature").set("name", "syscall")
         ET.SubElement(gate64, "feature").set("name", "lm")
 
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 25f713c..74d5428 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -156,8 +156,6 @@
     exit 3
 fi
 
-export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
-
 EC2_URL=$(openstack endpoint list --service ec2 --interface public --os-identity-api-version=3 -c URL -f value || true)
 if [[ -z $EC2_URL ]]; then
     EC2_URL=http://localhost:8773/
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 9ae2ae7..193a1f7 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -135,7 +135,7 @@
         fi
     fi
 
-    if  [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "21" ]]; then
+    if  [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then
         # requests ships vendored version of chardet/urllib3, but on
         # fedora these are symlinked back to the primary versions to
         # avoid duplication of code on disk.  This is fine when
diff --git a/tools/image_list.sh b/tools/image_list.sh
index a27635e..27b3d46 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -3,6 +3,12 @@
 # Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 
+# The following "source" implicitly calls get_default_host_ip() in
+# stackrc and will die if the selected default IP happens to lie
+# in the default ranges for FIXED_RANGE or FLOATING_RANGE. Since we
+# do not really need HOST_IP to be properly set in the remainder of
+# this script, just set it to some dummy value and make stackrc happy.
+HOST_IP=SKIP
 source $TOP_DIR/functions
 
 # Possible virt drivers, if we have more, add them here. Always keep
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index f239c7b..542a284 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -77,9 +77,9 @@
             die $LINENO "Download of get-pip.py failed"
         touch $LOCAL_PIP.downloaded
     fi
-    sudo -H -E python $LOCAL_PIP
+    sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
     if python3_enabled; then
-        sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
+        sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
     fi
 }
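
get-pip.py hands extra options such as -c straight to the pip install it
runs, so the cap-pip.txt constraint keeps the bootstrapped pip below 8. A
quick check after the tool has run (output shown is illustrative)::

    ./tools/install_pip.sh
    pip --version    # should report a pip older than 8, e.g. pip 7.1.2
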
 
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 97e4d94..9d2b082 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -133,7 +133,7 @@
         print "Skipping as nova-compute does not appear to be running"
         return
 
-    _dump_cmd("kill -s USR1 `pgrep nova-compute`")
+    _dump_cmd("kill -s USR2 `pgrep nova-compute`")
     print "guru meditation report in nova-compute log"
 
 
diff --git a/tools/xen/README.md b/tools/xen/README.md
index a1adf59..21090e5 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -78,7 +78,6 @@
     # to prompt for these passwords, blocking the install process.
 
     DATABASE_PASSWORD=my_super_secret
-    SERVICE_TOKEN=my_super_secret
     ADMIN_PASSWORD=my_super_secret
     SERVICE_PASSWORD=my_super_secret
     RABBIT_PASSWORD=my_super_secret
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index e24d9ed..46ff0b6 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -12,13 +12,6 @@
 
 export LC_ALL=C
 
-# Abort if localrc is not set
-if [ ! -e ../../localrc ]; then
-    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
-    echo "See the xen README for required passwords."
-    exit 1
-fi
-
 # This directory
 THIS_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -31,6 +24,10 @@
 #
 # Get Settings
 #
+TOP_DIR=$(cd $THIS_DIR/../../ && pwd)
+source $TOP_DIR/inc/meta-config
+rm -f $TOP_DIR/.localrc.auto
+extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto
 
 # Source params - override xenrc params in your localrc to suit your taste
 source $THIS_DIR/xenrc
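
install_os_domU.sh now builds .localrc.auto the same way stack.sh does, so
the required passwords can live in the [[local|localrc]] meta-section of
local.conf instead of a standalone localrc; a sketch::

    [[local|localrc]]
    DATABASE_PASSWORD=my_super_secret
    ADMIN_PASSWORD=my_super_secret
    SERVICE_PASSWORD=my_super_secret
    RABBIT_PASSWORD=my_super_secret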