Merge "Fix error reported due to re-add ipv6 address"
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1f5797c..1161b34 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -36,10 +36,6 @@
 
 -  **local** - extracts ``localrc`` from ``local.conf`` before
    ``stackrc`` is sourced
--  **pre-install** - runs after the system packages are installed but
-   before any of the source repositories are installed
--  **install** - runs immediately after the repo installations are
-   complete
 -  **post-config** - runs after the layer 2 services are configured and
    before they are started
 -  **extra** - runs after services are started and before any files in
@@ -427,6 +423,9 @@
 
       LIBS_FROM_GIT=python-keystoneclient,oslo.config
 
+Setting the variable to ``ALL`` will activate the download for all
+libraries.
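+
+For example, to enable this for every library::
+
+      LIBS_FROM_GIT=ALL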
+
 Virtual Environments
 --------------------
 
diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst
index a719d60..9549ed2 100644
--- a/doc/source/guides/lxc.rst
+++ b/doc/source/guides/lxc.rst
@@ -88,7 +88,7 @@
 
 You can also ssh into your container. On your host, run
 ``sudo lxc-info -n devstack`` to get the IP address (e.g. 
-``ssh ubuntu@$(sudo lxc-info -n p2 | awk '/IP/ { print $2 }')``).
+``ssh ubuntu@$(sudo lxc-info -n devstack | awk '/IP/ { print $2 }')``).
 
 Run Devstack
 -------------
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index a834314..c5b1634 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -88,7 +88,6 @@
         FIXED_RANGE="10.0.0.0/24"
         Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
         PUBLIC_NETWORK_GATEWAY="172.18.161.1"
-        Q_L3_ENABLED=True
         PUBLIC_INTERFACE=eth0
 
         # Open vSwitch provider networking configuration
@@ -362,6 +361,8 @@
 DevStack Configuration
 ----------------------
 
+.. _ovs-provider-network-controller:
+
 The following is a snippet of the DevStack configuration on the
 controller node.
 
@@ -387,7 +388,6 @@
         OVS_PHYSICAL_BRIDGE=br-ex
 
         Q_USE_PROVIDER_NETWORKING=True
-        Q_L3_ENABLED=False
 
         # Do not use Nova-Network
         disable_service n-net
@@ -434,13 +434,12 @@
         OVS_PHYSICAL_BRIDGE=br-ex
         PUBLIC_INTERFACE=eth1
         Q_USE_PROVIDER_NETWORKING=True
-        Q_L3_ENABLED=False
 
 Compute node 2's configuration will be exactly the same, except
 ``HOST_IP`` will be ``10.0.0.4``
 
 When DevStack is configured to use provider networking (via
-``Q_USE_PROVIDER_NETWORKING`` is True and ``Q_L3_ENABLED`` is False) -
+``Q_USE_PROVIDER_NETWORKING`` set to True),
 DevStack will automatically add the network interface defined in
 ``PUBLIC_INTERFACE`` to the ``OVS_PHYSICAL_BRIDGE``
 
@@ -543,7 +542,6 @@
     FIXED_RANGE="10.0.0.0/24"
     Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
     PUBLIC_NETWORK_GATEWAY="172.18.161.1"
-    Q_L3_ENABLED=True
     PUBLIC_INTERFACE=eth0
 
     Q_USE_PROVIDERNET_FOR_PUBLIC=True
@@ -553,3 +551,101 @@
     LB_PHYSICAL_INTERFACE=eth0
     PUBLIC_PHYSICAL_NETWORK=default
     LB_INTERFACE_MAPPINGS=default:eth0
+
+Using MacVTap instead of Open vSwitch
+-------------------------------------
+
+Security groups are not supported by the MacVTap agent, so DevStack configures
+the NoopFirewall driver on the compute node.
+
+The MacVTap agent does not support the l3, dhcp and metadata agents, so you can
+choose between the following deployment scenarios:
+
+Single node with provider networks using config drive and external l3, dhcp
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if l3 and dhcp services are provided externally, or if
+you do not require them.
+
+
+::
+
+    [[local|localrc]]
+    HOST_IP=10.0.0.2
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
+    Q_USE_PROVIDER_NETWORKING=True
+
+    #Enable Neutron services
+    disable_service n-net
+    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    ENABLED_SERVICES+=,q-agt,q-svc
+
+    ## MacVTap agent options
+    Q_AGENT=macvtap
+    PHYSICAL_NETWORK=default
+
+    FIXED_RANGE="203.0.113.0/24"
+    NETWORK_GATEWAY=203.0.113.1
+    PROVIDER_SUBNET_NAME="provider_net"
+    PROVIDER_NETWORK_TYPE="vlan"
+    SEGMENTATION_ID=2010
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [macvtap]
+    physical_interface_mappings = $PHYSICAL_NETWORK:eth1
+
+    [[post-config|$NOVA_CONF]]
+    force_config_drive = True
+
+
+Multi node with MacVTap compute node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This scenario applies if you require OpenStack-provided l3, dhcp or metadata
+services. Those are hosted on a separate controller and network node running
+some other l2 agent technology (in this example Open vSwitch). This node needs
+to be configured for VLAN tenant networks.
+
+For OVS, a configuration similar to the one described in the
+:ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
+used. Just add the following line to that local.conf, which also loads
+the MacVTap mechanism driver:
+
+::
+
+    [[local|localrc]]
+    ...
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,macvtap
+    ...
+
+For the MacVTap compute node, use this local.conf:
+
+::
+
+    HOST_IP=10.0.0.3
+    SERVICE_HOST=10.0.0.2
+    MYSQL_HOST=10.0.0.2
+    RABBIT_HOST=10.0.0.2
+    ADMIN_PASSWORD=secret
+    MYSQL_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+
+    # Services that a compute node runs
+    disable_all_services
+    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    ENABLED_SERVICES+=n-cpu,q-agt
+
+    ## MacVTap agent options
+    Q_AGENT=macvtap
+    PHYSICAL_NETWORK=default
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [macvtap]
+    physical_interface_mappings = $PHYSICAL_NETWORK:eth1
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index a2721b4..eed88ed 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -30,6 +30,7 @@
 app-catalog-ui                         `git://git.openstack.org/openstack/app-catalog-ui <https://git.openstack.org/cgit/openstack/app-catalog-ui>`__
 astara                                 `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
 barbican                               `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
+bilean                                 `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
 blazar                                 `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
 broadview-collector                    `git://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
 ceilometer                             `git://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
@@ -47,6 +48,7 @@
 devstack-plugin-glusterfs              `git://git.openstack.org/openstack/devstack-plugin-glusterfs <https://git.openstack.org/cgit/openstack/devstack-plugin-glusterfs>`__
 devstack-plugin-hdfs                   `git://git.openstack.org/openstack/devstack-plugin-hdfs <https://git.openstack.org/cgit/openstack/devstack-plugin-hdfs>`__
 devstack-plugin-kafka                  `git://git.openstack.org/openstack/devstack-plugin-kafka <https://git.openstack.org/cgit/openstack/devstack-plugin-kafka>`__
+devstack-plugin-mariadb                `git://git.openstack.org/openstack/devstack-plugin-mariadb <https://git.openstack.org/cgit/openstack/devstack-plugin-mariadb>`__
 devstack-plugin-nfs                    `git://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
 devstack-plugin-pika                   `git://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
 devstack-plugin-sheepdog               `git://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
@@ -60,16 +62,21 @@
 gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
 gnocchi                                `git://git.openstack.org/openstack/gnocchi <https://git.openstack.org/cgit/openstack/gnocchi>`__
 group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
+higgins                                `git://git.openstack.org/openstack/higgins <https://git.openstack.org/cgit/openstack/higgins>`__
 ironic                                 `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
 ironic-inspector                       `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
+ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
 kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
 kuryr                                  `git://git.openstack.org/openstack/kuryr <https://git.openstack.org/cgit/openstack/kuryr>`__
 magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
 magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
 manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
 mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
+monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
 monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
+monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
 monasca-log-api                        `git://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
+monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
 murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
@@ -79,6 +86,7 @@
 networking-cisco                       `git://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
 networking-fortinet                    `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
 networking-generic-switch              `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
+networking-huawei                      `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
 networking-infoblox                    `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
 networking-l2gw                        `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
 networking-midonet                     `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
@@ -94,13 +102,16 @@
 networking-sfc                         `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
 networking-vsphere                     `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
 neutron                                `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
+neutron-dynamic-routing                `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
 neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
 neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
 neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
 nova-docker                            `git://git.openstack.org/openstack/nova-docker <https://git.openstack.org/cgit/openstack/nova-docker>`__
+nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
 nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
 octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
 osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
+panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
 python-freezerclient                   `git://git.openstack.org/openstack/python-freezerclient <https://git.openstack.org/cgit/openstack/python-freezerclient>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 83e5609..70469d6 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -99,6 +99,8 @@
       should exist at this point.
    -  **extra** - Called near the end after layer 1 and 2 services have
       been started.
+   -  **test-config** - Called at the end of a devstack run, used to configure
+      Tempest or any other test environments.
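+
+      A minimal sketch of how a plugin's ``plugin.sh`` might hook this phase
+      (``configure_my_plugin_tempest`` is a hypothetical helper)::
+
+         if [[ "$1" == "stack" && "$2" == "test-config" ]]; then
+             # hypothetical helper that adjusts tempest.conf for this plugin
+             configure_my_plugin_tempest
+         fi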
 
 -  **unstack** - Called by ``unstack.sh`` before other services are shut
    down.
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index fcf79bd..6a3d121 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -13,6 +13,8 @@
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
+        echo_summary "Installing Tempest Plugins"
+        install_tempest_plugins
     elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
         # local.conf Tempest option overrides
         :
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 428544f..8a4b0f0 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -44,8 +44,8 @@
     WSGIPassAuthorization On
 </Location>
 
-Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_admin>
+Alias /identity_v2_admin %KEYSTONE_BIN%/keystone-wsgi-admin
+<Location /identity_v2_admin>
     SetHandler wsgi-script
     Options +ExecCGI
 
diff --git a/files/rpms/general b/files/rpms/general
index 2d4a97a..ee2e8a0 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -7,9 +7,9 @@
 gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
-iptables-services  # NOPRIME f22,f23
+iptables-services  # NOPRIME f22,f23,f24
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f22,f23
+java-1.8.0-openjdk-headless  # NOPRIME f22,f23,f24
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
diff --git a/files/rpms/nova b/files/rpms/nova
index 0312e85..594393e 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,7 +7,7 @@
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f22,f23
+kernel-modules # dist:f22,f23,f24
 kpartx
 kvm # NOPRIME
 libvirt-bin # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index 46dc59d..1e05167 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
 liberasurecode-devel
 memcached
 pyxattr
-rsync-daemon # dist:f22,f23
+rsync-daemon # dist:f22,f23,f24
 sqlite
 xfsprogs
 xinetd
diff --git a/functions b/functions
index 2736dc0..aa12e1e 100644
--- a/functions
+++ b/functions
@@ -298,6 +298,12 @@
             container_format=bare
             unpack=zcat
             ;;
+        *.img.bz2)
+            image_name=$(basename "$image" ".img.bz2")
+            disk_format=qcow2
+            container_format=bare
+            unpack=bunzip2
+            ;;
         *.qcow2)
             image_name=$(basename "$image" ".qcow2")
             disk_format=qcow2
@@ -331,6 +337,8 @@
     if [ "$container_format" = "bare" ]; then
         if [ "$unpack" = "zcat" ]; then
             openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+        elif [ "$unpack" = "bunzip2" ]; then
+            openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}")
         else
             openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
         fi
@@ -503,61 +511,6 @@
 }
 
 
-# This function recursively compares versions, and is not meant to be
-# called by anything other than vercmp_numbers below. This function does
-# not work with alphabetic versions.
-#
-# _vercmp_r sep ver1 ver2
-function _vercmp_r {
-    typeset sep
-    typeset -a ver1=() ver2=()
-    sep=$1; shift
-    ver1=("${@:1:sep}")
-    ver2=("${@:sep+1}")
-
-    if ((ver1 > ver2)); then
-        echo 1; return 0
-    elif ((ver2 > ver1)); then
-        echo -1; return 0
-    fi
-
-    if ((sep <= 1)); then
-        echo 0; return 0
-    fi
-
-    _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}"
-}
-
-
-# This function compares two versions and is meant to be called by
-# external callers. Please note the function assumes non-alphabetic
-# versions. For example, this will work:
-#
-#   vercmp_numbers 1.10 1.4
-#
-# The above will return "1", as 1.10 is greater than 1.4.
-#
-#   vercmp_numbers 5.2 6.4
-#
-# The above will return "-1", as 5.2 is less than 6.4.
-#
-#   vercmp_numbers 4.0 4.0
-#
-# The above will return "0", as the versions are equal.
-#
-# vercmp_numbers ver1 ver2
-function vercmp_numbers {
-    typeset v1=$1 v2=$2 sep
-    typeset -a ver1 ver2
-
-    deprecated "vercmp_numbers is deprecated for more generic vercmp"
-
-    IFS=. read -ra ver1 <<< "$v1"
-    IFS=. read -ra ver2 <<< "$v2"
-
-    _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}"
-}
-
 # vercmp ver1 op ver2
 #  Compare VER1 to VER2
 #   - op is one of < <= == >= >
diff --git a/functions-common b/functions-common
index a56a0ab..3fdd71b 100644
--- a/functions-common
+++ b/functions-common
@@ -380,6 +380,7 @@
         DISTRO="sle${os_RELEASE%.*}"
     elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
         "$os_VENDOR" =~ (CentOS) || \
+        "$os_VENDOR" =~ (Scientific) || \
         "$os_VENDOR" =~ (OracleServer) || \
         "$os_VENDOR" =~ (Virtuozzo) ]]; then
         # Drop the . release as we assume it's compatible
@@ -2263,11 +2264,12 @@
 # Service wrapper to restart services
 # restart_service service-name
 function restart_service {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 restart
+    if [ -x /bin/systemctl ]; then
+        sudo /bin/systemctl restart $1
     else
-        sudo /sbin/service $1 restart
+        sudo service $1 restart
     fi
+
 }
 
 # Only change permissions of a file or directory if it is not on an
@@ -2285,20 +2287,20 @@
 # Service wrapper to start services
 # start_service service-name
 function start_service {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 start
+    if [ -x /bin/systemctl ]; then
+        sudo /bin/systemctl start $1
     else
-        sudo /sbin/service $1 start
+        sudo service $1 start
     fi
 }
 
 # Service wrapper to stop services
 # stop_service service-name
 function stop_service {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 stop
+    if [ -x /bin/systemctl ]; then
+        sudo /bin/systemctl stop $1
     else
-        sudo /sbin/service $1 stop
+        sudo service $1 stop
     fi
 }
 
@@ -2369,7 +2371,7 @@
 function time_stop {
     local name
     local end_time
-    local elpased_time
+    local elapsed_time
     local total
     local start_time
 
diff --git a/inc/python b/inc/python
index 495150d..e013dfa 100644
--- a/inc/python
+++ b/inc/python
@@ -192,7 +192,7 @@
 function use_library_from_git {
     local name=$1
     local enabled=1
-    [[ ,${LIBS_FROM_GIT}, =~ ,${name}, ]] && enabled=0
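+    # The sentinel value ALL enables every library; otherwise the library
+    # name must appear in the comma-separated LIBS_FROM_GIT list.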
+    [[ ${LIBS_FROM_GIT} = 'ALL' ]] || [[ ,${LIBS_FROM_GIT}, =~ ,${name}, ]] && enabled=0
     return $enabled
 }
 
diff --git a/lib/ceph b/lib/ceph
index 3e0839a..e999647 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -116,7 +116,7 @@
 
 # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
 function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f22|f23) ]]; then
+    if [[ ! ${DISTRO} =~ (trusty|f22|f23|f24) ]]; then
         echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
         if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
             die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
diff --git a/lib/cinder b/lib/cinder
index 1786232..0ebf195 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -40,6 +40,7 @@
 # set up default directories
 GITDIR["python-cinderclient"]=$DEST/python-cinderclient
 GITDIR["os-brick"]=$DEST/os-brick
+GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
 CINDER_DIR=$DEST/cinder
 
 # Cinder virtual environment
@@ -269,6 +270,7 @@
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
     iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
+    iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
 
     iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
 
@@ -342,6 +344,10 @@
         iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE
     fi
 
+    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
+        iniset $CINDER_CONF DEFAULT glance_api_version 2
+    fi
+
     # Register SSL certificates if provided
     if is_ssl_enabled_service cinder; then
         ensure_certificates CINDER
@@ -415,7 +421,7 @@
         recreate_database cinder
 
         # Migrate cinder database
-        $CINDER_BIN_DIR/cinder-manage db sync
+        $CINDER_BIN_DIR/cinder-manage --config-file $CINDER_CONF db sync
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -466,6 +472,11 @@
 
 # install_cinderclient() - Collect source and prepare
 function install_cinderclient {
+    if use_library_from_git "python-brick-cinderclient-ext"; then
+        git_clone_by_name "python-brick-cinderclient-ext"
+        setup_dev_lib "python-brick-cinderclient-ext"
+    fi
+
     if use_library_from_git "python-cinderclient"; then
         git_clone_by_name "python-cinderclient"
         setup_dev_lib "python-cinderclient"
diff --git a/lib/glance b/lib/glance
index cda357f..8d95aad 100644
--- a/lib/glance
+++ b/lib/glance
@@ -57,6 +57,7 @@
 GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
 GLANCE_GLARE_CONF=$GLANCE_CONF_DIR/glance-glare.conf
 GLANCE_GLARE_PASTE_INI=$GLANCE_CONF_DIR/glance-glare-paste.ini
+GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-True}
 
 if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then
     GLANCE_SERVICE_PROTOCOL="https"
@@ -134,6 +135,12 @@
         iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop"
     fi
 
+    # NOTE(flaper87): To uncomment as soon as all services consuming Glance are
+    # able to consume V2 entirely.
+    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
+        iniset $GLANCE_API_CONF DEFAULT enable_v1_api False
+    fi
+
     # Store specific configs
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
     if is_service_enabled g-glare; then
@@ -143,6 +150,13 @@
 
     iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
 
+    # CORS feature support - to allow calls from Horizon by default
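+    # The allowed origin can be overridden by setting GLANCE_CORS_ALLOWED_ORIGIN
+    # (for example in local.conf).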
+    if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
+        iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN"
+    else
+        iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST"
+    fi
+
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
         iniset $GLANCE_API_CONF glance_store default_store swift
@@ -334,10 +348,10 @@
     recreate_database glance
 
     # Migrate glance database
-    $GLANCE_BIN_DIR/glance-manage db_sync
+    $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync
 
     # Load metadata definitions
-    $GLANCE_BIN_DIR/glance-manage db_load_metadefs
+    $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
 
     create_glance_cache_dir
 }
diff --git a/lib/heat b/lib/heat
index 4326321..c841e0a 100644
--- a/lib/heat
+++ b/lib/heat
@@ -156,7 +156,7 @@
     # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure
     # the section for the client plugin associated with the trustee
     if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then
-        iniset $HEAT_CONF trustee auth_plugin password
+        iniset $HEAT_CONF trustee auth_type password
         iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI
         iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER
         iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD
@@ -241,7 +241,7 @@
     # (re)create heat database
     recreate_database heat
 
-    $HEAT_BIN_DIR/heat-manage db_sync
+    $HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync
     create_heat_cache_dir
 }
 
diff --git a/lib/horizon b/lib/horizon
index abc1f6d..0517e32 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -96,7 +96,7 @@
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
 
     _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
-    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\""
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
diff --git a/lib/keystone b/lib/keystone
index bcd5fab..6198e43 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -89,7 +89,7 @@
 
 # Select Keystone's token provider (and format)
 # Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
-KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
+KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
 # Set Keystone interface configuration
@@ -122,8 +122,14 @@
 fi
 
 # complete URIs
-KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT}
-KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}
+if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
+    # If running in Apache, use path access rather than port.
+    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_v2_admin
+    KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity
+else
+    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT}
+    KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}
+fi
 
 # V3 URIs
 KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3
@@ -240,7 +246,7 @@
     # Enable caching
     iniset $KEYSTONE_CONF cache enabled "True"
     iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool"
-    iniset $KEYSTONE_CONF cache memcache_servers $SERVICE_HOST:11211
+    iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
 
     # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617
     iniset $KEYSTONE_CONF catalog caching "False"
@@ -259,7 +265,15 @@
         # Set the service ports for a proxy to take the originals
         service_port=$KEYSTONE_SERVICE_PORT_INT
         auth_port=$KEYSTONE_AUTH_PORT_INT
+    fi
 
+    # Override the endpoints advertised by keystone (the public_endpoint and
+    # admin_endpoint) so that clients use the correct endpoint. By default, the
+    # keystone server uses the public_port and admin_port which isn't going to
+    # work when you want to use a different port (in the case of proxy), or you
+    # don't want the port (in the case of putting keystone on a path in
+    # apache).
+    if is_service_enabled tls-proxy || [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
         iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
         iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
     fi
@@ -304,10 +318,10 @@
         fi
 
         iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-public"
-        iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi threads $(nproc)
+        iniset "$KEYSTONE_PUBLIC_UWSGI_FILE" uwsgi processes $(nproc)
 
         iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi wsgi-file "$KEYSTONE_BIN_DIR/keystone-wsgi-admin"
-        iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi threads $API_WORKERS
+        iniset "$KEYSTONE_ADMIN_UWSGI_FILE" uwsgi processes $API_WORKERS
 
         # Common settings
         for file in "$KEYSTONE_PUBLIC_UWSGI_FILE" "$KEYSTONE_ADMIN_UWSGI_FILE"; do
@@ -334,7 +348,7 @@
     # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
     # The users from this project are globally admin as before, but it also
     # allows policy changes in order to clarify the adminess scope.
-    iniset $KEYSTONE_CONF resource admin_project_domain_name default
+    iniset $KEYSTONE_CONF resource admin_project_domain_name Default
     iniset $KEYSTONE_CONF resource admin_project_name admin
 }
 
@@ -376,7 +390,7 @@
 
     # Create service project/role
     get_or_create_domain "$SERVICE_DOMAIN_NAME"
-    get_or_create_project "$SERVICE_PROJECT_NAME" default
+    get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
 
     # Service role, so service users do not have to be admins
     get_or_create_role service
@@ -489,16 +503,16 @@
     recreate_database keystone
 
     # Initialize keystone database
-    $KEYSTONE_BIN_DIR/keystone-manage db_sync
+    $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync
 
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
         # Set up certificates
         rm -rf $KEYSTONE_CONF_DIR/ssl
-        $KEYSTONE_BIN_DIR/keystone-manage pki_setup
+        $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF pki_setup
     fi
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then
         rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
-        $KEYSTONE_BIN_DIR/keystone-manage fernet_setup
+        $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup
     fi
 }
 
@@ -582,7 +596,14 @@
     # Check that the keystone service is running. Even if the tls tunnel
     # should be enabled, make sure the internal port is checked using
     # unencrypted traffic at this point.
-    if ! wait_for_service $SERVICE_TIMEOUT $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/; then
+    # If running in Apache, use the path rather than port.
+
+    local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/
+    if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
+        service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
+    fi
+
+    if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
         die $LINENO "keystone did not start"
     fi
 
@@ -625,8 +646,8 @@
         --bootstrap-service-name keystone \
         --bootstrap-region-id "$REGION_NAME" \
         --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \
-        --bootstrap-public-url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT" \
-        --bootstrap-internal-url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT"
+        --bootstrap-public-url "$KEYSTONE_SERVICE_URI" \
+        --bootstrap-internal-url "$KEYSTONE_SERVICE_URI"
 }
 
 # Restore xtrace
diff --git a/lib/neutron b/lib/neutron
index 51c3a00..ad68d8e 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -126,6 +126,10 @@
     iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
     iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
 
+    iniset $NEUTRON_CONF DEFAULT debug True
+
+    iniset_rpc_backend neutron $NEUTRON_CONF
+
     # Neutron API server & Neutron plugin
     if is_service_enabled neutron-api; then
         local policy_file=$NEUTRON_CONF_DIR/policy.json
@@ -137,8 +141,6 @@
 
         iniset $NEUTRON_CONF DEFAULT core_plugin ml2
 
-        iniset $NEUTRON_CONF DEFAULT verbose True
-        iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
         iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
 
@@ -169,6 +171,7 @@
     # Neutron OVS or LB agent
     if is_service_enabled neutron-agent; then
         iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan
+        iniset $NEUTRON_PLUGIN_CONF DEFAULT debug True
 
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
@@ -184,7 +187,6 @@
     if is_service_enabled neutron-dhcp; then
         cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
 
-        iniset $NEUTRON_DHCP_CONF DEFAULT verbose True
         iniset $NEUTRON_DHCP_CONF DEFAULT debug True
         iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
         iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
@@ -197,16 +199,14 @@
         iniset $NEUTRON_CONF DEFAULT service_plugins router
         iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
         iniset $NEUTRON_L3_CONF DEFAULT debug True
-        iniset $NEUTRON_L3_CONF DEFAULT verbose True
         neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
     fi
 
     # Metadata
-    if is_service_enabled neutron-meta; then
+    if is_service_enabled neutron-metadata-agent; then
         cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
 
-        iniset $NEUTRON_META_CONF DEFAULT verbose True
-        iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $NEUTRON_META_CONF DEFAULT debug True
         iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
         iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
 
@@ -240,6 +240,13 @@
         iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY"
     fi
 
+    # Metering
+    if is_service_enabled neutron-metering; then
+        source $TOP_DIR/lib/neutron_plugins/services/metering
+        neutron_agent_metering_configure_common
+        neutron_agent_metering_configure_agent
+    fi
+
 }
 
 # configure_neutron_rootwrap() - configure Neutron's rootwrap
@@ -288,7 +295,7 @@
 
     iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
 
-    if is_service_enabled neutron-meta; then
+    if is_service_enabled neutron-metadata-agent; then
         iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
 
@@ -423,8 +430,12 @@
             create_neutron_initial_network
         fi
     fi
-    if is_service_enabled neutron-meta; then
-        run_process neutron-meta "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG"
+    if is_service_enabled neutron-metadata-agent; then
+        run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG"
+    fi
+
+    if is_service_enabled neutron-metering; then
+        run_process neutron-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
     fi
 }
 
@@ -440,20 +451,16 @@
         [ ! -z "$pid" ] && sudo kill -9 $pid
     fi
 
-    if is_service_enabled neutron-meta; then
+    if is_service_enabled neutron-metadata-agent; then
         sudo pkill -9 -f neutron-ns-metadata-proxy || :
-        stop_process neutron-meta
+        stop_process neutron-metadata-agent
     fi
 }
 
 # Compile the list of enabled config files
 function _set_config_files {
 
-    #TODO(sc68cal) - see if we can clean up this and only
-    # pass in config files that make sense for certain agents
-    if is_service_enabled neutron-api; then
-        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF"
-    fi
+    NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF"
 
     #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not
     if is_service_enabled neutron-agent; then
@@ -468,7 +475,7 @@
         NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_L3_CONF"
     fi
 
-    if is_service_enabled neutron-meta; then
+    if is_service_enabled neutron-metadata-agent; then
         NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_META_CONF"
     fi
 
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 7eb8637..73123ef 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -61,7 +61,7 @@
 # Neutron Network Configuration
 # -----------------------------
 
-
+deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future"
 
 if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
     Q_PROTOCOL="https"
@@ -130,8 +130,6 @@
 Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-# Use neutron-debug command
-Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
 # The name of the default q-l3 router
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
@@ -299,7 +297,7 @@
 }
 
 function _determine_config_l3 {
-    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
     if is_service_enabled q-fwaas; then
         opts+=" --config-file $Q_FWAAS_CONF_FILE"
     fi
@@ -361,15 +359,13 @@
         _configure_neutron_ceilometer_notifications
     fi
 
-    _configure_neutron_debug_command
-
     iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
 }
 
 function create_nova_conf_neutron {
     iniset $NOVA_CONF DEFAULT use_neutron True
     iniset $NOVA_CONF neutron auth_type "password"
-    iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+    iniset $NOVA_CONF neutron auth_url "$KEYSTONE_AUTH_URI/v3"
     iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
     iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
     iniset $NOVA_CONF neutron user_domain_name "$SERVICE_DOMAIN_NAME"
@@ -528,7 +524,7 @@
 }
 
 function start_mutnauq_other_agents {
-    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
+    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
 
     if is_service_enabled neutron-vpnaas; then
         :  # Started by plugin
@@ -536,8 +532,8 @@
         run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
     fi
 
-    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
-    run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
+    run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME"
     run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
@@ -621,16 +617,10 @@
         DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }")
         local ADD_OVS_PORT=""
         local DEL_OVS_PORT=""
+        local ARP_CMD=""
 
         IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
 
-        if [[ "$af" == "inet" ]]; then
-            IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
-            ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
-        else
-            ARP_CMD=""
-        fi
-
         if [ "$DEFAULT_ROUTE_GW" != "" ]; then
             ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
         fi
@@ -647,6 +637,10 @@
             IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
             IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
             IP_UP="sudo ip link set $to_intf up"
+            if [[ "$af" == "inet" ]]; then
+                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
+                ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
+            fi
         fi
 
         # The add/del OVS port calls have to happen either before or
@@ -800,24 +794,6 @@
     _neutron_setup_rootwrap
 }
 
-function _configure_neutron_debug_command {
-    if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then
-        return
-    fi
-
-    cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_TEST_CONFIG_FILE
-
-    iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
-    iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-
-    _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE
-
-    neutron_plugin_configure_debug_command
-}
-
 function _configure_neutron_dhcp_agent {
 
     cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
@@ -1028,24 +1004,6 @@
     neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
 }
 
-function setup_neutron_debug {
-    if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-        public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
-        if [[ -n $public_net_id ]]; then
-            neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id
-        fi
-        private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
-        if [[ -n $private_net_id ]]; then
-            neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id
-        fi
-    fi
-}
-
-function teardown_neutron_debug {
-    delete_probe $PUBLIC_NETWORK_NAME
-    delete_probe $PRIVATE_NETWORK_NAME
-}
-
 function _get_net_id {
     neutron --os-cloud devstack-admin --os-region "$REGION_NAME" --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}'
 }
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 59c7737..ecf252f 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -54,7 +54,7 @@
     local kernel_major_minor
     kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
     # From kernel 3.13 on, openvswitch-datapath-dkms is not needed
-    if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then
+    if vercmp "$kernel_major_minor" "<" "3.13" ; then
         install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version"
     fi
 }
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 80af0bb..0f185da 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -23,11 +23,9 @@
 Q_PUBLIC_VETH_EX=${Q_PUBLIC_VETH_EX:-veth-pub-ex}
 Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int}
 
-# The next two variables are configured by plugin
+# The next variable is configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
 #
-# The plugin supports L3.
-Q_L3_ENABLED=${Q_L3_ENABLED:-True}
 # L3 routers exist per tenant
 Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 
@@ -85,34 +83,28 @@
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
 function _determine_config_l3 {
-    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
     echo "$opts"
 }
 
 function _configure_neutron_l3_agent {
-    local cfg_file
-    Q_L3_ENABLED=True
 
     cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE
 
-    iniset $Q_L3_CONF_FILE DEFAULT verbose True
     iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
-    iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+    iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+        iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
     fi
 
     _neutron_setup_interface_driver $Q_L3_CONF_FILE
 
-    neutron_plugin_configure_l3_agent
+    neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE
 
-    if [[ $(ip -f inet a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
-    fi
+    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
 
     if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6"
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
     fi
 }
 
@@ -157,7 +149,9 @@
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
         fi
 
-        if [[ "$IP_VERSION" =~ .*6 ]] && [[ -n "$IPV6_PROVIDER_FIXED_RANGE" ]] && [[ -n "$IPV6_PROVIDER_NETWORK_GATEWAY" ]]; then
+        if [[ "$IP_VERSION" =~ .*6 ]]; then
+            die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
+            die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
             SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2)
             die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
@@ -182,9 +176,7 @@
         fi
     fi
 
-    AUTO_ALLOCATE_EXT=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list | grep 'auto-allocated-topology' | get_field 1)
-    SUBNETPOOL_EXT=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list | grep 'subnet_allocation' | get_field 1)
-    if [[ "$Q_L3_ENABLED" == "True" ]]; then
+    if is_networking_extension_supported "router" && is_networking_extension_supported "external-net"; then
         # Create a router, and add the private subnet as one of its interfaces
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
@@ -196,10 +188,8 @@
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
         fi
 
-        # if the extension is available, then mark the external
-        # network as default, and provision default subnetpools
         EXTERNAL_NETWORK_FLAGS="--router:external"
-        if [[ -n $AUTO_ALLOCATE_EXT && -n $SUBNETPOOL_EXT ]]; then
+        if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then
             EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default"
             if [[ "$IP_VERSION" =~ 4.* ]]; then
                 SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2)
@@ -364,3 +354,17 @@
         _neutron_set_router_id
     fi
 }
+
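+# is_provider_network() - Check if provider networking is enabled
+# (Q_USE_PROVIDER_NETWORKING == True)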
+function is_provider_network {
+    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
+        return 0
+    fi
+    return 1
+}
+
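+# is_networking_extension_supported() - Check if a neutron API extension
+# alias (e.g. "router" or "external-net") is enabled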
+function is_networking_extension_supported {
+    local extension=$1
+    # TODO(sc68cal) cache this instead of calling every time
+    EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value)
+    [[ $EXT_LIST =~ $extension ]] && return 0
+}
diff --git a/lib/nova b/lib/nova
index aeba803..67a80b9 100644
--- a/lib/nova
+++ b/lib/nova
@@ -483,6 +483,9 @@
 
     iniset $NOVA_CONF privsep_osbrick helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
 
+    iniset $NOVA_CONF vif_plug_ovs_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
+    iniset $NOVA_CONF vif_plug_linux_bridge_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
+
     if is_service_enabled n-api; then
         if is_service_enabled n-api-meta; then
             # If running n-api-meta as a separate service
@@ -589,11 +592,6 @@
 
     iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
 
-    if [[ "$NOVA_BACKEND" == "LVM" ]]; then
-        iniset $NOVA_CONF libvirt images_type "lvm"
-        iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME
-    fi
-
     if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
         iniset $NOVA_CONF DEFAULT glance_protocol https
     fi
@@ -636,7 +634,7 @@
     if is_service_enabled n-cell; then
         cp $NOVA_CONF $NOVA_CELLS_CONF
         iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB`
-        iniset $NOVA_CELLS_CONF oslo_messaging_rabbit rabbit_virtual_host child_cell
+        iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell
         iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
         iniset $NOVA_CELLS_CONF cells enable True
         iniset $NOVA_CELLS_CONF cells cell_type compute
@@ -690,18 +688,18 @@
         recreate_database nova
 
         # Migrate nova database
-        $NOVA_BIN_DIR/nova-manage db sync
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
 
         if is_service_enabled n-cell; then
             recreate_database $NOVA_CELLS_DB
         fi
 
         recreate_database $NOVA_API_DB
-        $NOVA_BIN_DIR/nova-manage api_db sync
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
 
         # Run online migrations on the new databases
         # Needed for flavor conversion
-        $NOVA_BIN_DIR/nova-manage db online_data_migrations
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations
     fi
 
     create_nova_cache_dir
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index dbb4d4f..4e5a748 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -108,9 +108,9 @@
             # source file paths, not relative paths. This screws with the matching
             # of '1:libvirt' making everything turn on. So use libvirt.c for now.
             # This will have to be re-visited when Ubuntu ships libvirt >= 1.2.3
-            local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor"
+            local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu"
         else
-            local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor"
+            local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:cpu"
         fi
         local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
         if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
@@ -121,18 +121,9 @@
         fi
     fi
 
-    # Update the libvirt cpu map with a gate64 cpu model. This enables nova
-    # live migration for 64bit guest OSes on heterogenous cloud "hardware".
-    if [[ -f /usr/share/libvirt/cpu_map.xml ]] ; then
-        sudo $TOP_DIR/tools/cpu_map_update.py /usr/share/libvirt/cpu_map.xml
-    fi
-
-    # libvirt detects various settings on startup, as we potentially changed
-    # the system configuration (modules, filesystems), we need to restart
-    # libvirt to detect those changes. Use a stop start as otherwise the new
-    # cpu_map is not loaded properly on some systems (Ubuntu).
-    stop_service $LIBVIRT_DAEMON
-    start_service $LIBVIRT_DAEMON
+    # Service needs to be started on redhat/fedora -- do a restart for
+    # sanity after fiddling the config.
+    restart_service $LIBVIRT_DAEMON
 }
 
 
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 1b4f7ae..d0e364e 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -68,6 +68,12 @@
         iniset $NOVA_CONF vnc vncserver_proxyclient_address  $HOST_IP
         iniset $NOVA_CONF vnc vncserver_listen $HOST_IP
         iniset $NOVA_CONF vnc keymap
+    elif [[ "$NOVA_BACKEND" == "LVM" ]]; then
+        iniset $NOVA_CONF libvirt images_type "lvm"
+        iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME
+        if isset LVM_VOLUME_CLEAR; then
+            iniset $NOVA_CONF libvirt volume_clear "$LVM_VOLUME_CLEAR"
+        fi
     fi
 }
 
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 05e303e..0ee46dc 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -104,8 +104,9 @@
 
 # builds transport url string
 function get_transport_url {
+    local virtual_host=$1
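+    # Example (assuming rabbit): get_transport_url child_cell
+    # => rabbit://<user>:<password>@<host>:5672/child_cell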
     if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
-        echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/"
+        echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/$virtual_host"
     fi
 }
 
@@ -114,11 +115,9 @@
     local package=$1
     local file=$2
     local section=${3:-DEFAULT}
+    local virtual_host=$4
     if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
-        iniset $file $section rpc_backend "rabbit"
-        iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST
-        iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
-        iniset $file oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
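+        # A single transport_url entry now replaces the old per-option
+        # rpc_backend / rabbit_hosts / rabbit_password / rabbit_userid settings.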
+        iniset $file $section transport_url $(get_transport_url "$virtual_host")
         if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then
             iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD
         fi
diff --git a/lib/swift b/lib/swift
index 8cb94ef..0c74411 100644
--- a/lib/swift
+++ b/lib/swift
@@ -428,6 +428,7 @@
     sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER}
 
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server allow_account_management true
 
     # Configure Crossdomain
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain"
@@ -457,9 +458,7 @@
         cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
 [filter:s3token]
 paste.filter_factory = keystonemiddleware.s3_token:filter_factory
-auth_port = ${KEYSTONE_AUTH_PORT}
-auth_host = ${KEYSTONE_AUTH_HOST}
-auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_uri = ${KEYSTONE_AUTH_URI}
 cafile = ${SSL_BUNDLE_FILE}
 admin_user = swift
 admin_tenant_name = ${SERVICE_PROJECT_NAME}
diff --git a/lib/tempest b/lib/tempest
index e556935..347b2a7 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -16,7 +16,6 @@
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
 #   - ``Q_ROUTER_NAME``
-#   - ``Q_L3_ENABLED``
 #   - ``VIRT_DRIVER``
 #   - ``LIBVIRT_TYPE``
 #   - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -239,7 +238,9 @@
 
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
 
-    if [ "$Q_L3_ENABLED" = "True" ]; then
+    # The public network (for floating IP access) is only available
+    # if the 'external-net' extension is enabled.
+    if is_networking_extension_supported 'external-net'; then
         public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
             awk '{print $2}')
     fi
@@ -260,6 +261,8 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3"
+    # Use domain-scoped tokens for admin v3 tests, whether using v3 dynamic credentials or v3 account generation
+    iniset $TEMPEST_CONFIG identity admin_domain_scope True
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
@@ -288,10 +291,14 @@
     fi
     if [ "$VIRT_DRIVER" = "xenserver" ]; then
         iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
+        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
     fi
 
     # Image Features
     iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
+    if [ "$GLANCE_V1_ENABLED" != "True" ]; then
+        iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False
+    fi
 
     # Compute
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
@@ -331,14 +338,14 @@
         tempest_compute_max_microversion=None
     fi
     if [ "$tempest_compute_min_microversion" == "None" ]; then
-        inicomment $TEMPEST_CONFIG compute-feature-enabled min_microversion
+        inicomment $TEMPEST_CONFIG compute min_microversion
     else
-        iniset $TEMPEST_CONFIG compute-feature-enabled min_microversion $tempest_compute_min_microversion
+        iniset $TEMPEST_CONFIG compute min_microversion $tempest_compute_min_microversion
     fi
     if [ "$tempest_compute_max_microversion" == "None" ]; then
-        inicomment $TEMPEST_CONFIG compute-feature-enabled max_microversion
+        inicomment $TEMPEST_CONFIG compute max_microversion
     else
-        iniset $TEMPEST_CONFIG compute-feature-enabled max_microversion $tempest_compute_max_microversion
+        iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion
     fi
 
     iniset $TEMPEST_CONFIG compute-feature-enabled resize True
@@ -374,7 +381,7 @@
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
-    iniset $TEMPEST_CONFIG network tenant_networks_reachable false
+    iniset $TEMPEST_CONFIG network project_networks_reachable false
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
@@ -432,6 +439,19 @@
     iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True
     # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
     iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
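+    # As with the compute settings above, the volume microversion range under
+    # test can be pinned via TEMPEST_VOLUME_MIN_MICROVERSION and
+    # TEMPEST_VOLUME_MAX_MICROVERSION (defaults: None and "latest"); a value
+    # such as "3.0" in local.conf is an illustrative override.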
+    local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
+    local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
+    if [ "$tempest_volume_min_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG volume min_microversion
+    else
+        iniset $TEMPEST_CONFIG volume min_microversion $tempest_volume_min_microversion
+    fi
+
+    if [ "$tempest_volume_max_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG volume max_microversion
+    else
+        iniset $TEMPEST_CONFIG volume max_microversion $tempest_volume_max_microversion
+    fi
 
     if ! is_service_enabled c-bak; then
         iniset $TEMPEST_CONFIG volume-feature-enabled backup False
@@ -598,8 +618,16 @@
     # running pip install -U on tempest requirements
     $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
     PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest
+    popd
+}
+
+# install_tempest_plugins() - Install any specified plugins into the tempest venv
+function install_tempest_plugins {
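+    # When TEMPEST_PLUGINS is left at 0 nothing extra is installed; any other
+    # value is handed straight to pip, so a local checkout path or a PyPI
+    # package name works (illustrative examples, not defaults).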
+    pushd $TEMPEST_DIR
     if [[ $TEMPEST_PLUGINS != 0 ]] ; then
         tox -evenv-tempest -- pip install $TEMPEST_PLUGINS
+        echo "Checking installed Tempest plugins:"
+        tox -evenv-tempest -- tempest list-plugins
     fi
     popd
 }
diff --git a/openrc b/openrc
index db2e97d..8d8ae8b 100644
--- a/openrc
+++ b/openrc
@@ -90,6 +90,13 @@
 #
 export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION}
 
+# Currently, in order to use openstackclient with Identity API v3,
+# we need to set the domain to which the user and project belong.
+if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then
+    export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"}
+    export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"}
+fi
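+# Illustrative check: with these exported, "openstack token issue" should
+# authenticate against the v3 API without a missing-domain error.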
+
 # Set OS_CACERT to a default CA certificate chain if it exists.
 if [[ ! -v OS_CACERT ]] ; then
     DEFAULT_OS_CACERT=$INT_CA_DIR/ca-chain.pem
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 9c4f6f7..856eaff 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -10,7 +10,7 @@
 
 # Package source and version, all pkg files are expected to have
 # something like this, as well as a way to override them.
-ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.4.2}
+ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5}
 ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch}
 
 # Elastic search actual implementation
diff --git a/stack.sh b/stack.sh
index 44ca0cb..6fbb0be 100755
--- a/stack.sh
+++ b/stack.sh
@@ -185,7 +185,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|f24|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -335,6 +335,13 @@
 # to speed things up
 SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
 
+# If we have /etc/nodepool/provider, assume we're on an OpenStack CI
+# node, where EPEL is already pointing at our internal mirror and RDO
+# is pre-installed.
+if [[ -f /etc/nodepool/provider ]]; then
+    SKIP_EPEL_INSTALL=True
+fi
+
 if is_fedora && [[ $DISTRO == "rhel7" ]] && \
         [[ ${SKIP_EPEL_INSTALL} != True ]]; then
     _install_epel_and_rdo
@@ -1208,9 +1215,9 @@
     done
 fi
 
-# Create a randomized default value for the keymgr's fixed_key
+# Create a randomized default value for the key manager's fixed_key
 if is_service_enabled nova; then
-    iniset $NOVA_CONF keymgr fixed_key $(generate_hex_string 32)
+    iniset $NOVA_CONF key_manager fixed_key $(generate_hex_string 32)
 fi
 
 # Launch the nova-api and wait for it to answer before continuing
@@ -1250,7 +1257,6 @@
 if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
     echo_summary "Creating initial neutron network elements"
     create_neutron_initial_network
-    setup_neutron_debug
 fi
 
 if is_service_enabled nova; then
@@ -1372,6 +1378,12 @@
     fi
 fi
 
+# Run test-config
+# ---------------
+
+# Phase: test-config
+run_phase stack test-config
+
 
 # Fin
 # ===
diff --git a/stackrc b/stackrc
index 4ba57c9..acb7d3f 100644
--- a/stackrc
+++ b/stackrc
@@ -135,12 +135,29 @@
     source $RC_DIR/.localrc.auto
 fi
 
+# The default for log coloring depends on whether output is going to an
+# interactive terminal. The baseline assumption is that non-interactive
+# invocations are for CI, where logs are presented as browsable text
+# files, so color codes should be omitted.
+# Simply override LOG_COLOR if your environment is different.
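+# (e.g. LOG_COLOR=True in local.conf forces colored output even when
+# stack.sh output is redirected to a file -- illustrative override)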
+if [ -t 1 ]; then
+    _LOG_COLOR_DEFAULT=True
+else
+    _LOG_COLOR_DEFAULT=False
+fi
+
 # Use color for logging output (only available if syslog is not used)
-LOG_COLOR=$(trueorfalse True LOG_COLOR)
+LOG_COLOR=$(trueorfalse $_LOG_COLOR_DEFAULT LOG_COLOR)
 
 # Make tracing more educational
 if [[ "$LOG_COLOR" == "True" ]]; then
-    export PS4='+\[$(tput setaf 242)\]$(short_source)\[$(tput sgr0)\] '
+    # tput needs either $TERM to be set or an explicit -T terminal type;
+    # if TERM is unset, fall back to vt100, a no-frills least common
+    # denominator supported everywhere.
+    TPUT_T=
+    if ! [ $TERM ]; then
+        TPUT_T='-T vt100'
+    fi
+    export PS4='+\[$(tput '$TPUT_T' setaf 242)\]$(short_source)\[$(tput '$TPUT_T' sgr0)\] '
 else
     export PS4='+ $(short_source):   '
 fi
@@ -208,6 +225,9 @@
 # ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config
 #
 # Will install those 2 libraries from git, the rest from pypi.
+#
+# Setting the variable to 'ALL' will activate the download for all
+# libraries.
 
 
 ##############
@@ -282,6 +302,10 @@
 GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
 GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master}
 
+# os-brick client for local volume attachment
+GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git}
+GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master}
+
 # python glance client library
 GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
 GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index a979c34..bb58088 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -42,7 +42,7 @@
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler"
 ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
-ALL_LIBS+=" diskimage-builder os-vif"
+ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tools/cpu_map_update.py b/tools/cpu_map_update.py
deleted file mode 100755
index 92b7b8f..0000000
--- a/tools/cpu_map_update.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This small script updates the libvirt CPU map to add a gate64 cpu model
-# that can be used to enable a common 64bit capable feature set across
-# devstack nodes so that features like nova live migration work.
-
-import sys
-import xml.etree.ElementTree as ET
-from xml.dom import minidom
-
-
-def update_cpu_map(tree):
-    root = tree.getroot()
-    cpus = root#.find("cpus")
-    x86 = None
-    for arch in cpus.findall("arch"):
-        if arch.get("name") == "x86":
-            x86 = arch
-            break
-    if x86 is not None:
-        # Create a gate64 cpu model that is core2duo less monitor, pse36,
-        # vme, and ssse3.
-        gate64 = ET.SubElement(x86, "model")
-        gate64.set("name", "gate64")
-        ET.SubElement(gate64, "vendor").set("name", "Intel")
-        ET.SubElement(gate64, "feature").set("name", "fpu")
-        ET.SubElement(gate64, "feature").set("name", "de")
-        ET.SubElement(gate64, "feature").set("name", "pse")
-        ET.SubElement(gate64, "feature").set("name", "tsc")
-        ET.SubElement(gate64, "feature").set("name", "msr")
-        ET.SubElement(gate64, "feature").set("name", "pae")
-        ET.SubElement(gate64, "feature").set("name", "mce")
-        ET.SubElement(gate64, "feature").set("name", "cx8")
-        ET.SubElement(gate64, "feature").set("name", "apic")
-        ET.SubElement(gate64, "feature").set("name", "sep")
-        ET.SubElement(gate64, "feature").set("name", "pge")
-        ET.SubElement(gate64, "feature").set("name", "cmov")
-        ET.SubElement(gate64, "feature").set("name", "pat")
-        ET.SubElement(gate64, "feature").set("name", "mmx")
-        ET.SubElement(gate64, "feature").set("name", "fxsr")
-        ET.SubElement(gate64, "feature").set("name", "sse")
-        ET.SubElement(gate64, "feature").set("name", "sse2")
-        ET.SubElement(gate64, "feature").set("name", "mtrr")
-        ET.SubElement(gate64, "feature").set("name", "mca")
-        ET.SubElement(gate64, "feature").set("name", "clflush")
-        ET.SubElement(gate64, "feature").set("name", "pni")
-        ET.SubElement(gate64, "feature").set("name", "nx")
-        ET.SubElement(gate64, "feature").set("name", "syscall")
-        ET.SubElement(gate64, "feature").set("name", "lm")
-
-
-def format_xml(root):
-    # Adapted from http://pymotw.com/2/xml/etree/ElementTree/create.html
-    # thank you dhellmann
-    rough_string = ET.tostring(root, encoding="UTF-8")
-    dom_parsed = minidom.parseString(rough_string)
-    return dom_parsed.toprettyxml("  ", encoding="UTF-8")
-
-
-def main():
-    if len(sys.argv) != 2:
-        raise Exception("Must pass path to cpu_map.xml to update")
-    cpu_map = sys.argv[1]
-    tree = ET.parse(cpu_map)
-    for model in tree.getroot().iter("model"):
-        if model.get("name") == "gate64":
-            # gate64 model is already present
-            return
-    update_cpu_map(tree)
-    pretty_xml = format_xml(tree.getroot())
-    with open(cpu_map, 'w') as f:
-        f.write(pretty_xml)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 089a6ef..bbad1bf 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -44,16 +44,10 @@
     # stackforge, etc)
     return proj.startswith('openstack/')
 
-# Rather than returning a 404 for a nonexistent file, cgit delivers a
-# 0-byte response to a GET request.  It also does not provide a
-# Content-Length in a HEAD response, so the way we tell if a file exists
-# is to check the length of the entire GET response body.
+# Check if this project has a plugin file
 def has_devstack_plugin(proj):
     r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
-    if len(r.text) > 0:
-        return True
-    else:
-        return False
+    return r.status_code == 200
 
 logging.debug("Getting project list from %s" % url)
 r = requests.get(url)
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index dfa4f42..1267699 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -116,7 +116,7 @@
 
 # Eradicate any and all system packages
 
-# Python in f23 and f22 depends on the python-pip package so removing it
+# Python in fedora depends on the python-pip package so removing it
 # results in a nonfunctional system. pip on fedora installs to /usr so pip
 # can safely override the system pip for all versions of fedora
 if ! is_fedora ; then
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 8b97265..3a61215 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -183,10 +183,8 @@
         # Copy the tools DEB to the XS web server
         XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb"
         ISO_DIR="/opt/xensource/packages/iso"
-        XS_TOOLS_FILE_NAME="xs-tools.deb"
-        XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME"
         if [ -e "$ISO_DIR" ]; then
-            TOOLS_ISO=$(ls -1 $ISO_DIR/xs-tools-*.iso | head -1)
+            TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1)
             TMP_DIR=/tmp/temp.$RANDOM
             mkdir -p $TMP_DIR
             mount -o loop $TOOLS_ISO $TMP_DIR