Merge "Add guide on running devstack in lxc container"
diff --git a/clean.sh b/clean.sh
index 3db4858..b18f28e 100755
--- a/clean.sh
+++ b/clean.sh
@@ -26,7 +26,7 @@
 fi
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
 # and ``DISTRO``
 GetDistro
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 6f45c1c..e985bdc 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -379,24 +379,6 @@
 
         PIP_UPGRADE=True
 
-
-Service Catalog Backend
------------------------
-
-By default DevStack uses Keystone's ``sql`` service catalog backend.
-An alternate ``template`` backend is also available, however, it does
-not support the ``service-*`` and ``endpoint-*`` commands of the
-``keystone`` CLI.  To do so requires the ``sql`` backend be enabled
-with ``KEYSTONE_CATALOG_BACKEND``:
-
-    ::
-
-        KEYSTONE_CATALOG_BACKEND=template
-
-DevStack's default configuration in ``sql`` mode is set in
-``lib/keystone``
-
-
 Guest Images
 ------------
 
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 7aca8d0..cd48915 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -110,11 +110,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Services can be turned off by adding ``disable_service xxx`` to
-``local.conf`` (using ``n-vol`` in this example):
+``local.conf`` (using ``c-vol`` in this example):
 
     ::
 
-        disable_service n-vol
+        disable_service c-vol
 
 Is enabling a service that defaults to off done with the reverse of the above?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index c6af953..c8b5c44 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -536,3 +536,20 @@
     LB_PHYSICAL_INTERFACE=eth0
     PUBLIC_PHYSICAL_NETWORK=default
     LB_INTERFACE_MAPPINGS=default:eth0
+
+Creating specific OVS bridges for physical networks
+---------------------------------------------------
+
+When using the Open vSwitch ML2 mechanism driver, multiple Open
+vSwitch bridges for physical networking can be created automatically
+by setting ``OVS_BRIDGE_MAPPINGS`` to a comma-separated list of
+physical network to bridge name associations with the following syntax:
+
+::
+
+    OVS_BRIDGE_MAPPINGS=net1name:bridge1name,net2name:bridge2name,<...>
+
+Note that ``OVS_BRIDGE_MAPPINGS`` takes precedence over ``PHYSICAL_NETWORK``
+and ``OVS_PHYSICAL_BRIDGE``: if the former is set, the latter two are
+ignored. When ``OVS_BRIDGE_MAPPINGS`` is not set, the other variables
+are still evaluated.
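
For illustration, a ``local.conf`` entry along the following lines would
have DevStack create two physical bridges at stack time (the network and
bridge names here are invented for the example)::

    [[local|localrc]]
    OVS_BRIDGE_MAPPINGS=physnet1:br-data,physnet2:br-storage
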
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 0da57ee..69ac430 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,5 +1,6 @@
 cryptsetup
 genisoimage
+gir1.2-libosinfo-1.0
 lvm2 # NOPRIME
 open-iscsi
 python-guestfs # NOPRIME
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
deleted file mode 100644
index 4aab416..0000000
--- a/files/default_catalog.templates
+++ /dev/null
@@ -1,63 +0,0 @@
-# config for TemplatedCatalog, using camelCase because I don't want to do
-# translations for legacy compat
-catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0
-catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0
-catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0
-catalog.RegionOne.identity.name = Identity Service
-
-
-catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
-catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
-catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
-catalog.RegionOne.compute.name = Compute Service
-
-
-catalog.RegionOne.computev21.publicURL = http://%SERVICE_HOST%:8774/v2.1/$(tenant_id)s
-catalog.RegionOne.computev21.adminURL = http://%SERVICE_HOST%:8774/v2.1/$(tenant_id)s
-catalog.RegionOne.computev21.internalURL = http://%SERVICE_HOST%:8774/v2.1/$(tenant_id)s
-catalog.RegionOne.computev21.name = Compute Service V2.1
-
-
-catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.name = Volume Service
-
-
-catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
-catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
-catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
-catalog.RegionOne.volumev2.name = Volume Service V2
-
-
-catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/
-catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/
-catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/
-catalog.RegionOne.ec2.name = EC2 Service
-
-
-catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
-catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
-catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
-catalog.RegionOne.s3.name = S3 Service
-
-
-catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292
-catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292
-catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292
-catalog.RegionOne.image.name = Image Service
-
-catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.cloudformation.name = CloudFormation service
-
-catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
-catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
-catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
-catalog.RegionOne.orchestration.name = Orchestration Service
-
-catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1
-catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1
-catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1
-catalog.RegionOne.metering.name = Telemetry Service
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index 29bd31b..9ece115 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,5 +1,6 @@
 cryptsetup
 genisoimage
+libosinfo
 lvm2
 open-iscsi
 sg3_utils
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 7773b04..26c5ced 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,6 +1,7 @@
 cryptsetup
 genisoimage
 iscsi-initiator-utils
+libosinfo
 lvm2
 sg3_utils
 # Stuff for diablo volumes
diff --git a/functions-common b/functions-common
index 6019a9c..80bdbea 100644
--- a/functions-common
+++ b/functions-common
@@ -292,112 +292,71 @@
 # ================
 
 # Determine OS Vendor, Release and Update
-# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
-# Returns results in global variables:
+
+#
+# NOTE: For portability, you almost certainly do not want to use
+# these variables directly!  The "is_*" functions defined below
+# bundle up compatible platforms under larger umbrellas that we have
+# determined are compatible enough (e.g. is_ubuntu covers Ubuntu &
+# Debian, is_fedora covers RPM-based distros).  Higher-level functions
+# such as "install_package" further abstract things in better ways.
+#
 # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
 # ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora)
-# ``os_UPDATE`` - update: ex. the ``5`` in ``RHEL6.5``
 # ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
-# ``os_CODENAME`` - vendor's codename for release: ``snow leopard``, ``trusty``
-os_VENDOR=""
-os_RELEASE=""
-os_UPDATE=""
-os_PACKAGE=""
-os_CODENAME=""
+# ``os_CODENAME`` - vendor's codename for release: ``trusty``
+
+declare os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
+
+# Make a *best effort* attempt to install an lsb_release package for
+# the user if one is not available.  Note we can't use the generic
+# install_package* functions here because they depend on this!
+function _ensure_lsb_release {
+    if [[ -x $(which lsb_release 2>/dev/null) ]]; then
+        return
+    fi
+
+    if [[ -x $(which apt-get 2>/dev/null) ]]; then
+        sudo apt-get install -y lsb-release
+    elif [[ -x $(which zypper 2>/dev/null) ]]; then
+        # XXX: old code paths seem to have assumed SUSE platforms also
+        # had "yum".  Keep this ordered above yum so we don't try to
+        # install the rh package.  suse calls it just "lsb"
+        sudo zypper --non-interactive install lsb
+    elif [[ -x $(which dnf 2>/dev/null) ]]; then
+        sudo dnf install -y redhat-lsb-core
+    elif [[ -x $(which yum 2>/dev/null) ]]; then
+        # all rh platforms (fedora, centos, rhel) have this pkg
+        sudo yum install -y redhat-lsb-core
+    else
+        die $LINENO "Unable to find or auto-install lsb_release"
+    fi
+}
 
 # GetOSVersion
+#  Set the following variables:
+#  - os_RELEASE
+#  - os_CODENAME
+#  - os_VENDOR
+#  - os_PACKAGE
 function GetOSVersion {
+    # We only support distros that provide a sane lsb_release
+    _ensure_lsb_release
 
-    # Figure out which vendor we are
-    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
-        # OS/X
-        os_VENDOR=`sw_vers -productName`
-        os_RELEASE=`sw_vers -productVersion`
-        os_UPDATE=${os_RELEASE##*.}
-        os_RELEASE=${os_RELEASE%.*}
-        os_PACKAGE=""
-        if [[ "$os_RELEASE" =~ "10.7" ]]; then
-            os_CODENAME="lion"
-        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
-            os_CODENAME="snow leopard"
-        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
-            os_CODENAME="leopard"
-        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
-            os_CODENAME="tiger"
-        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
-            os_CODENAME="panther"
-        else
-            os_CODENAME=""
-        fi
-    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
-        os_VENDOR=$(lsb_release -i -s)
-        os_RELEASE=$(lsb_release -r -s)
-        os_UPDATE=""
-        os_PACKAGE="rpm"
-        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
-            os_PACKAGE="deb"
-        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
-            lsb_release -d -s | grep -q openSUSE
-            if [[ $? -eq 0 ]]; then
-                os_VENDOR="openSUSE"
-            fi
-        elif [[ $os_VENDOR == "openSUSE project" ]]; then
-            os_VENDOR="openSUSE"
-        elif [[ $os_VENDOR =~ Red.*Hat ]]; then
-            os_VENDOR="Red Hat"
-        fi
-        os_CODENAME=$(lsb_release -c -s)
-    elif [[ -r /etc/redhat-release ]]; then
-        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
-        # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
-        # CentOS release 5.5 (Final)
-        # CentOS Linux release 6.0 (Final)
-        # Fedora release 16 (Verne)
-        # XenServer release 6.2.0-70446c (xenenterprise)
-        # Oracle Linux release 7
-        # CloudLinux release 7.1
-        os_CODENAME=""
-        for r in "Red Hat" CentOS Fedora XenServer CloudLinux; do
-            os_VENDOR=$r
-            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
-                ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
-                os_CODENAME=${ver#*|}
-                os_RELEASE=${ver%|*}
-                os_UPDATE=${os_RELEASE##*.}
-                os_RELEASE=${os_RELEASE%.*}
-                break
-            fi
-            os_VENDOR=""
-        done
-        if [ "$os_VENDOR" = "Red Hat" ] && [[ -r /etc/oracle-release ]]; then
-            os_VENDOR=OracleLinux
-        fi
-        os_PACKAGE="rpm"
-    elif [[ -r /etc/SuSE-release ]]; then
-        for r in openSUSE "SUSE Linux"; do
-            if [[ "$r" = "SUSE Linux" ]]; then
-                os_VENDOR="SUSE LINUX"
-            else
-                os_VENDOR=$r
-            fi
+    os_RELEASE=$(lsb_release -r -s)
+    os_CODENAME=$(lsb_release -c -s)
+    os_VENDOR=$(lsb_release -i -s)
 
-            if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
-                os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
-                break
-            fi
-            os_VENDOR=""
-        done
-        os_PACKAGE="rpm"
-    # If lsb_release is not installed, we should be able to detect Debian OS
-    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
-        os_VENDOR="Debian"
+    if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
         os_PACKAGE="deb"
-        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
-        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
+    else
+        os_PACKAGE="rpm"
     fi
-    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
+
+    typeset -xr os_VENDOR
+    typeset -xr os_RELEASE
+    typeset -xr os_PACKAGE
+    typeset -xr os_CODENAME
 }
 
 # Translate the OS version values into common nomenclature
@@ -406,8 +365,10 @@
 
 function GetDistro {
     GetOSVersion
-    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
-        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
+    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \
+            "$os_VENDOR" =~ (LinuxMint) ]]; then
+        # 'Everyone' refers to Ubuntu / Debian / Mint releases by
+        # the code name adjective
         DISTRO=$os_CODENAME
     elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
         # For Fedora, just use 'f' and the release
@@ -415,26 +376,22 @@
     elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
         DISTRO="opensuse-$os_RELEASE"
     elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
-        # For SLE, also use the service pack
-        if [[ -z "$os_UPDATE" ]]; then
-            DISTRO="sle${os_RELEASE}"
-        else
-            DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
-        fi
-    elif [[ "$os_VENDOR" =~ (Red Hat) || \
+        # just use major release
+        DISTRO="sle${os_RELEASE%.*}"
+    elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
         "$os_VENDOR" =~ (CentOS) || \
         "$os_VENDOR" =~ (OracleLinux) ]]; then
         # Drop the . release as we assume it's compatible
+        # XXX re-evaluate when we get RHEL10
         DISTRO="rhel${os_RELEASE::1}"
     elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
-        DISTRO="xs$os_RELEASE"
+        DISTRO="xs${os_RELEASE%.*}"
     elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then
         DISTRO="${os_VENDOR}${os_RELEASE::1}"
     else
-        # Catch-all for now is Vendor + Release + Update
-        DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
+        die $LINENO "Unable to determine DISTRO"
     fi
-    export DISTRO
+    typeset -xr DISTRO
 }
 
 # Utility function for checking machine architecture
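
As a rough usage sketch of the slimmed-down detection (assuming
``functions-common`` is sourced as part of a normal DevStack run), the
read-only exports end up looking like this::

    GetDistro
    # e.g. os_VENDOR=Ubuntu  os_RELEASE=14.04  os_CODENAME=trusty  os_PACKAGE=deb
    echo "Detected DISTRO=$DISTRO ($os_VENDOR $os_RELEASE, $os_PACKAGE packages)"
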
diff --git a/lib/cinder b/lib/cinder
index 3aea050..e1e1f2a 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -369,20 +369,21 @@
 
         create_service_user "cinder"
 
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        get_or_create_service "cinder" "volume" "Cinder Volume Service"
+        get_or_create_endpoint \
+            "volume" \
+            "$REGION_NAME" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
 
-            get_or_create_service "cinder" "volume" "Cinder Volume Service"
-            get_or_create_endpoint "volume" "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
-
-            get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
-            get_or_create_endpoint "volumev2" "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
-        fi
+        get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
+        get_or_create_endpoint \
+            "volumev2" \
+            "$REGION_NAME" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
+            "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
     fi
 }
 
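
For readers skimming the repeated catalog blocks in this change: the helper
takes the service type, the region, and then the public, admin and internal
URLs as positional arguments, roughly::

    get_or_create_endpoint <type> <region> <public-url> <admin-url> <internal-url>
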
diff --git a/lib/glance b/lib/glance
index fa93e6e..c248611 100644
--- a/lib/glance
+++ b/lib/glance
@@ -244,15 +244,13 @@
             get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
 
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            get_or_create_service "glance" "image" "Glance Image Service"
-            get_or_create_endpoint "image" \
-                "$REGION_NAME" \
-                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
-                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
-                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
-        fi
+        get_or_create_service "glance" "image" "Glance Image Service"
+        get_or_create_endpoint \
+            "image" \
+            "$REGION_NAME" \
+            "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+            "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+            "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
     fi
 }
 
diff --git a/lib/heat b/lib/heat
index df44b76..1bb753d 100644
--- a/lib/heat
+++ b/lib/heat
@@ -387,23 +387,21 @@
     if [[ "$HEAT_STANDALONE" != "True" ]]; then
 
         create_service_user "heat" "admin"
+        get_or_create_service "heat" "orchestration" "Heat Orchestration Service"
+        get_or_create_endpoint \
+            "orchestration" \
+            "$REGION_NAME" \
+            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
 
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            get_or_create_service "heat" "orchestration" "Heat Orchestration Service"
-            get_or_create_endpoint "orchestration" \
-                "$REGION_NAME" \
-                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
-
-            get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service"
-            get_or_create_endpoint "cloudformation"  \
-                "$REGION_NAME" \
-                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
-        fi
+        get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service"
+        get_or_create_endpoint \
+            "cloudformation"  \
+            "$REGION_NAME" \
+            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
 
         # heat_stack_user role is for users created by Heat
         get_or_create_role "heat_stack_user"
diff --git a/lib/horizon b/lib/horizon
index dca3111..abc1f6d 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -89,7 +89,6 @@
     cp $HORIZON_SETTINGS $local_settings
 
     _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\"
-    _horizon_config_set $local_settings "" CUSTOM_THEME_PATH \"themes/webroot\"
 
     _horizon_config_set $local_settings "" COMPRESS_OFFLINE True
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
diff --git a/lib/keystone b/lib/keystone
index 38a0acf..3c67693 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -52,13 +52,12 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 
+# NOTE(sdague): remove in Newton
+KEYSTONE_CATALOG_BACKEND="sql"
+
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
 
-# Select the Catalog backend driver
-KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
-KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
-
 # Select the token persistence backend driver
 KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql}
 
@@ -254,39 +253,6 @@
 
     iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND"
 
-    iniset $KEYSTONE_CONF catalog driver "$KEYSTONE_CATALOG_BACKEND"
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-        # Configure ``keystone.conf`` to use sql
-        inicomment $KEYSTONE_CONF catalog template_file
-    else
-        deprecated "Using templated service catalog in devstack is deprecated"
-        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
-
-        # Add swift endpoints to service catalog if swift is enabled
-        if is_service_enabled s-proxy; then
-            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
-        fi
-
-        # Add neutron endpoints to service catalog if neutron is enabled
-        if is_service_enabled neutron; then
-            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.network.name = Neutron Service" >> $KEYSTONE_CATALOG
-        fi
-
-        sed -e "
-            s,%SERVICE_HOST%,$SERVICE_HOST,g;
-            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
-        " -i $KEYSTONE_CATALOG
-
-        # Configure ``keystone.conf`` to use templates
-        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
-    fi
-
     # Set up logging
     if [ "$SYSLOG" != "False" ]; then
         iniset $KEYSTONE_CONF DEFAULT use_syslog "True"
@@ -588,7 +554,6 @@
 # - ``KEYSTONE_BIN_DIR``
 # - ``ADMIN_PASSWORD``
 # - ``IDENTITY_API_VERSION``
-# - ``KEYSTONE_CATALOG_BACKEND``
 # - ``KEYSTONE_AUTH_URI``
 # - ``REGION_NAME``
 # - ``KEYSTONE_SERVICE_PROTOCOL``
@@ -616,19 +581,17 @@
         --os-identity-api-version 3 --os-auth-url $KEYSTONE_AUTH_URI \
         --os-password $ADMIN_PASSWORD)
 
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+    export OS_TOKEN=$token_id
+    export OS_URL=$KEYSTONE_AUTH_URI/v3
+    export OS_IDENTITY_API_VERSION=3
 
-        export OS_TOKEN=$token_id
-        export OS_URL=$KEYSTONE_AUTH_URI/v3
-        export OS_IDENTITY_API_VERSION=3
-
-        get_or_create_service "keystone" "identity" "Keystone Identity Service"
-        get_or_create_endpoint "identity" \
-            "$REGION_NAME" \
-            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
-            "$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION" \
-            "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
-    fi
+    get_or_create_service "keystone" "identity" "Keystone Identity Service"
+    get_or_create_endpoint \
+        "identity" \
+        "$REGION_NAME" \
+        "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
+        "$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION" \
+        "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
 
     unset OS_TOKEN OS_URL OS_IDENTITY_API_VERSION
 }
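
To spell out the effect of hard-wiring the backend: a ``local.conf`` that
previously opted into the templated catalog, for example::

    [[local|localrc]]
    KEYSTONE_CATALOG_BACKEND=template

no longer has any effect; the ``sql`` catalog is always configured, and the
variable itself only survives (per the note in ``lib/keystone``) until it can
be dropped in Newton.
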
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 73a1bcd..d0523f4 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -525,15 +525,13 @@
 
         create_service_user "neutron"
 
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            get_or_create_service "neutron" "network" "Neutron Service"
-            get_or_create_endpoint "network" \
-                "$REGION_NAME" \
-                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
-                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
-                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
-        fi
+        get_or_create_service "neutron" "network" "Neutron Service"
+        get_or_create_endpoint \
+            "network" \
+            "$REGION_NAME" \
+            "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+            "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+            "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
     fi
 }
 
@@ -623,6 +621,13 @@
 
 # install_neutron() - Collect source and prepare
 function install_neutron {
+    # Install neutron-lib from git so we make sure we're testing
+    # the latest code.
+    if use_library_from_git "neutron-lib"; then
+        git_clone_by_name "neutron-lib"
+        setup_dev_lib "neutron-lib"
+    fi
+
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
     if is_service_enabled q-fwaas; then
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
deleted file mode 100644
index ca0b70c..0000000
--- a/lib/neutron_plugins/midonet
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# REVISIT(devvesa): This file is needed so Q_PLUGIN=midonet will work.
-
-# FIXME(yamamoto): This function should not be here, but unfortunately
-# devstack calls it before the external plugins are fetched
-function has_neutron_plugin_security_group {
-    # 0 means True here
-    return 0
-}
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index b1acacd..94a2689 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -37,6 +37,7 @@
 }
 
 function neutron_plugin_configure_plugin_agent {
+    local mappings_array mapping phys_bridge
     # Setup integration bridge
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
     _neutron_ovs_base_configure_firewall_driver
@@ -58,9 +59,15 @@
     # complex physical network configurations.
     if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
         OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+    fi
 
-        # Configure bridge manually with physical interface as port for multi-node
-        _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
+    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+        IFS=',' read -a mappings_array <<< "$OVS_BRIDGE_MAPPINGS"
+        for mapping in "${mappings_array[@]}"; do
+            phys_bridge=$(echo $mapping | cut -f 2 -d ":")
+            # Configure bridge manually with physical interface as port for multi-node
+            _neutron_ovs_base_add_bridge $phys_bridge
+        done
     fi
     if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
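
A standalone sketch of what the new loop does with a two-network mapping
(names invented for the example)::

    OVS_BRIDGE_MAPPINGS="physnet1:br-data,physnet2:br-storage"
    IFS=',' read -a mappings_array <<< "$OVS_BRIDGE_MAPPINGS"
    for mapping in "${mappings_array[@]}"; do
        # bridge name is the part after the colon; each one gets created
        phys_bridge=$(echo $mapping | cut -f 2 -d ":")
        echo "would call: _neutron_ovs_base_add_bridge $phys_bridge"
    done
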
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 2b7f32d..40968fa 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -7,7 +7,8 @@
 _XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace)
 set +o xtrace
 
-FWAAS_PLUGIN=neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin
+FWAAS_PLUGIN=${FWAAS_PLUGIN:-neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin}
+FWAAS_DRIVER=${FWAAS_DRIVER:-neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver}
 
 function neutron_fwaas_configure_common {
     _neutron_service_plugin_class_add $FWAAS_PLUGIN
@@ -21,7 +22,7 @@
     cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME
 
     iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
-    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
+    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "$FWAAS_DRIVER"
 }
 
 function neutron_fwaas_stop {
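
With both values now overridable, a deployment can point at an alternative
implementation from ``local.conf``; the driver path below is purely
hypothetical::

    [[local|localrc]]
    FWAAS_DRIVER=my_fwaas_vendor.drivers.MyIptablesLikeDriver
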
diff --git a/lib/nova b/lib/nova
index dc425a1..cce538d 100644
--- a/lib/nova
+++ b/lib/nova
@@ -96,7 +96,7 @@
 
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
-SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
+SCHEDULER=${SCHEDULER:-filter_scheduler}
 
 # The following FILTERS contains SameHostFilter and DifferentHostFilter with
 # the default filters.
@@ -415,29 +415,28 @@
         # this service user when notifying nova of changes and that requires the admin role.
         create_service_user "nova" "admin"
 
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            local nova_api_url
-            if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
-                nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
-            else
-                nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
-            fi
-
-            get_or_create_service "nova_legacy" "compute_legacy" \
-                "Nova Compute Service (Legacy 2.0)"
-            get_or_create_endpoint "compute_legacy" \
-                "$REGION_NAME" \
-                "$nova_api_url/v2/\$(tenant_id)s" \
-                "$nova_api_url/v2/\$(tenant_id)s" \
-                "$nova_api_url/v2/\$(tenant_id)s"
-
-            get_or_create_service "nova" "compute" "Nova Compute Service"
-            get_or_create_endpoint "compute" \
-                "$REGION_NAME" \
-                "$nova_api_url/v2.1/\$(tenant_id)s" \
-                "$nova_api_url/v2.1/\$(tenant_id)s" \
-                "$nova_api_url/v2.1/\$(tenant_id)s"
+        local nova_api_url
+        if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
+            nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
+        else
+            nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
         fi
+
+        get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)"
+        get_or_create_endpoint \
+            "compute_legacy" \
+            "$REGION_NAME" \
+            "$nova_api_url/v2/\$(tenant_id)s" \
+            "$nova_api_url/v2/\$(tenant_id)s" \
+            "$nova_api_url/v2/\$(tenant_id)s"
+
+        get_or_create_service "nova" "compute" "Nova Compute Service"
+        get_or_create_endpoint \
+            "compute" \
+            "$REGION_NAME" \
+            "$nova_api_url/v2.1/\$(tenant_id)s" \
+            "$nova_api_url/v2.1/\$(tenant_id)s" \
+            "$nova_api_url/v2.1/\$(tenant_id)s"
     fi
 
     if is_service_enabled n-api; then
@@ -451,15 +450,13 @@
 
     # S3
     if is_service_enabled swift3; then
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            get_or_create_service "s3" "s3" "S3"
-            get_or_create_endpoint "s3" \
-                "$REGION_NAME" \
-                "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-                "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-                "http://$SERVICE_HOST:$S3_SERVICE_PORT"
-        fi
+        get_or_create_service "s3" "s3" "S3"
+        get_or_create_endpoint \
+            "s3" \
+            "$REGION_NAME" \
+            "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            "http://$SERVICE_HOST:$S3_SERVICE_PORT"
     fi
 }
 
@@ -689,8 +686,6 @@
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
-    # force down dhcp leases to 5 minutes, which lets us expire faster
-    iniset $NOVA_CONF DEFAULT dhcp_lease_time 300
     if [ -n "$FLAT_INTERFACE" ]; then
         iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
     fi
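
Because ``SCHEDULER`` is now a short entry-point style name rather than a
full class path, an override shrinks accordingly (``caching_scheduler`` is
used here on the assumption that the entry point exists in the installed
Nova)::

    [[local|localrc]]
    SCHEDULER=caching_scheduler
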
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c6ed85d..43e7797 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -41,7 +41,7 @@
 
     iniset $NOVA_CONF DEFAULT compute_driver nova.virt.ironic.IronicDriver
     iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-    iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.ironic_host_manager.IronicHostManager
+    iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
     iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
     iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
diff --git a/lib/swift b/lib/swift
index 9edeb0a..947d2ab 100644
--- a/lib/swift
+++ b/lib/swift
@@ -612,8 +612,6 @@
     export swiftusertest3_password=testing3
     export swiftusertest4_password=testing4
 
-    KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
-
     local another_role
     another_role=$(get_or_create_role "anotherrole")
 
@@ -621,15 +619,13 @@
     # temp urls, which break when uploaded by a non-admin role
     create_service_user "swift" "admin"
 
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-        get_or_create_service "swift" "object-store" "Swift Service"
-        get_or_create_endpoint "object-store" \
-            "$REGION_NAME" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s"
-    fi
+    get_or_create_service "swift" "object-store" "Swift Service"
+    get_or_create_endpoint \
+        "object-store" \
+        "$REGION_NAME" \
+        "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" \
+        "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \
+        "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s"
 
     local swift_tenant_test1
     swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
diff --git a/stack.sh b/stack.sh
index c21ff77..6dddea4 100755
--- a/stack.sh
+++ b/stack.sh
@@ -138,7 +138,7 @@
 source $TOP_DIR/lib/stack
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
 # and ``DISTRO``
 GetDistro
 
diff --git a/stackrc b/stackrc
index 887d4b3..7294f7b 100644
--- a/stackrc
+++ b/stackrc
@@ -15,7 +15,7 @@
 export LC_ALL
 
 # Make tracing more educational
-export PS4='+ ${BASH_SOURCE}:${FUNCNAME[0]}:L${LINENO}:   '
+export PS4='+ ${BASH_SOURCE:-}:${FUNCNAME[0]:-}:L${LINENO:-}:   '
 
 # Find the other rc files
 RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
@@ -483,6 +483,15 @@
 # this doesn't exist in a lib file, so set it here
 GITDIR["ironic-lib"]=$DEST/ironic-lib
 
+# diskimage-builder tool
+GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
+GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-master}
+GITDIR["diskimage-builder"]=$DEST/diskimage-builder
+
+# neutron-lib library containing neutron stable non-REST interfaces
+GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git}
+GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master}
+GITDIR["neutron-lib"]=$DEST/neutron-lib
 
 ##################
 #
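
These entries plug into the standard ``LIBS_FROM_GIT`` handling; for example,
with ``install_neutron`` now checking ``use_library_from_git``, testing the
tip of neutron-lib is a one-liner in ``local.conf``::

    [[local|localrc]]
    LIBS_FROM_GIT=neutron-lib
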
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 326241d..ee11fd2 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -41,7 +41,8 @@
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler"
-ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep"
+ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
+ALL_LIBS+=" diskimage-builder"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index b49164b..c0b7ac7 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -24,7 +24,7 @@
 source $TOP_DIR/functions
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
 # and ``DISTRO``
 GetDistro
 
diff --git a/tools/info.sh b/tools/info.sh
index 433206e..c056fa7 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -52,10 +52,6 @@
 echo "os|distro=$DISTRO"
 echo "os|vendor=$os_VENDOR"
 echo "os|release=$os_RELEASE"
-if [ -n "$os_UPDATE" ]; then
-    echo "os|version=$os_UPDATE"
-fi
-
 
 # Repos
 # -----
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 031f8a8..8895e1e 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -28,7 +28,7 @@
     source $TOP_DIR/functions
 
     # Determine what system we are running on.  This provides ``os_VENDOR``,
-    # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+    # ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
     # and ``DISTRO``
     GetDistro
 
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 9d2b082..d129374 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -56,6 +56,7 @@
     print
     try:
         subprocess.check_call(cmd, shell=True)
+        print
     except subprocess.CalledProcessError:
         print "*** Failed to run: %s" % cmd
 
@@ -100,14 +101,38 @@
         _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table)
 
 
+def _netns_list():
+    process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE)
+    stdout, _ = process.communicate()
+    return stdout.split()
+
+
 def network_dump():
     _header("Network Dump")
 
     _dump_cmd("brctl show")
     _dump_cmd("arp -n")
-    _dump_cmd("ip addr")
-    _dump_cmd("ip link")
-    _dump_cmd("ip route")
+    ip_cmds = ["addr", "link", "route"]
+    for cmd in ip_cmds + ['netns']:
+        _dump_cmd("ip %s" % cmd)
+    for netns_ in _netns_list():
+        for cmd in ip_cmds:
+            args = {'netns': netns_, 'cmd': cmd}
+            _dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args)
+
+
+def ovs_dump():
+    _header("Open vSwitch Dump")
+
+    # NOTE(ihrachys): worlddump is used outside of devstack context (e.g. in
+    # grenade), so there is no single place to determine the bridge names from.
+    # Hardcode for now.
+    bridges = ('br-int', 'br-tun', 'br-ex')
+    _dump_cmd("sudo ovs-vsctl show")
+    for bridge in bridges:
+        _dump_cmd("sudo ovs-ofctl show %s" % bridge)
+    for bridge in bridges:
+        _dump_cmd("sudo ovs-ofctl dump-flows %s" % bridge)
 
 
 def process_list():
@@ -147,6 +172,7 @@
         disk_space()
         process_list()
         network_dump()
+        ovs_dump()
         iptables_dump()
         ebtables_dump()
         compute_consoles()
diff --git a/unstack.sh b/unstack.sh
index 47beb04..d69e3f5 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -84,7 +84,7 @@
 load_plugin_settings
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
 GetOSVersion
 
 set -o xtrace