Merge "add heat to the default devstack service list"
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index edcc6d4..99b2c8e 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -24,6 +24,7 @@
 source $TOP_DIR/functions
 source $TOP_DIR/stackrc
 source $TOP_DIR/openrc
+source $TOP_DIR/lib/infra
 source $TOP_DIR/lib/tempest
 source $TOP_DIR/lib/cinder
 
@@ -89,9 +90,8 @@
 sleep 5
 
 # run tempest api/volume/test_*
-log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True
-exec 2> >(tee -a $TEMPFILE)
-`./tools/pretty_tox.sh api.volume`
+log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True
+./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE
 if [[ $? = 0 ]]; then
     log_message "CONGRATULATIONS!!!  Device driver PASSED!", True
     log_message "Submit output: ($TEMPFILE)"
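
A note on how the new pipeline above behaves: in bash, $? after a pipeline reports the exit status of the last command in the pipe (here tee), not of pretty_tox.sh, unless pipefail is enabled. A minimal sketch of checking the test command's own status through the tee, should that distinction matter, using the bash PIPESTATUS array (this is only an illustration, not part of the change):

    ./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE
    if [[ ${PIPESTATUS[0]} = 0 ]]; then   # status of pretty_tox.sh, not of tee
        echo "volume tests passed"
    fi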
diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh
index f68a146..9e61dc5 100644
--- a/extras.d/50-ironic.sh
+++ b/extras.d/50-ironic.sh
@@ -28,6 +28,9 @@
 
     if [[ "$1" == "unstack" ]]; then
         stop_ironic
+    fi
+
+    if [[ "$1" == "clean" ]]; then
         cleanup_ironic
     fi
 fi
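
For context, the hunk above relies on extras.d hooks receiving the current phase as their first argument, which is what lets "unstack" and "clean" now be handled separately. A rough skeleton of a hook following the same convention (the myservice names are hypothetical):

    # extras.d/60-myservice.sh - hypothetical plugin, same dispatch style
    if is_service_enabled myservice; then
        if [[ "$1" == "unstack" ]]; then
            stop_myservice        # stop running processes only
        fi
        if [[ "$1" == "clean" ]]; then
            cleanup_myservice     # remove databases, state files, etc.
        fi
    fi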
diff --git a/functions b/functions
index dc3278b..5eae7fe 100644
--- a/functions
+++ b/functions
@@ -1,563 +1,21 @@
-# functions - Common functions used by DevStack components
+# functions - DevStack-specific functions
 #
 # The following variables are assumed to be defined by certain functions:
 #
 # - ``ENABLED_SERVICES``
-# - ``ERROR_ON_CLONE``
 # - ``FILES``
 # - ``GLANCE_HOSTPORT``
-# - ``OFFLINE``
-# - ``PIP_DOWNLOAD_CACHE``
-# - ``PIP_USE_MIRRORS``
-# - ``RECLONE``
 # - ``TRACK_DEPENDS``
-# - ``http_proxy``, ``https_proxy``, ``no_proxy``
 
+# Include the common functions
+FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
+source ${FUNC_DIR}/functions-common
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
-# Convert CIDR notation to a IPv4 netmask
-# cidr2netmask cidr-bits
-function cidr2netmask() {
-    local maskpat="255 255 255 255"
-    local maskdgt="254 252 248 240 224 192 128"
-    set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3}
-    echo ${1-0}.${2-0}.${3-0}.${4-0}
-}
-
-
-# Return the network portion of the given IP address using netmask
-# netmask is in the traditional dotted-quad format
-# maskip ip-address netmask
-function maskip() {
-    local ip=$1
-    local mask=$2
-    local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
-    local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
-    echo $subnet
-}
-
-
-# Exit 0 if address is in network or 1 if address is not in network
-# ip-range is in CIDR notation: 1.2.3.4/20
-# address_in_net ip-address ip-range
-function address_in_net() {
-    local ip=$1
-    local range=$2
-    local masklen=${range#*/}
-    local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
-    local subnet=$(maskip $ip $(cidr2netmask $masklen))
-    [[ $network == $subnet ]]
-}
-
-
-# Wrapper for ``apt-get`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``
-# apt_get operation package [package ...]
-function apt_get() {
-    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo DEBIAN_FRONTEND=noninteractive \
-        http_proxy=$http_proxy https_proxy=$https_proxy \
-        no_proxy=$no_proxy \
-        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
-}
-
-
-# Gracefully cp only if source file/dir exists
-# cp_it source destination
-function cp_it {
-    if [ -e $1 ] || [ -d $1 ]; then
-        cp -pRL $1 $2
-    fi
-}
-
-
-# Prints backtrace info
-# filename:lineno:function
-function backtrace {
-    local level=$1
-    local deep=$((${#BASH_SOURCE[@]} - 1))
-    echo "[Call Trace]"
-    while [ $level -le $deep ]; do
-        echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
-        deep=$((deep - 1))
-    done
-}
-
-
-# Prints line number and "message" then exits
-# die $LINENO "message"
-function die() {
-    local exitcode=$?
-    set +o xtrace
-    local line=$1; shift
-    if [ $exitcode == 0 ]; then
-        exitcode=1
-    fi
-    backtrace 2
-    err $line "$*"
-    exit $exitcode
-}
-
-
-# Checks an environment variable is not set or has length 0 OR if the
-# exit code is non-zero and prints "message" and exits
-# NOTE: env-var is the variable name without a '$'
-# die_if_not_set $LINENO env-var "message"
-function die_if_not_set() {
-    local exitcode=$?
-    FXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local line=$1; shift
-    local evar=$1; shift
-    if ! is_set $evar || [ $exitcode != 0 ]; then
-        die $line "$*"
-    fi
-    $FXTRACE
-}
-
-
-# Prints line number and "message" in error format
-# err $LINENO "message"
-function err() {
-    local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
-    $errXTRACE
-    return $exitcode
-}
-
-
-# Checks an environment variable is not set or has length 0 OR if the
-# exit code is non-zero and prints "message"
-# NOTE: env-var is the variable name without a '$'
-# err_if_not_set $LINENO env-var "message"
-function err_if_not_set() {
-    local exitcode=$?
-    errinsXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local line=$1; shift
-    local evar=$1; shift
-    if ! is_set $evar || [ $exitcode != 0 ]; then
-        err $line "$*"
-    fi
-    $errinsXTRACE
-    return $exitcode
-}
-
-
-# Prints line number and "message" in warning format
-# warn $LINENO "message"
-function warn() {
-    local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
-    $errXTRACE
-    return $exitcode
-}
-
-
-# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
-# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
-# ``localrc`` or on the command line if necessary::
-#
-# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
-#
-#     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
-
-function export_proxy_variables() {
-    if [[ -n "$http_proxy" ]]; then
-        export http_proxy=$http_proxy
-    fi
-    if [[ -n "$https_proxy" ]]; then
-        export https_proxy=$https_proxy
-    fi
-    if [[ -n "$no_proxy" ]]; then
-        export no_proxy=$no_proxy
-    fi
-}
-
-
-# Grab a numbered field from python prettytable output
-# Fields are numbered starting with 1
-# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
-# get_field field-number
-function get_field() {
-    while read data; do
-        if [ "$1" -lt 0 ]; then
-            field="(\$(NF$1))"
-        else
-            field="\$$(($1 + 1))"
-        fi
-        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
-    done
-}
-
-
-# Get the default value for HOST_IP
-# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
-function get_default_host_ip() {
-    local fixed_range=$1
-    local floating_range=$2
-    local host_ip_iface=$3
-    local host_ip=$4
-
-    # Find the interface used for the default route
-    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
-    # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
-    if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
-        host_ip=""
-        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
-        for IP in $host_ips; do
-            # Attempt to filter out IP addresses that are part of the fixed and
-            # floating range. Note that this method only works if the ``netaddr``
-            # python library is installed. If it is not installed, an error
-            # will be printed and the first IP from the interface will be used.
-            # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
-            # address.
-            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
-                host_ip=$IP
-                break;
-            fi
-        done
-    fi
-    echo $host_ip
-}
-
-
-function _get_package_dir() {
-    local pkg_dir
-    if is_ubuntu; then
-        pkg_dir=$FILES/apts
-    elif is_fedora; then
-        pkg_dir=$FILES/rpms
-    elif is_suse; then
-        pkg_dir=$FILES/rpms-suse
-    else
-        exit_distro_not_supported "list of packages"
-    fi
-    echo "$pkg_dir"
-}
-
-
-# get_packages() collects a list of package names of any type from the
-# prerequisite files in ``files/{apts|rpms}``.  The list is intended
-# to be passed to a package installer such as apt or yum.
-#
-# Only packages required for the services in 1st argument will be
-# included.  Two bits of metadata are recognized in the prerequisite files:
-#
-# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
-# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
-#   of the package to the distros listed.  The distro names are case insensitive.
-function get_packages() {
-    local services=$@
-    local package_dir=$(_get_package_dir)
-    local file_to_parse
-    local service
-
-    if [[ -z "$package_dir" ]]; then
-        echo "No package directory supplied"
-        return 1
-    fi
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-    for service in ${services//,/ }; do
-        # Allow individual services to specify dependencies
-        if [[ -e ${package_dir}/${service} ]]; then
-            file_to_parse="${file_to_parse} $service"
-        fi
-        # NOTE(sdague) n-api needs glance for now because that's where
-        # glance client is
-        if [[ $service == n-api ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == c-* ]]; then
-            if [[ ! $file_to_parse =~ cinder ]]; then
-                file_to_parse="${file_to_parse} cinder"
-            fi
-        elif [[ $service == ceilometer-* ]]; then
-            if [[ ! $file_to_parse =~ ceilometer ]]; then
-                file_to_parse="${file_to_parse} ceilometer"
-            fi
-        elif [[ $service == s-* ]]; then
-            if [[ ! $file_to_parse =~ swift ]]; then
-                file_to_parse="${file_to_parse} swift"
-            fi
-        elif [[ $service == n-* ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-        elif [[ $service == g-* ]]; then
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == key* ]]; then
-            if [[ ! $file_to_parse =~ keystone ]]; then
-                file_to_parse="${file_to_parse} keystone"
-            fi
-        elif [[ $service == q-* ]]; then
-            if [[ ! $file_to_parse =~ neutron ]]; then
-                file_to_parse="${file_to_parse} neutron"
-            fi
-        fi
-    done
-
-    for file in ${file_to_parse}; do
-        local fname=${package_dir}/${file}
-        local OIFS line package distros distro
-        [[ -e $fname ]] || continue
-
-        OIFS=$IFS
-        IFS=$'\n'
-        for line in $(<${fname}); do
-            if [[ $line =~ "NOPRIME" ]]; then
-                continue
-            fi
-
-            # Assume we want this package
-            package=${line%#*}
-            inst_pkg=1
-
-            # Look for # dist:xxx in comment
-            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
-                # We are using BASH regexp matching feature.
-                package=${BASH_REMATCH[1]}
-                distros=${BASH_REMATCH[2]}
-                # In bash ${VAR,,} will lowercase VAR
-                # Look for a match in the distro list
-                if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
-                    # If no match then skip this package
-                    inst_pkg=0
-                fi
-            fi
-
-            # Look for # testonly in comment
-            if [[ $line =~ (.*)#.*testonly.* ]]; then
-                package=${BASH_REMATCH[1]}
-                # Are we installing test packages? (test for the default value)
-                if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then
-                    # If not installing test packages then skip this package
-                    inst_pkg=0
-                fi
-            fi
-
-            if [[ $inst_pkg = 1 ]]; then
-                echo $package
-            fi
-        done
-        IFS=$OIFS
-    done
-}
-
-
-# Determine OS Vendor, Release and Update
-# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
-# Returns results in global variables:
-# os_VENDOR - vendor name
-# os_RELEASE - release
-# os_UPDATE - update
-# os_PACKAGE - package type
-# os_CODENAME - vendor's codename for release
-# GetOSVersion
-GetOSVersion() {
-    # Figure out which vendor we are
-    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
-        # OS/X
-        os_VENDOR=`sw_vers -productName`
-        os_RELEASE=`sw_vers -productVersion`
-        os_UPDATE=${os_RELEASE##*.}
-        os_RELEASE=${os_RELEASE%.*}
-        os_PACKAGE=""
-        if [[ "$os_RELEASE" =~ "10.7" ]]; then
-            os_CODENAME="lion"
-        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
-            os_CODENAME="snow leopard"
-        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
-            os_CODENAME="leopard"
-        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
-            os_CODENAME="tiger"
-        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
-            os_CODENAME="panther"
-        else
-            os_CODENAME=""
-        fi
-    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
-        os_VENDOR=$(lsb_release -i -s)
-        os_RELEASE=$(lsb_release -r -s)
-        os_UPDATE=""
-        os_PACKAGE="rpm"
-        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
-            os_PACKAGE="deb"
-        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
-            lsb_release -d -s | grep -q openSUSE
-            if [[ $? -eq 0 ]]; then
-                os_VENDOR="openSUSE"
-            fi
-        elif [[ $os_VENDOR == "openSUSE project" ]]; then
-            os_VENDOR="openSUSE"
-        elif [[ $os_VENDOR =~ Red.*Hat ]]; then
-            os_VENDOR="Red Hat"
-        fi
-        os_CODENAME=$(lsb_release -c -s)
-    elif [[ -r /etc/redhat-release ]]; then
-        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
-        # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
-        # CentOS release 5.5 (Final)
-        # CentOS Linux release 6.0 (Final)
-        # Fedora release 16 (Verne)
-        # XenServer release 6.2.0-70446c (xenenterprise)
-        os_CODENAME=""
-        for r in "Red Hat" CentOS Fedora XenServer; do
-            os_VENDOR=$r
-            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
-                ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
-                os_CODENAME=${ver#*|}
-                os_RELEASE=${ver%|*}
-                os_UPDATE=${os_RELEASE##*.}
-                os_RELEASE=${os_RELEASE%.*}
-                break
-            fi
-            os_VENDOR=""
-        done
-        os_PACKAGE="rpm"
-    elif [[ -r /etc/SuSE-release ]]; then
-        for r in openSUSE "SUSE Linux"; do
-            if [[ "$r" = "SUSE Linux" ]]; then
-                os_VENDOR="SUSE LINUX"
-            else
-                os_VENDOR=$r
-            fi
-
-            if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
-                os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
-                break
-            fi
-            os_VENDOR=""
-        done
-        os_PACKAGE="rpm"
-    # If lsb_release is not installed, we should be able to detect Debian OS
-    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
-        os_VENDOR="Debian"
-        os_PACKAGE="deb"
-        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
-        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
-    fi
-    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
-}
-
-
-# Translate the OS version values into common nomenclature
-# Sets ``DISTRO`` from the ``os_*`` values
-function GetDistro() {
-    GetOSVersion
-    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
-        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
-        DISTRO=$os_CODENAME
-    elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
-        # For Fedora, just use 'f' and the release
-        DISTRO="f$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
-        DISTRO="opensuse-$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
-        # For SLE, also use the service pack
-        if [[ -z "$os_UPDATE" ]]; then
-            DISTRO="sle${os_RELEASE}"
-        else
-            DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
-        fi
-    elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
-        # Drop the . release as we assume it's compatible
-        DISTRO="rhel${os_RELEASE::1}"
-    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
-        DISTRO="xs$os_RELEASE"
-    else
-        # Catch-all for now is Vendor + Release + Update
-        DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
-    fi
-    export DISTRO
-}
-
-
-# Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS, etc).
-# is_fedora
-function is_fedora {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ]
-}
-
-
-# Determine if current distribution is a SUSE-based distribution
-# (openSUSE, SLE).
-# is_suse
-function is_suse {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
-}
-
-
-# Determine if current distribution is an Ubuntu-based distribution
-# It will also detect non-Ubuntu but Debian-based distros
-# is_ubuntu
-function is_ubuntu {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    [ "$os_PACKAGE" = "deb" ]
-}
-
-
-# Exit after outputting a message about the distribution not being supported.
-# exit_distro_not_supported [optional-string-telling-what-is-missing]
-function exit_distro_not_supported {
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-
-    if [ $# -gt 0 ]; then
-        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
-    else
-        die $LINENO "Support for $DISTRO is incomplete."
-    fi
-}
-
-# Utility function for checking machine architecture
-# is_arch arch-type
-function is_arch {
-    ARCH_TYPE=$1
-
-    [[ "$(uname -m)" == "$ARCH_TYPE" ]]
-}
-
 # Checks if installed Apache is <= given version
 # $1 = x.y.z (version string of Apache)
 function check_apache_version {
@@ -570,488 +28,6 @@
     expr "$version" '>=' $1 > /dev/null
 }
 
-# git clone only if directory doesn't exist already.  Since ``DEST`` might not
-# be owned by the installation user, we create the directory and change the
-# ownership to the proper user.
-# Set global RECLONE=yes to simulate a clone when dest-dir exists
-# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
-# does not exist (default is False, meaning the repo will be cloned).
-# Uses global ``OFFLINE``
-# git_clone remote dest-dir branch
-function git_clone {
-    GIT_REMOTE=$1
-    GIT_DEST=$2
-    GIT_REF=$3
-    RECLONE=$(trueorfalse False $RECLONE)
-
-    if [[ "$OFFLINE" = "True" ]]; then
-        echo "Running in offline mode, clones already exist"
-        # print out the results so we know what change was used in the logs
-        cd $GIT_DEST
-        git show --oneline | head -1
-        return
-    fi
-
-    if echo $GIT_REF | egrep -q "^refs"; then
-        # If our branch name is a gerrit style refs/changes/...
-        if [[ ! -d $GIT_DEST ]]; then
-            [[ "$ERROR_ON_CLONE" = "True" ]] && \
-                die $LINENO "Cloning not allowed in this configuration"
-            git clone $GIT_REMOTE $GIT_DEST
-        fi
-        cd $GIT_DEST
-        git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
-    else
-        # do a full clone only if the directory doesn't exist
-        if [[ ! -d $GIT_DEST ]]; then
-            [[ "$ERROR_ON_CLONE" = "True" ]] && \
-                die $LINENO "Cloning not allowed in this configuration"
-            git clone $GIT_REMOTE $GIT_DEST
-            cd $GIT_DEST
-            # This checkout syntax works for both branches and tags
-            git checkout $GIT_REF
-        elif [[ "$RECLONE" = "True" ]]; then
-            # if it does exist then simulate what clone does if asked to RECLONE
-            cd $GIT_DEST
-            # set the url to pull from and fetch
-            git remote set-url origin $GIT_REMOTE
-            git fetch origin
-            # remove the existing ignored files (like pyc) as they cause breakage
-            # (due to the py files having older timestamps than our pyc, so python
-            # thinks the pyc files are correct using them)
-            find $GIT_DEST -name '*.pyc' -delete
-
-            # handle GIT_REF accordingly to type (tag, branch)
-            if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
-                git_update_tag $GIT_REF
-            elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
-                git_update_branch $GIT_REF
-            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
-                git_update_remote_branch $GIT_REF
-            else
-                die $LINENO "$GIT_REF is neither branch nor tag"
-            fi
-
-        fi
-    fi
-
-    # print out the results so we know what change was used in the logs
-    cd $GIT_DEST
-    git show --oneline | head -1
-}
-
-
-# git update using reference as a branch.
-# git_update_branch ref
-function git_update_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -f origin/$GIT_BRANCH
-    # a local branch might not exist
-    git branch -D $GIT_BRANCH || true
-    git checkout -b $GIT_BRANCH
-}
-
-
-# git update using reference as a remote branch.
-# git_update_remote_branch ref
-function git_update_remote_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
-}
-
-
-# git update using reference as a tag. Be careful editing source in that repo
-# as the working copy will be in a detached state
-# git_update_tag ref
-function git_update_tag() {
-
-    GIT_TAG=$1
-
-    git tag -d $GIT_TAG
-    # fetching given tag only
-    git fetch origin tag $GIT_TAG
-    git checkout -f $GIT_TAG
-}
-
-
-# Comment an option in an INI file
-# inicomment config-file section option
-function inicomment() {
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
-}
-
-
-# Uncomment an option in an INI file
-# iniuncomment config-file section option
-function iniuncomment() {
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
-}
-
-
-# Get an option from an INI file
-# iniget config-file section option
-function iniget() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    echo ${line#*=}
-}
-
-
-# Determine if the given option is present in the INI file
-# ini_has_option config-file section option
-function ini_has_option() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    [ -n "$line" ]
-}
-
-
-# Set an option in an INI file
-# iniset config-file section option value
-function iniset() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-
-    [[ -z $section || -z $option ]] && return
-
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    fi
-    if ! ini_has_option "$file" "$section" "$option"; then
-        # Add it
-        sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-    else
-        local sep=$(echo -ne "\x01")
-        # Replace it
-        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
-    fi
-}
-
-
-# Get a multiple line option from an INI file
-# iniget_multiline config-file section option
-function iniget_multiline() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local values
-    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
-    echo ${values}
-}
-
-
-# Set a multiple line option in an INI file
-# iniset_multiline config-file section option value1 value2 value3 ...
-function iniset_multiline() {
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-    local values
-    for v in $@; do
-        # The later sed command inserts each new value in the line next to
-        # the section identifier, which causes the values to be inserted in
-        # the reverse order. Do a reverse here to keep the original order.
-        values="$v ${values}"
-    done
-    if ! grep -q "^\[$section\]" "$file"; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    else
-        # Remove old values
-        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
-    fi
-    # Add new ones
-    for v in $values; do
-        sed -i -e "/^\[$section\]/ a\\
-$option = $v
-" "$file"
-    done
-}
-
-
-# Append a new option in an ini file without replacing the old value
-# iniadd config-file section option value1 value2 value3 ...
-function iniadd() {
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-    local values="$(iniget_multiline $file $section $option) $@"
-    iniset_multiline $file $section $option $values
-}
-
-# Find out if a process exists by partial name.
-# is_running name
-function is_running() {
-    local name=$1
-    ps auxw | grep -v grep | grep ${name} > /dev/null
-    RC=$?
-    # some times I really hate bash reverse binary logic
-    return $RC
-}
-
-
-# is_service_enabled() checks if the service(s) specified as arguments are
-# enabled by the user in ``ENABLED_SERVICES``.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e. it returns on the first match.
-#
-# There are special cases for some 'catch-all' services::
-#   **nova** returns true if any enabled service starts with **n-**
-#   **cinder** returns true if any enabled service starts with **c-**
-#   **ceilometer** returns true if any enabled service starts with **ceilometer**
-#   **glance** returns true if any enabled service starts with **g-**
-#   **neutron** returns true if any enabled service starts with **q-**
-#   **swift** returns true if any enabled service starts with **s-**
-#   **trove** returns true if any enabled service starts with **tr-**
-#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
-#   **s-** services will be enabled. This will be deprecated in the future.
-#
-# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
-# We also need to make sure to treat **n-cell-region** and **n-cell-child**
-# as enabled in this case.
-#
-# Uses global ``ENABLED_SERVICES``
-# is_service_enabled service [service ...]
-function is_service_enabled() {
-    services=$@
-    for service in ${services}; do
-        [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
-
-        # Look for top-level 'enabled' function for this service
-        if type is_${service}_enabled >/dev/null 2>&1; then
-            # A function exists for this service, use it
-            is_${service}_enabled
-            return $?
-        fi
-
-        # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
-        #                are implemented
-        [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
-        [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
-        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
-        [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
-        [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
-        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
-        [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
-        [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
-        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
-        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
-    done
-    return 1
-}
-
-
-# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
-# _cleanup_service_list service-list
-function _cleanup_service_list () {
-    echo "$1" | sed -e '
-        s/,,/,/g;
-        s/^,//;
-        s/,$//
-    '
-}
-
-
-# enable_service() adds the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are not already present.
-#
-# For example:
-#   enable_service qpid
-#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
-# enable_service service [service ...]
-function enable_service() {
-    local tmpsvcs="${ENABLED_SERVICES}"
-    for service in $@; do
-        if ! is_service_enabled $service; then
-            tmpsvcs+=",$service"
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-    disable_negated_services
-}
-
-
-# disable_service() removes the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are present.
-#
-# For example:
-#   disable_service rabbit
-#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
-# disable_service service [service ...]
-function disable_service() {
-    local tmpsvcs=",${ENABLED_SERVICES},"
-    local service
-    for service in $@; do
-        if is_service_enabled $service; then
-            tmpsvcs=${tmpsvcs//,$service,/,}
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-}
-
-
-# disable_all_services() removes all current services
-# from ``ENABLED_SERVICES`` to reset the configuration
-# before a minimal installation
-# Uses global ``ENABLED_SERVICES``
-# disable_all_services
-function disable_all_services() {
-    ENABLED_SERVICES=""
-}
-
-
-# Remove all services starting with '-'.  For example, to install all default
-# services except rabbit, set in ``localrc``:
-# ENABLED_SERVICES+=",-rabbit"
-# Uses global ``ENABLED_SERVICES``
-# disable_negated_services
-function disable_negated_services() {
-    local tmpsvcs="${ENABLED_SERVICES}"
-    local service
-    for service in ${tmpsvcs//,/ }; do
-        if [[ ${service} == -* ]]; then
-            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-}
-
-
-# Distro-agnostic package installer
-# install_package package [package ...]
-function install_package() {
-    if is_ubuntu; then
-        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update
-        NO_UPDATE_REPOS=True
-
-        apt_get install "$@"
-    elif is_fedora; then
-        yum_install "$@"
-    elif is_suse; then
-        zypper_install "$@"
-    else
-        exit_distro_not_supported "installing packages"
-    fi
-}
-
-
-# Distro-agnostic package uninstaller
-# uninstall_package package [package ...]
-function uninstall_package() {
-    if is_ubuntu; then
-        apt_get purge "$@"
-    elif is_fedora; then
-        sudo yum remove -y "$@"
-    elif is_suse; then
-        sudo zypper rm "$@"
-    else
-        exit_distro_not_supported "uninstalling packages"
-    fi
-}
-
-
-# Distro-agnostic function to tell if a package is installed
-# is_package_installed package [package ...]
-function is_package_installed() {
-    if [[ -z "$@" ]]; then
-        return 1
-    fi
-
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        dpkg -s "$@" > /dev/null 2> /dev/null
-    elif [[ "$os_PACKAGE" = "rpm" ]]; then
-        rpm --quiet -q "$@"
-    else
-        exit_distro_not_supported "finding if a package is installed"
-    fi
-}
-
-
-# Test if the named environment variable is set and not zero length
-# is_set env-var
-function is_set() {
-    local var=\$"$1"
-    eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depend on this
-}
-
-
-# Wrapper for ``pip install`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
-# ``TRACK_DEPENDS``, ``*_proxy``
-# pip_install package [package ...]
-function pip_install {
-    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    if [[ $TRACK_DEPENDS = True ]]; then
-        source $DEST/.venv/bin/activate
-        CMD_PIP=$DEST/.venv/bin/pip
-        SUDO_PIP="env"
-    else
-        SUDO_PIP="sudo"
-        CMD_PIP=$(get_pip_command)
-    fi
-
-    # Mirror option not needed anymore because pypi has CDN available,
-    # but it's useful in certain circumstances
-    PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
-    if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
-        PIP_MIRROR_OPT="--use-mirrors"
-    fi
-
-    # pip < 1.4 has a bug where it will use an already existing build
-    # directory unconditionally.  Say an earlier component installs
-    # foo v1.1; pip will have built foo's source in
-    # /tmp/$USER-pip-build.  Even if a later component specifies foo <
-    # 1.1, the existing extracted build will be used and cause
-    # confusing errors.  By creating unique build directories we avoid
-    # this problem. See https://github.com/pypa/pip/issues/709
-    local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
-
-    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
-        HTTP_PROXY=$http_proxy \
-        HTTPS_PROXY=$https_proxy \
-        NO_PROXY=$no_proxy \
-        $CMD_PIP install --build=${pip_build_tmp} \
-        $PIP_MIRROR_OPT $@ \
-        && $SUDO_PIP rm -rf ${pip_build_tmp}
-}
-
 
 # Cleanup anything from /tmp on unstack
 # clean_tmp
@@ -1062,243 +38,6 @@
     sudo rm -rf ${tmp_dir}/pip-build.*
 }
 
-# Service wrapper to restart services
-# restart_service service-name
-function restart_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 restart
-    else
-        sudo /sbin/service $1 restart
-    fi
-}
-
-
-# _run_process() is designed to be backgrounded by run_process() to simulate a
-# fork.  It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it().  The log filename is derived
-# from the service name and global-and-now-misnamed SCREEN_LOGDIR
-# _run_process service "command-line"
-function _run_process() {
-    local service=$1
-    local command="$2"
-
-    # Undo logging redirections and close the extra descriptors
-    exec 1>&3
-    exec 2>&3
-    exec 3>&-
-    exec 6>&-
-
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
-        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-
-        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
-        export PYTHONUNBUFFERED=1
-    fi
-
-    exec /bin/bash -c "$command"
-    die "$service exec failure: $command"
-}
-
-
-# run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command.  This is meant to duplicate the semantics
-# of screen_it() without screen.  PIDs are written to
-# $SERVICE_DIR/$SCREEN_NAME/$service.pid
-# run_process service "command-line"
-function run_process() {
-    local service=$1
-    local command="$2"
-
-    # Spawn the child process
-    _run_process "$service" "$command" &
-    echo $!
-}
-
-
-# Helper to launch a service in a named screen
-# screen_it service "command-line"
-function screen_it {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            screen -S $SCREEN_NAME -X screen -t $1
-
-            if [[ -n ${SCREEN_LOGDIR} ]]; then
-                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-                screen -S $SCREEN_NAME -p $1 -X log on
-                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-            fi
-
-            # sleep to allow bash to be ready to receive the command - we are
-            # creating a new window in screen and then sending characters, so if
-            # bash isn't running by the time we send the command, nothing happens
-            sleep 1.5
-
-            NL=`echo -ne '\015'`
-            # This fun command does the following:
-            # - the passed server command is backgrounded
-            # - the pid of the background process is saved in the usual place
-            # - the server process is brought back to the foreground
-            # - if the server process exits prematurely the fg command errors
-            #   and a message is written to stdout and the service failure file
-            # The pid saved can be used in screen_stop() as a process group
-            # id to kill off all child processes
-            screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
-        else
-            # Spawn directly without screen
-            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
-        fi
-    fi
-}
-
-
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# screen_stop service
-function screen_stop() {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-
-    if is_service_enabled $1; then
-        # Kill via pid if we have one available
-        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
-            pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
-            rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
-        fi
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            # Clean up the screen window
-            screen -S $SCREEN_NAME -p $1 -X kill
-        fi
-    fi
-}
-
-
-# Screen rc file builder
-# screen_rc service "command-line"
-function screen_rc {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-    if [[ ! -e $SCREENRC ]]; then
-        # Name the screen session
-        echo "sessionname $SCREEN_NAME" > $SCREENRC
-        # Set a reasonable statusbar
-        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
-        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
-        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
-        echo "screen -t shell bash" >> $SCREENRC
-    fi
-    # If this service doesn't already exist in the screenrc file
-    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
-        NL=`echo -ne '\015'`
-        echo "screen -t $1 bash" >> $SCREENRC
-        echo "stuff \"$2$NL\"" >> $SCREENRC
-
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC
-            echo "log on" >>$SCREENRC
-        fi
-    fi
-}
-
-
-# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
-# This is used by ``service_check`` after all the ``screen_it`` calls have finished
-# init_service_check
-function init_service_check() {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
-    fi
-
-    rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
-}
-
-
-# Helper to get the status of each running service
-# service_check
-function service_check() {
-    local service
-    local failures
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        echo "No service status directory found"
-        return
-    fi
-
-    # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
-    failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null`
-
-    for service in $failures; do
-        service=`basename $service`
-        service=${service%.failure}
-        echo "Error: Service $service is not running"
-    done
-
-    if [ -n "$failures" ]; then
-        echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
-    fi
-}
-
-# Returns true if the directory is on a filesystem mounted via NFS.
-function is_nfs_directory() {
-    local mount_type=`stat -f -L -c %T $1`
-    test "$mount_type" == "nfs"
-}
-
-# Only run the command if the target file (the last arg) is not on an
-# NFS filesystem.
-function _safe_permission_operation() {
-    local args=( $@ )
-    local last
-    local sudo_cmd
-    local dir_to_check
-
-    let last="${#args[*]} - 1"
-
-    dir_to_check=${args[$last]}
-    if [ ! -d "$dir_to_check" ]; then
-        dir_to_check=`dirname "$dir_to_check"`
-    fi
-
-    if is_nfs_directory "$dir_to_check" ; then
-        return 0
-    fi
-
-    if [[ $TRACK_DEPENDS = True ]]; then
-        sudo_cmd="env"
-    else
-        sudo_cmd="sudo"
-    fi
-
-    $sudo_cmd $@
-}
-
-# Only change ownership of a file or directory if it is not on an NFS
-# filesystem.
-function safe_chown() {
-    _safe_permission_operation chown $@
-}
-
-# Only change permissions of a file or directory if it is not on an
-# NFS filesystem.
-function safe_chmod() {
-    _safe_permission_operation chmod $@
-}
 
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
@@ -1340,6 +79,7 @@
     fi
 }
 
+
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
 # Uses globals ``STACK_USER``
@@ -1353,43 +93,6 @@
 }
 
 
-# Service wrapper to start services
-# start_service service-name
-function start_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 start
-    else
-        sudo /sbin/service $1 start
-    fi
-}
-
-
-# Service wrapper to stop services
-# stop_service service-name
-function stop_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 stop
-    else
-        sudo /sbin/service $1 stop
-    fi
-}
-
-
-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
-function trueorfalse() {
-    local default=$1
-    local testval=$2
-
-    [[ -z "$testval" ]] && { echo "$default"; return; }
-    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
-    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
-    echo "$default"
-}
-
-
 # Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
 #
@@ -1685,23 +388,6 @@
 }
 
 
-# Toggle enable/disable_service for services that must run exclusive of each other
-#  $1 The name of a variable containing a space-separated list of services
-#  $2 The name of a variable in which to store the enabled service's name
-#  $3 The name of the service to enable
-function use_exclusive_service {
-    local options=${!1}
-    local selection=$3
-    out=$2
-    [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
-    for opt in $options;do
-        [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
-    done
-    eval "$out=$selection"
-    return 0
-}
-
-
 # Wait for an HTTP server to start answering requests
 # wait_for_service timeout url
 function wait_for_service() {
@@ -1711,30 +397,6 @@
 }
 
 
-# Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``
-# yum_install package [package ...]
-function yum_install() {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        no_proxy=$no_proxy \
-        yum install -y "$@"
-}
-
-
-# zypper wrapper to set arguments correctly
-# zypper_install package [package ...]
-function zypper_install() {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
-}
-
-
 # ping check
 # Uses globals ``ENABLED_SERVICES``
 # ping_check from-net ip boot-timeout expected
@@ -1809,36 +471,6 @@
 }
 
 
-# Add a user to a group.
-# add_user_to_group user group
-function add_user_to_group() {
-    local user=$1
-    local group=$2
-
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    # SLE11 and openSUSE 12.2 don't have the usual usermod
-    if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
-        sudo usermod -a -G "$group" "$user"
-    else
-        sudo usermod -A "$group" "$user"
-    fi
-}
-
-
-# Get the path to the directory where python executables are installed.
-# get_python_exec_prefix
-function get_python_exec_prefix() {
-    if is_fedora || is_suse; then
-        echo "/usr/bin"
-    else
-        echo "/usr/local/bin"
-    fi
-}
-
-
 # Get the location of the $module-rootwrap executables, where module is cinder
 # or nova.
 # get_rootwrap_location module
@@ -1849,17 +481,6 @@
 }
 
 
-# Get the path to the pip command.
-# get_pip_command
-function get_pip_command() {
-    which pip || which pip-python
-
-    if [ $? -ne 0 ]; then
-        die $LINENO "Unable to find pip; cannot continue"
-    fi
-}
-
-
 # Path permissions sanity check
 # check_path_perm_sanity path
 function check_path_perm_sanity() {
@@ -1944,37 +565,6 @@
 }
 
 
-# ``policy_add policy_file policy_name policy_permissions``
-#
-# Add a policy to a policy.json file
-# Do nothing if the policy already exists
-
-function policy_add() {
-    local policy_file=$1
-    local policy_name=$2
-    local policy_perm=$3
-
-    if grep -q ${policy_name} ${policy_file}; then
-        echo "Policy ${policy_name} already exists in ${policy_file}"
-        return
-    fi
-
-    # Add a terminating comma to policy lines without one
-    # Remove the closing '}' and all lines following to the end-of-file
-    local tmpfile=$(mktemp)
-    uniq ${policy_file} | sed -e '
-        s/]$/],/
-        /^[}]/,$d
-    ' > ${tmpfile}
-
-    # Append policy and closing brace
-    echo "    \"${policy_name}\": ${policy_perm}" >>${tmpfile}
-    echo "}" >>${tmpfile}
-
-    mv ${tmpfile} ${policy_file}
-}
-
-
 # This function sets log formatting options for colorizing log
 # output to stdout. It is meant to be called by lib modules.
 # The last two parameters are optional and can be used to specify
@@ -1994,10 +584,10 @@
     iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
 }
 
+
 # Restore xtrace
 $XTRACE
 
-
 # Local variables:
 # mode: shell-script
 # End:
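
The helpers that used to live in functions move to functions-common below with unchanged behaviour. A small, hypothetical round-trip with the INI helpers defined there (the mktemp file is only for illustration; any config file works the same way):

    conf=$(mktemp)
    iniset $conf DEFAULT verbose True      # adds [DEFAULT] and the option
    iniset $conf DEFAULT verbose False     # replaces the existing value
    iniget $conf DEFAULT verbose           # prints: False
    inicomment $conf DEFAULT verbose       # comments the option out
    rm -f $conf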
diff --git a/functions-common b/functions-common
new file mode 100644
index 0000000..d92e39c
--- /dev/null
+++ b/functions-common
@@ -0,0 +1,1446 @@
+# functions-common - Common functions used by DevStack components
+#
+# The canonical copy of this file is maintained in the DevStack repo.
+# All modifications should be made there and then sync'ed to other repos
+# as required.
+#
+# This file is sorted alphabetically within the function groups.
+#
+# - Config Functions
+# - Control Functions
+# - Distro Functions
+# - Git Functions
+# - OpenStack Functions
+# - Package Functions
+# - Process Functions
+# - Python Functions
+# - Service Functions
+#
+# The following variables are assumed to be defined by certain functions:
+#
+# - ``ENABLED_SERVICES``
+# - ``ERROR_ON_CLONE``
+# - ``FILES``
+# - ``OFFLINE``
+# - ``PIP_DOWNLOAD_CACHE``
+# - ``PIP_USE_MIRRORS``
+# - ``RECLONE``
+# - ``TRACK_DEPENDS``
+# - ``http_proxy``, ``https_proxy``, ``no_proxy``
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Config Functions
+# ================
+
+# Append a new option in an ini file without replacing the old value
+# iniadd config-file section option value1 value2 value3 ...
+function iniadd() {
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values="$(iniget_multiline $file $section $option) $@"
+    iniset_multiline $file $section $option $values
+}
+
+# Comment an option in an INI file
+# inicomment config-file section option
+function inicomment() {
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+}
+
+# Get an option from an INI file
+# iniget config-file section option
+function iniget() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    echo ${line#*=}
+}
+
+# Get a multiple line option from an INI file
+# iniget_multiline config-file section option
+function iniget_multiline() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local values
+    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
+    echo ${values}
+}
+
+# Determine if the given option is present in the INI file
+# ini_has_option config-file section option
+function ini_has_option() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    [ -n "$line" ]
+}
+
+# Set an option in an INI file
+# iniset config-file section option value
+function iniset() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+
+    [[ -z $section || -z $option ]] && return
+
+    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    fi
+    if ! ini_has_option "$file" "$section" "$option"; then
+        # Add it
+        sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+    else
+        local sep=$(echo -ne "\x01")
+        # Replace it
+        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+    fi
+}
+
+# Set a multiple line option in an INI file
+# iniset_multiline config-file section option value1 value2 value3 ...
+function iniset_multiline() {
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values
+    for v in $@; do
+        # The later sed command inserts each new value in the line next to
+        # the section identifier, which causes the values to be inserted in
+        # the reverse order. Do a reverse here to keep the original order.
+        values="$v ${values}"
+    done
+    if ! grep -q "^\[$section\]" "$file"; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    else
+        # Remove old values
+        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+    fi
+    # Add new ones
+    for v in $values; do
+        sed -i -e "/^\[$section\]/ a\\
+$option = $v
+" "$file"
+    done
+}
+
+# Uncomment an option in an INI file
+# iniuncomment config-file section option
+function iniuncomment() {
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+}
+
+# Normalize config values to True or False
+# Accepts as False: 0 no No NO false False FALSE
+# Accepts as True: 1 yes Yes YES true True TRUE
+# VAR=$(trueorfalse default-value test-value)
+function trueorfalse() {
+    local default=$1
+    local testval=$2
+
+    [[ -z "$testval" ]] && { echo "$default"; return; }
+    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
+    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
+    echo "$default"
+}
+
+
+# Control Functions
+# =================
+
+# Prints backtrace info
+# filename:lineno:function
+# backtrace level
+function backtrace {
+    local level=$1
+    local deep=$((${#BASH_SOURCE[@]} - 1))
+    echo "[Call Trace]"
+    while [ $level -le $deep ]; do
+        echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
+        deep=$((deep - 1))
+    done
+}
+
+# Prints line number and "message" then exits
+# die $LINENO "message"
+function die() {
+    local exitcode=$?
+    set +o xtrace
+    local line=$1; shift
+    if [ $exitcode == 0 ]; then
+        exitcode=1
+    fi
+    backtrace 2
+    err $line "$*"
+    exit $exitcode
+}
+
+# Checks an environment variable is not set or has length 0 OR if the
+# exit code is non-zero and prints "message" and exits
+# NOTE: env-var is the variable name without a '$'
+# die_if_not_set $LINENO env-var "message"
+function die_if_not_set() {
+    local exitcode=$?
+    FXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        die $line "$*"
+    fi
+    $FXTRACE
+}
+
+# Prints line number and "message" in error format
+# err $LINENO "message"
+function err() {
+    local exitcode=$?
+    errXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
+    $errXTRACE
+    return $exitcode
+}
+
+# Checks an environment variable is not set or has length 0 OR if the
+# exit code is non-zero and prints "message"
+# NOTE: env-var is the variable name without a '$'
+# err_if_not_set $LINENO env-var "message"
+function err_if_not_set() {
+    local exitcode=$?
+    errinsXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        err $line "$*"
+    fi
+    $errinsXTRACE
+    return $exitcode
+}
+
+# Exit after outputting a message about the distribution not being supported.
+# exit_distro_not_supported [optional-string-telling-what-is-missing]
+function exit_distro_not_supported {
+    if [[ -z "$DISTRO" ]]; then
+        GetDistro
+    fi
+
+    if [ $# -gt 0 ]; then
+        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
+    else
+        die $LINENO "Support for $DISTRO is incomplete."
+    fi
+}
+
+# Test if the named environment variable is set and not zero length
+# is_set env-var
+function is_set() {
+    local var=\$"$1"
+    eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this
+}
+
+# Prints line number and "message" in warning format
+# warn $LINENO "message"
+function warn() {
+    local exitcode=$?
+    errXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
+    $errXTRACE
+    return $exitcode
+}
+
+
+# Distro Functions
+# ================
+
+# Determine OS Vendor, Release and Update
+# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
+# Returns results in global variables:
+# os_VENDOR - vendor name
+# os_RELEASE - release
+# os_UPDATE - update
+# os_PACKAGE - package type
+# os_CODENAME - vendor's codename for release
+# GetOSVersion
+GetOSVersion() {
+    # Figure out which vendor we are
+    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
+        # OS/X
+        os_VENDOR=`sw_vers -productName`
+        os_RELEASE=`sw_vers -productVersion`
+        os_UPDATE=${os_RELEASE##*.}
+        os_RELEASE=${os_RELEASE%.*}
+        os_PACKAGE=""
+        if [[ "$os_RELEASE" =~ "10.7" ]]; then
+            os_CODENAME="lion"
+        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
+            os_CODENAME="snow leopard"
+        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
+            os_CODENAME="leopard"
+        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
+            os_CODENAME="tiger"
+        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
+            os_CODENAME="panther"
+        else
+            os_CODENAME=""
+        fi
+    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
+        os_VENDOR=$(lsb_release -i -s)
+        os_RELEASE=$(lsb_release -r -s)
+        os_UPDATE=""
+        os_PACKAGE="rpm"
+        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
+            os_PACKAGE="deb"
+        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
+            lsb_release -d -s | grep -q openSUSE
+            if [[ $? -eq 0 ]]; then
+                os_VENDOR="openSUSE"
+            fi
+        elif [[ $os_VENDOR == "openSUSE project" ]]; then
+            os_VENDOR="openSUSE"
+        elif [[ $os_VENDOR =~ Red.*Hat ]]; then
+            os_VENDOR="Red Hat"
+        fi
+        os_CODENAME=$(lsb_release -c -s)
+    elif [[ -r /etc/redhat-release ]]; then
+        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
+        # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
+        # CentOS release 5.5 (Final)
+        # CentOS Linux release 6.0 (Final)
+        # Fedora release 16 (Verne)
+        # XenServer release 6.2.0-70446c (xenenterprise)
+        os_CODENAME=""
+        for r in "Red Hat" CentOS Fedora XenServer; do
+            os_VENDOR=$r
+            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
+                ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
+                os_CODENAME=${ver#*|}
+                os_RELEASE=${ver%|*}
+                os_UPDATE=${os_RELEASE##*.}
+                os_RELEASE=${os_RELEASE%.*}
+                break
+            fi
+            os_VENDOR=""
+        done
+        os_PACKAGE="rpm"
+    elif [[ -r /etc/SuSE-release ]]; then
+        for r in openSUSE "SUSE Linux"; do
+            if [[ "$r" = "SUSE Linux" ]]; then
+                os_VENDOR="SUSE LINUX"
+            else
+                os_VENDOR=$r
+            fi
+
+            if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
+                os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
+                os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
+                os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
+                break
+            fi
+            os_VENDOR=""
+        done
+        os_PACKAGE="rpm"
+    # If lsb_release is not installed, we should be able to detect Debian OS
+    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
+        os_VENDOR="Debian"
+        os_PACKAGE="deb"
+        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
+        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
+    fi
+    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
+}
+
+# Translate the OS version values into common nomenclature
+# Sets global ``DISTRO`` from the ``os_*`` values
+function GetDistro() {
+    GetOSVersion
+    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
+        DISTRO=$os_CODENAME
+    elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
+        # For Fedora, just use 'f' and the release
+        DISTRO="f$os_RELEASE"
+    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
+        DISTRO="opensuse-$os_RELEASE"
+    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+        # For SLE, also use the service pack
+        if [[ -z "$os_UPDATE" ]]; then
+            DISTRO="sle${os_RELEASE}"
+        else
+            DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
+        fi
+    elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
+        # Drop the . release as we assume it's compatible
+        DISTRO="rhel${os_RELEASE::1}"
+    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
+        DISTRO="xs$os_RELEASE"
+    else
+        # Catch-all for now is Vendor + Release + Update
+        DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
+    fi
+    export DISTRO
+}
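+
+# For illustration only: with the mapping above, Ubuntu 12.04 yields DISTRO="precise",
+# Fedora 19 yields DISTRO="f19" and RHEL 6.x yields DISTRO="rhel6".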
+
+# Utility function for checking machine architecture
+# is_arch arch-type
+function is_arch {
+    ARCH_TYPE=$1
+
+    [[ "$(uname -m)" == "$ARCH_TYPE" ]]
+}
+
+# Determine if current distribution is a Fedora-based distribution
+# (Fedora, RHEL, CentOS, etc).
+# is_fedora
+function is_fedora {
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ]
+}
+
+
+# Determine if current distribution is a SUSE-based distribution
+# (openSUSE, SLE).
+# is_suse
+function is_suse {
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
+}
+
+
+# Determine if current distribution is an Ubuntu-based distribution
+# It will also detect non-Ubuntu but Debian-based distros
+# is_ubuntu
+function is_ubuntu {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    [ "$os_PACKAGE" = "deb" ]
+}
+
+
+# Git Functions
+# =============
+
+# Returns openstack release name for a given branch name
+# ``get_release_name_from_branch branch-name``
+function get_release_name_from_branch() {
+    local branch=$1
+    if [[ $branch =~ "stable/" ]]; then
+        echo ${branch#*/}
+    else
+        echo "master"
+    fi
+}
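+
+# Illustrative examples (not part of the original code):
+#   get_release_name_from_branch stable/havana   # prints "havana"
+#   get_release_name_from_branch master          # prints "master"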
+
+# git clone only if directory doesn't exist already.  Since ``DEST`` might not
+# be owned by the installation user, we create the directory and change the
+# ownership to the proper user.
+# Set global RECLONE=yes to simulate a clone when dest-dir exists
+# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# does not exist (default is False, meaning the repo will be cloned).
+# Uses global ``OFFLINE``
+# git_clone remote dest-dir branch
+function git_clone {
+    GIT_REMOTE=$1
+    GIT_DEST=$2
+    GIT_REF=$3
+    RECLONE=$(trueorfalse False $RECLONE)
+
+    if [[ "$OFFLINE" = "True" ]]; then
+        echo "Running in offline mode, clones already exist"
+        # print out the results so we know what change was used in the logs
+        cd $GIT_DEST
+        git show --oneline | head -1
+        return
+    fi
+
+    if echo $GIT_REF | egrep -q "^refs"; then
+        # If our branch name is a gerrit style refs/changes/...
+        if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && \
+                die $LINENO "Cloning not allowed in this configuration"
+            git clone $GIT_REMOTE $GIT_DEST
+        fi
+        cd $GIT_DEST
+        git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+    else
+        # do a full clone only if the directory doesn't exist
+        if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && \
+                die $LINENO "Cloning not allowed in this configuration"
+            git clone $GIT_REMOTE $GIT_DEST
+            cd $GIT_DEST
+            # This checkout syntax works for both branches and tags
+            git checkout $GIT_REF
+        elif [[ "$RECLONE" = "True" ]]; then
+            # if it does exist then simulate what clone does if asked to RECLONE
+            cd $GIT_DEST
+            # set the url to pull from and fetch
+            git remote set-url origin $GIT_REMOTE
+            git fetch origin
+            # remove the existing ignored files (like pyc) as they cause breakage:
+            # the py files may have older timestamps than our pyc files, so python
+            # thinks the pyc files are up to date and keeps using them
+            find $GIT_DEST -name '*.pyc' -delete
+
+            # handle GIT_REF accordingly to type (tag, branch)
+            if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
+                git_update_tag $GIT_REF
+            elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
+                git_update_branch $GIT_REF
+            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
+                git_update_remote_branch $GIT_REF
+            else
+                die $LINENO "$GIT_REF is neither branch nor tag"
+            fi
+
+        fi
+    fi
+
+    # print out the results so we know what change was used in the logs
+    cd $GIT_DEST
+    git show --oneline | head -1
+}
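+
+# Illustrative usage ($NOVA_REPO, $NOVA_DIR and $NOVA_BRANCH are assumed to be
+# defined elsewhere in DevStack, not in this file):
+#   git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
+# With RECLONE=yes an existing checkout is re-fetched and reset to the given ref.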
+
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch() {
+
+    GIT_BRANCH=$1
+
+    git checkout -f origin/$GIT_BRANCH
+    # a local branch might not exist
+    git branch -D $GIT_BRANCH || true
+    git checkout -b $GIT_BRANCH
+}
+
+# git update using reference as a branch.
+# git_update_remote_branch ref
+function git_update_remote_branch() {
+
+    GIT_BRANCH=$1
+
+    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+}
+
+# git update using reference as a tag. Be careful editing source in that repo
+# as the working copy will be in a detached HEAD state
+# git_update_tag ref
+function git_update_tag() {
+
+    GIT_TAG=$1
+
+    git tag -d $GIT_TAG
+    # fetching given tag only
+    git fetch origin tag $GIT_TAG
+    git checkout -f $GIT_TAG
+}
+
+
+# OpenStack Functions
+# ===================
+
+# Get the default value for HOST_IP
+# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
+function get_default_host_ip() {
+    local fixed_range=$1
+    local floating_range=$2
+    local host_ip_iface=$3
+    local host_ip=$4
+
+    # Find the interface used for the default route
+    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
+    # Search for an IP unless an explicit one is set by the ``HOST_IP`` environment variable
+    if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
+        host_ip=""
+        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
+        for IP in $host_ips; do
+            # Attempt to filter out IP addresses that are part of the fixed and
+            # floating range. Note that this method only works if the ``netaddr``
+            # python library is installed. If it is not installed, an error
+            # will be printed and the first IP from the interface will be used.
+            # If that is not correct, set ``HOST_IP`` in ``localrc`` to the correct
+            # address.
+            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
+                host_ip=$IP
+                break;
+            fi
+        done
+    fi
+    echo $host_ip
+}
+
+# Grab a numbered field from python prettytable output
+# Fields are numbered starting with 1
+# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
+# get_field field-number
+function get_field() {
+    while read data; do
+        if [ "$1" -lt 0 ]; then
+            field="(\$(NF$1))"
+        else
+            field="\$$(($1 + 1))"
+        fi
+        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
+    done
+}
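+
+# Illustrative usage (hypothetical command, not from the original code):
+#   ID=$(openstack project list | grep " demo " | get_field 1)
+# picks the first column (the ID) out of the prettytable row matching "demo".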
+
+# Add a policy to a policy.json file
+# Do nothing if the policy already exists
+# ``policy_add policy_file policy_name policy_permissions``
+function policy_add() {
+    local policy_file=$1
+    local policy_name=$2
+    local policy_perm=$3
+
+    if grep -q ${policy_name} ${policy_file}; then
+        echo "Policy ${policy_name} already exists in ${policy_file}"
+        return
+    fi
+
+    # Add a terminating comma to policy lines without one
+    # Remove the closing '}' and all lines following to the end-of-file
+    local tmpfile=$(mktemp)
+    uniq ${policy_file} | sed -e '
+        s/]$/],/
+        /^[}]/,$d
+    ' > ${tmpfile}
+
+    # Append policy and closing brace
+    echo "    \"${policy_name}\": ${policy_perm}" >>${tmpfile}
+    echo "}" >>${tmpfile}
+
+    mv ${tmpfile} ${policy_file}
+}
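+
+# Illustrative usage (hypothetical policy entry, not from the original code):
+#   policy_add $NOVA_CONF_DIR/policy.json compute_extension:admin_actions '"rule:admin_api"'
+# appends the rule just before the closing brace unless it is already present.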
+
+
+# Package Functions
+# =================
+
+# _get_package_dir
+function _get_package_dir() {
+    local pkg_dir
+    if is_ubuntu; then
+        pkg_dir=$FILES/apts
+    elif is_fedora; then
+        pkg_dir=$FILES/rpms
+    elif is_suse; then
+        pkg_dir=$FILES/rpms-suse
+    else
+        exit_distro_not_supported "list of packages"
+    fi
+    echo "$pkg_dir"
+}
+
+# Wrapper for ``apt-get`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# apt_get operation package [package ...]
+function apt_get() {
+    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo DEBIAN_FRONTEND=noninteractive \
+        http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
+        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+}
+
+# get_packages() collects a list of package names of any type from the
+# prerequisite files in ``files/{apts|rpms}``.  The list is intended
+# to be passed to a package installer such as apt or yum.
+#
+# Only packages required for the services in the 1st argument will be
+# included.  Two bits of metadata are recognized in the prerequisite files:
+#
+# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
+# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
+#   of the package to the distros listed.  The distro names are case insensitive.
+function get_packages() {
+    local services=$@
+    local package_dir=$(_get_package_dir)
+    local file_to_parse
+    local service
+
+    if [[ -z "$package_dir" ]]; then
+        echo "No package directory supplied"
+        return 1
+    fi
+    if [[ -z "$DISTRO" ]]; then
+        GetDistro
+    fi
+    for service in ${services//,/ }; do
+        # Allow individual services to specify dependencies
+        if [[ -e ${package_dir}/${service} ]]; then
+            file_to_parse="${file_to_parse} $service"
+        fi
+        # NOTE(sdague) n-api needs glance for now because that's where
+        # glance client is
+        if [[ $service == n-api ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == c-* ]]; then
+            if [[ ! $file_to_parse =~ cinder ]]; then
+                file_to_parse="${file_to_parse} cinder"
+            fi
+        elif [[ $service == ceilometer-* ]]; then
+            if [[ ! $file_to_parse =~ ceilometer ]]; then
+                file_to_parse="${file_to_parse} ceilometer"
+            fi
+        elif [[ $service == s-* ]]; then
+            if [[ ! $file_to_parse =~ swift ]]; then
+                file_to_parse="${file_to_parse} swift"
+            fi
+        elif [[ $service == n-* ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+        elif [[ $service == g-* ]]; then
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == key* ]]; then
+            if [[ ! $file_to_parse =~ keystone ]]; then
+                file_to_parse="${file_to_parse} keystone"
+            fi
+        elif [[ $service == q-* ]]; then
+            if [[ ! $file_to_parse =~ neutron ]]; then
+                file_to_parse="${file_to_parse} neutron"
+            fi
+        fi
+    done
+
+    for file in ${file_to_parse}; do
+        local fname=${package_dir}/${file}
+        local OIFS line package distros distro
+        [[ -e $fname ]] || continue
+
+        OIFS=$IFS
+        IFS=$'\n'
+        for line in $(<${fname}); do
+            if [[ $line =~ "NOPRIME" ]]; then
+                continue
+            fi
+
+            # Assume we want this package
+            package=${line%#*}
+            inst_pkg=1
+
+            # Look for # dist:xxx in comment
+            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
+                # We are using BASH regexp matching feature.
+                package=${BASH_REMATCH[1]}
+                distros=${BASH_REMATCH[2]}
+                # In bash ${VAR,,} will lowercase VAR
+                # Look for a match in the distro list
+                if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
+                    # If no match then skip this package
+                    inst_pkg=0
+                fi
+            fi
+
+            # Look for # testonly in comment
+            if [[ $line =~ (.*)#.*testonly.* ]]; then
+                package=${BASH_REMATCH[1]}
+                # Are we installing test packages? (test for the default value)
+                if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then
+                    # If not installing test packages then skip this package
+                    inst_pkg=0
+                fi
+            fi
+
+            if [[ $inst_pkg = 1 ]]; then
+                echo $package
+            fi
+        done
+        IFS=$OIFS
+    done
+}
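+
+# For illustration only, hypothetical lines in a files/apts/<service> file using
+# the metadata described above:
+#   libvirt-bin       # NOPRIME
+#   qemu-utils        # dist:precise,saucy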
+
+# Distro-agnostic package installer
+# install_package package [package ...]
+function install_package() {
+    if is_ubuntu; then
+        # if there are transient errors pulling the updates, that's fine. It may
+        # be secondary repositories that we don't really care about.
+        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true
+        NO_UPDATE_REPOS=True
+
+        apt_get install "$@"
+    elif is_fedora; then
+        yum_install "$@"
+    elif is_suse; then
+        zypper_install "$@"
+    else
+        exit_distro_not_supported "installing packages"
+    fi
+}
+
+# Distro-agnostic function to tell if a package is installed
+# is_package_installed package [package ...]
+function is_package_installed() {
+    if [[ -z "$@" ]]; then
+        return 1
+    fi
+
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        dpkg -s "$@" > /dev/null 2> /dev/null
+    elif [[ "$os_PACKAGE" = "rpm" ]]; then
+        rpm --quiet -q "$@"
+    else
+        exit_distro_not_supported "finding if a package is installed"
+    fi
+}
+
+# Distro-agnostic package uninstaller
+# uninstall_package package [package ...]
+function uninstall_package() {
+    if is_ubuntu; then
+        apt_get purge "$@"
+    elif is_fedora; then
+        sudo yum remove -y "$@"
+    elif is_suse; then
+        sudo zypper rm "$@"
+    else
+        exit_distro_not_supported "uninstalling packages"
+    fi
+}
+
+# Wrapper for ``yum`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# yum_install package [package ...]
+function yum_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
+        yum install -y "$@"
+}
+
+# zypper wrapper to set arguments correctly
+# zypper_install package [package ...]
+function zypper_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        zypper --non-interactive install --auto-agree-with-licenses "$@"
+}
+
+
+# Process Functions
+# =================
+
+# _run_process() is designed to be backgrounded by run_process() to simulate a
+# fork.  It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it().  The log filename is derived
+# from the service name and global-and-now-misnamed SCREEN_LOGDIR
+# _run_process service "command-line"
+function _run_process() {
+    local service=$1
+    local command="$2"
+
+    # Undo logging redirections and close the extra descriptors
+    exec 1>&3
+    exec 2>&3
+    exec 3>&-
+    exec 6>&-
+
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
+        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+
+        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+        export PYTHONUNBUFFERED=1
+    fi
+
+    exec /bin/bash -c "$command"
+    die "$service exec failure: $command"
+}
+
+# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
+# This is used by ``service_check`` after all of the ``screen_it`` calls have finished
+# init_service_check
+function init_service_check() {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
+        mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
+    fi
+
+    rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
+}
+
+# Find out if a process exists by partial name.
+# is_running name
+function is_running() {
+    local name=$1
+    ps auxw | grep -v grep | grep ${name} > /dev/null
+    RC=$?
+    # sometimes I really hate bash reverse binary logic
+    return $RC
+}
+
+# run_process() launches a child process that closes all file descriptors and
+# then exec's the passed in command.  This is meant to duplicate the semantics
+# of screen_it() without screen.  PIDs are written to
+# $SERVICE_DIR/$SCREEN_NAME/$service.pid
+# run_process service "command-line"
+function run_process() {
+    local service=$1
+    local command="$2"
+
+    # Spawn the child process
+    _run_process "$service" "$command" &
+    echo $!
+}
+
+# Helper to launch a service in a named screen
+# screen_it service "command-line"
+function screen_it {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+    if is_service_enabled $1; then
+        # Append the service to the screen rc file
+        screen_rc "$1" "$2"
+
+        if [[ "$USE_SCREEN" = "True" ]]; then
+            screen -S $SCREEN_NAME -X screen -t $1
+
+            if [[ -n ${SCREEN_LOGDIR} ]]; then
+                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+                screen -S $SCREEN_NAME -p $1 -X log on
+                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+            fi
+
+            # sleep to allow bash to be ready to receive the command - we are
+            # creating a new window in screen and then sending characters, so if
+            # bash isn't running by the time we send the command, nothing happens
+            sleep 1.5
+
+            NL=`echo -ne '\015'`
+            # This fun command does the following:
+            # - the passed server command is backgrounded
+            # - the pid of the background process is saved in the usual place
+            # - the server process is brought back to the foreground
+            # - if the server process exits prematurely the fg command errors
+            #   and a message is written to stdout and the service failure file
+            # The pid saved can be used in screen_stop() as a process group
+            # id to kill off all child processes
+            screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
+        else
+            # Spawn directly without screen
+            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+        fi
+    fi
+}
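+
+# Illustrative usage (hypothetical service; $NOVA_DIR and $NOVA_BIN_DIR assumed
+# to be defined elsewhere in DevStack):
+#   screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
+# adds an n-api window to the $SCREEN_NAME session, or falls back to run_process
+# when USE_SCREEN is False.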
+
+# Screen rc file builder
+# screen_rc service "command-line"
+function screen_rc {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
+    if [[ ! -e $SCREENRC ]]; then
+        # Name the screen session
+        echo "sessionname $SCREEN_NAME" > $SCREENRC
+        # Set a reasonable statusbar
+        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
+        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
+        echo "screen -t shell bash" >> $SCREENRC
+    fi
+    # If this service doesn't already exist in the screenrc file
+    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+        NL=`echo -ne '\015'`
+        echo "screen -t $1 bash" >> $SCREENRC
+        echo "stuff \"$2$NL\"" >> $SCREENRC
+
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC
+            echo "log on" >>$SCREENRC
+        fi
+    fi
+}
+
+# Stop a service in screen
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# screen_stop service
+function screen_stop() {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+    if is_service_enabled $1; then
+        # Kill via pid if we have one available
+        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
+            pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
+            rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
+        fi
+        if [[ "$USE_SCREEN" = "True" ]]; then
+            # Clean up the screen window
+            screen -S $SCREEN_NAME -p $1 -X kill
+        fi
+    fi
+}
+
+# Helper to get the status of each running service
+# service_check
+function service_check() {
+    local service
+    local failures
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+
+    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
+        echo "No service status directory found"
+        return
+    fi
+
+    # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
+    failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null`
+
+    for service in $failures; do
+        service=`basename $service`
+        service=${service%.failure}
+        echo "Error: Service $service is not running"
+    done
+
+    if [ -n "$failures" ]; then
+        echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
+    fi
+}
+
+
+# Python Functions
+# ================
+
+# Get the path to the pip command.
+# get_pip_command
+function get_pip_command() {
+    which pip || which pip-python
+
+    if [ $? -ne 0 ]; then
+        die $LINENO "Unable to find pip; cannot continue"
+    fi
+}
+
+# Get the path to the directory where python executables are installed.
+# get_python_exec_prefix
+function get_python_exec_prefix() {
+    if is_fedora || is_suse; then
+        echo "/usr/bin"
+    else
+        echo "/usr/local/bin"
+    fi
+}
+
+# Wrapper for ``pip install`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
+# ``TRACK_DEPENDS``, ``*_proxy``
+# pip_install package [package ...]
+function pip_install {
+    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    if [[ $TRACK_DEPENDS = True ]]; then
+        source $DEST/.venv/bin/activate
+        CMD_PIP=$DEST/.venv/bin/pip
+        SUDO_PIP="env"
+    else
+        SUDO_PIP="sudo"
+        CMD_PIP=$(get_pip_command)
+    fi
+
+    # Mirror option not needed anymore because pypi has CDN available,
+    # but it's useful in certain circumstances
+    PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
+    if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
+        PIP_MIRROR_OPT="--use-mirrors"
+    fi
+
+    # pip < 1.4 has a bug where it will use an already existing build
+    # directory unconditionally.  Say an earlier component installs
+    # foo v1.1; pip will have built foo's source in
+    # /tmp/$USER-pip-build.  Even if a later component specifies foo <
+    # 1.1, the existing extracted build will be used and cause
+    # confusing errors.  By creating unique build directories we avoid
+    # this problem. See https://github.com/pypa/pip/issues/709
+    local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
+
+    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+        HTTP_PROXY=$http_proxy \
+        HTTPS_PROXY=$https_proxy \
+        NO_PROXY=$no_proxy \
+        $CMD_PIP install --build=${pip_build_tmp} \
+        $PIP_MIRROR_OPT $@ \
+        && $SUDO_PIP rm -rf ${pip_build_tmp}
+}
+
+
+# Service Functions
+# =================
+
+# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
+# _cleanup_service_list service-list
+function _cleanup_service_list () {
+    echo "$1" | sed -e '
+        s/,,/,/g;
+        s/^,//;
+        s/,$//
+    '
+}
+
+# disable_all_services() removes all current services
+# from ``ENABLED_SERVICES`` to reset the configuration
+# before a minimal installation
+# Uses global ``ENABLED_SERVICES``
+# disable_all_services
+function disable_all_services() {
+    ENABLED_SERVICES=""
+}
+
+# Remove all services starting with '-'.  For example, to install all default
+# services except rabbit, set the following in ``localrc``:
+# ENABLED_SERVICES+=",-rabbit"
+# Uses global ``ENABLED_SERVICES``
+# disable_negated_services
+function disable_negated_services() {
+    local tmpsvcs="${ENABLED_SERVICES}"
+    local service
+    for service in ${tmpsvcs//,/ }; do
+        if [[ ${service} == -* ]]; then
+            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+        fi
+    done
+    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+}
+
+# disable_service() removes the services passed as argument to the
+# ``ENABLED_SERVICES`` list, if they are present.
+#
+# For example:
+#   disable_service rabbit
+#
+# This function does not know about the special cases
+# for nova, glance, and neutron built into is_service_enabled().
+# Uses global ``ENABLED_SERVICES``
+# disable_service service [service ...]
+function disable_service() {
+    local tmpsvcs=",${ENABLED_SERVICES},"
+    local service
+    for service in $@; do
+        if is_service_enabled $service; then
+            tmpsvcs=${tmpsvcs//,$service,/,}
+        fi
+    done
+    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+}
+
+# enable_service() adds the services passed as argument to the
+# ``ENABLED_SERVICES`` list, if they are not already present.
+#
+# For example:
+#   enable_service qpid
+#
+# This function does not know about the special cases
+# for nova, glance, and neutron built into is_service_enabled().
+# Uses global ``ENABLED_SERVICES``
+# enable_service service [service ...]
+function enable_service() {
+    local tmpsvcs="${ENABLED_SERVICES}"
+    for service in $@; do
+        if ! is_service_enabled $service; then
+            tmpsvcs+=",$service"
+        fi
+    done
+    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+    disable_negated_services
+}
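+
+# Illustrative usage (not part of the original code):
+#   enable_service q-svc q-agt
+#   disable_service n-net
+# adds or removes the named entries in ENABLED_SERVICES without duplicates.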
+
+# is_service_enabled() checks if the service(s) specified as arguments are
+# enabled by the user in ``ENABLED_SERVICES``.
+#
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e it returns on the first match.
+#
+# There are special cases for some 'catch-all' services::
+#   **nova** returns true if any enabled service starts with **n-**
+#   **cinder** returns true if any enabled service starts with **c-**
+#   **ceilometer** returns true if any enabled service starts with **ceilometer**
+#   **glance** returns true if any enabled service starts with **g-**
+#   **neutron** returns true if any enabled service starts with **q-**
+#   **swift** returns true if any enabled service starts with **s-**
+#   **trove** returns true if any enabled service starts with **tr-**
+#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
+#   **s-** services will be enabled. This will be deprecated in the future.
+#
+# Cells support within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
+# We also need to make sure to treat **n-cell-region** and **n-cell-child**
+# as enabled in this case.
+#
+# Uses global ``ENABLED_SERVICES``
+# is_service_enabled service [service ...]
+function is_service_enabled() {
+    services=$@
+    for service in ${services}; do
+        [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+
+        # Look for top-level 'enabled' function for this service
+        if type is_${service}_enabled >/dev/null 2>&1; then
+            # A function exists for this service, use it
+            is_${service}_enabled
+            return $?
+        fi
+
+        # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
+        #                are implemented
+
+        [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
+        [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
+        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
+        [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
+        [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
+        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
+        [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+        [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
+        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
+        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
+    done
+    return 1
+}
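+
+# Illustrative usage (hypothetical helper, not from the original code):
+#   if is_service_enabled neutron; then
+#       configure_neutron_networks   # runs when any q-* service is enabled
+#   fi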
+
+# Toggle enable/disable_service for services that must run exclusive of each other
+#  $1 The name of a variable containing a space-separated list of services
+#  $2 The name of a variable in which to store the enabled service's name
+#  $3 The name of the service to enable
+function use_exclusive_service {
+    local options=${!1}
+    local selection=$3
+    out=$2
+    [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
+    for opt in $options;do
+        [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
+    done
+    eval "$out=$selection"
+    return 0
+}
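+
+# Illustrative usage (hypothetical variables, not from the original code):
+#   MSG_BACKENDS="rabbit qpid zeromq"
+#   use_exclusive_service MSG_BACKENDS MSG_BACKEND qpid
+# enables qpid, disables rabbit and zeromq, and sets MSG_BACKEND=qpid.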
+
+
+# System Function
+# ===============
+
+# Only run the command if the target file (the last arg) is not on an
+# NFS filesystem.
+function _safe_permission_operation() {
+    local args=( $@ )
+    local last
+    local sudo_cmd
+    local dir_to_check
+
+    let last="${#args[*]} - 1"
+
+    dir_to_check=${args[$last]}
+    if [ ! -d "$dir_to_check" ]; then
+        dir_to_check=`dirname "$dir_to_check"`
+    fi
+
+    if is_nfs_directory "$dir_to_check" ; then
+        return 0
+    fi
+
+    if [[ $TRACK_DEPENDS = True ]]; then
+        sudo_cmd="env"
+    else
+        sudo_cmd="sudo"
+    fi
+
+    $sudo_cmd $@
+}
+
+# Exit 0 if address is in network or 1 if address is not in network
+# ip-range is in CIDR notation: 1.2.3.4/20
+# address_in_net ip-address ip-range
+function address_in_net() {
+    local ip=$1
+    local range=$2
+    local masklen=${range#*/}
+    local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
+    local subnet=$(maskip $ip $(cidr2netmask $masklen))
+    [[ $network == $subnet ]]
+}
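+
+# For illustration only: address_in_net 10.0.0.5 10.0.0.0/24 returns success,
+# while address_in_net 10.0.1.5 10.0.0.0/24 returns failure.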
+
+# Add a user to a group.
+# add_user_to_group user group
+function add_user_to_group() {
+    local user=$1
+    local group=$2
+
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    # SLE11 and openSUSE 12.2 don't have the usual usermod
+    if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
+        sudo usermod -a -G "$group" "$user"
+    else
+        sudo usermod -A "$group" "$user"
+    fi
+}
+
+# Convert CIDR notation to an IPv4 netmask
+# cidr2netmask cidr-bits
+function cidr2netmask() {
+    local maskpat="255 255 255 255"
+    local maskdgt="254 252 248 240 224 192 128"
+    set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3}
+    echo ${1-0}.${2-0}.${3-0}.${4-0}
+}
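+
+# For illustration only: cidr2netmask 24 prints "255.255.255.0" and
+# cidr2netmask 20 prints "255.255.240.0".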
+
+# Gracefully cp only if source file/dir exists
+# cp_it source destination
+function cp_it {
+    if [ -e $1 ] || [ -d $1 ]; then
+        cp -pRL $1 $2
+    fi
+}
+
+# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
+# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
+# ``localrc`` or on the command line if necessary::
+#
+#     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
+#
+# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
+
+function export_proxy_variables() {
+    if [[ -n "$http_proxy" ]]; then
+        export http_proxy=$http_proxy
+    fi
+    if [[ -n "$https_proxy" ]]; then
+        export https_proxy=$https_proxy
+    fi
+    if [[ -n "$no_proxy" ]]; then
+        export no_proxy=$no_proxy
+    fi
+}
+
+# Returns true if the directory is on a filesystem mounted via NFS.
+function is_nfs_directory() {
+    local mount_type=`stat -f -L -c %T $1`
+    test "$mount_type" == "nfs"
+}
+
+# Return the network portion of the given IP address using netmask
+# netmask is in the traditional dotted-quad format
+# maskip ip-address netmask
+function maskip() {
+    local ip=$1
+    local mask=$2
+    local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
+    local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
+    echo $subnet
+}
+
+# Service wrapper to restart services
+# restart_service service-name
+function restart_service() {
+    if is_ubuntu; then
+        sudo /usr/sbin/service $1 restart
+    else
+        sudo /sbin/service $1 restart
+    fi
+}
+
+# Only change permissions of a file or directory if it is not on an
+# NFS filesystem.
+function safe_chmod() {
+    _safe_permission_operation chmod $@
+}
+
+# Only change ownership of a file or directory if it is not on an NFS
+# filesystem.
+function safe_chown() {
+    _safe_permission_operation chown $@
+}
+
+# Service wrapper to start services
+# start_service service-name
+function start_service() {
+    if is_ubuntu; then
+        sudo /usr/sbin/service $1 start
+    else
+        sudo /sbin/service $1 start
+    fi
+}
+
+# Service wrapper to stop services
+# stop_service service-name
+function stop_service() {
+    if is_ubuntu; then
+        sudo /usr/sbin/service $1 stop
+    else
+        sudo /sbin/service $1 stop
+    fi
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/ceilometer b/lib/ceilometer
index 4ca77bb..6c87d03 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -71,33 +71,33 @@
 
 create_ceilometer_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ceilometer
     if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
-        CEILOMETER_USER=$(keystone user-create \
-            --name=ceilometer \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant_id $SERVICE_TENANT \
-            --email=ceilometer@example.com \
+        CEILOMETER_USER=$(openstack user create \
+            ceilometer \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email ceilometer@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $CEILOMETER_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $CEILOMETER_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CEILOMETER_SERVICE=$(keystone service-create \
-                --name=ceilometer \
+            CEILOMETER_SERVICE=$(openstack service create \
+                ceilometer \
                 --type=metering \
                 --description="OpenStack Telemetry Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CEILOMETER_SERVICE \
                 --region RegionOne \
-                --service_id $CEILOMETER_SERVICE \
-                --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
-                --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
-                --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT"
+                --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
+                --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
+                --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
         fi
     fi
 }
diff --git a/lib/cinder b/lib/cinder
index e99f893..c8c90c0 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -184,43 +184,28 @@
 function configure_cinder_rootwrap() {
     # Set the paths of certain binaries
     CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
-    if [[ ! -x $CINDER_ROOTWRAP ]]; then
-        CINDER_ROOTWRAP=$(get_rootwrap_location oslo)
-        if [[ ! -x $CINDER_ROOTWRAP ]]; then
-            die $LINENO "No suitable rootwrap found."
-        fi
-    fi
 
-    # If Cinder ships the new rootwrap filters files, deploy them
-    # (owned by root) and add a parameter to $CINDER_ROOTWRAP
-    ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP"
-    if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then
-        # Wipe any existing rootwrap.d files first
-        if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then
-            sudo rm -rf $CINDER_CONF_DIR/rootwrap.d
-        fi
-        # Deploy filters to /etc/cinder/rootwrap.d
-        sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d
-        sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d
-        sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d
-        sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/*
-        # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d
-        if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then
-            sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
-        else
-            # rootwrap.conf is no longer shipped in Cinder itself
-            echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null
-        fi
-        sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf
-        sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
-        sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
-        # Specify rootwrap.conf as first parameter to rootwrap
-        CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf"
-        ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *"
+    # Deploy new rootwrap filters files (owned by root).
+    # Wipe any existing rootwrap.d files first
+    if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then
+        sudo rm -rf $CINDER_CONF_DIR/rootwrap.d
     fi
+    # Deploy filters to /etc/cinder/rootwrap.d
+    sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d
+    sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d
+    sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d
+    sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/*
+    # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d
+    sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
+    sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf
+    sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
+    sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
+    # Specify rootwrap.conf as first parameter to rootwrap
+    ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *"
 
+    # Set up the rootwrap sudoers for cinder
     TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
@@ -345,45 +330,44 @@
 # Migrated from keystone_data.sh
 create_cinder_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Cinder
     if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-        CINDER_USER=$(keystone user-create \
-            --name=cinder \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=cinder@example.com \
+        CINDER_USER=$(openstack user create \
+            cinder \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email cinder@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $CINDER_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $CINDER_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CINDER_SERVICE=$(keystone service-create \
-                --name=cinder \
+            CINDER_SERVICE=$(openstack service create \
+                cinder \
                 --type=volume \
                 --description="Cinder Volume Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CINDER_SERVICE \
                 --region RegionOne \
-                --service_id $CINDER_SERVICE \
                 --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
-            CINDER_V2_SERVICE=$(keystone service-create \
-                --name=cinderv2 \
+            CINDER_V2_SERVICE=$(openstack service create \
+                cinderv2 \
                 --type=volumev2 \
                 --description="Cinder Volume Service V2" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CINDER_V2_SERVICE \
                 --region RegionOne \
-                --service_id $CINDER_V2_SERVICE \
                 --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
-
         fi
     fi
 }
diff --git a/lib/heat b/lib/heat
index 9f5dd8b..efb01ef 100644
--- a/lib/heat
+++ b/lib/heat
@@ -110,6 +110,15 @@
     iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
     iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 
+    # stack user domain
+    # Note we have to pass token/endpoint here because the current endpoint and
+    # version negotiation in OSC means just --os-identity-api-version=3 won't work
+    KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
+    D_ID=$(openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        --os-identity-api-version=3 domain show heat \
+        | grep ' id ' | get_field 2)
+    iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+
     # paste_deploy
     [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone
 
@@ -196,6 +205,17 @@
     upload_image "http://localhost/$output.qcow2" $TOKEN
 }
 
+# create_heat_accounts() - Set up common required heat accounts
+# Note this is in addition to what is in files/keystone_data.sh
+function create_heat_accounts() {
+    # Note we have to pass token/endpoint here because the current endpoint and
+    # version negotiation in OSC means just --os-identity-api-version=3 won't work
+    KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
+    openstack --os-token $OS_SERVICE_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        --os-identity-api-version=3 domain create heat \
+        --description "Owns users and projects created by heat"
+}
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/ironic b/lib/ironic
index 3c0e3cb..607b131 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -145,30 +145,30 @@
 # service              ironic     admin        # if enabled
 create_ironic_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ironic
     if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
-        IRONIC_USER=$(keystone user-create \
-            --name=ironic \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=ironic@example.com \
+        IRONIC_USER=$(openstack user create \
+            ironic \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email ironic@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user_id $IRONIC_USER \
-            --role_id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $IRONIC_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            IRONIC_SERVICE=$(keystone service-create \
-                --name=ironic \
+            IRONIC_SERVICE=$(openstack service create \
+                ironic \
                 --type=baremetal \
                 --description="Ironic baremetal provisioning service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $IRONIC_SERVICE \
                 --region RegionOne \
-                --service_id $IRONIC_SERVICE \
                 --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT"
diff --git a/lib/keystone b/lib/keystone
index 4f7f68b..cebb4d3 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -201,7 +201,7 @@
         iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider
     fi
 
-    iniset $KEYSTONE_CONF sql connection `database_connection_url keystone`
+    iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
 
     if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
@@ -275,60 +275,69 @@
 create_keystone_accounts() {
 
     # admin
-    ADMIN_TENANT=$(keystone tenant-create \
-        --name admin \
+    ADMIN_TENANT=$(openstack project create \
+        admin \
         | grep " id " | get_field 2)
-    ADMIN_USER=$(keystone user-create \
-        --name admin \
-        --pass "$ADMIN_PASSWORD" \
+    ADMIN_USER=$(openstack user create \
+        admin \
+        --project "$ADMIN_TENANT" \
         --email admin@example.com \
+        --password "$ADMIN_PASSWORD" \
         | grep " id " | get_field 2)
-    ADMIN_ROLE=$(keystone role-create \
-        --name admin \
+    ADMIN_ROLE=$(openstack role create \
+        admin \
         | grep " id " | get_field 2)
-    keystone user-role-add \
-        --user-id $ADMIN_USER \
-        --role-id $ADMIN_ROLE \
-        --tenant-id $ADMIN_TENANT
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $ADMIN_TENANT \
+        --user $ADMIN_USER
 
     # service
-    SERVICE_TENANT=$(keystone tenant-create \
-        --name $SERVICE_TENANT_NAME \
+    SERVICE_TENANT=$(openstack project create \
+        $SERVICE_TENANT_NAME \
         | grep " id " | get_field 2)
 
     # The Member role is used by Horizon and Swift so we need to keep it:
-    MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)
+    MEMBER_ROLE=$(openstack role create \
+        Member \
+        | grep " id " | get_field 2)
     # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-    ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2)
+    ANOTHER_ROLE=$(openstack role create \
+        anotherrole \
+        | grep " id " | get_field 2)
 
     # invisible tenant - admin can't see this one
-    INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2)
+    INVIS_TENANT=$(openstack project create \
+        invisible_to_admin \
+        | grep " id " | get_field 2)
 
     # demo
-    DEMO_TENANT=$(keystone tenant-create \
-        --name=demo \
+    DEMO_TENANT=$(openstack project create \
+        demo \
         | grep " id " | get_field 2)
-    DEMO_USER=$(keystone user-create \
-        --name demo \
-        --pass "$ADMIN_PASSWORD" \
+    DEMO_USER=$(openstack user create \
+        demo \
+        --project $DEMO_TENANT \
         --email demo@example.com \
+        --password "$ADMIN_PASSWORD" \
         | grep " id " | get_field 2)
-    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT
+
+    openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE
+    openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE
+    openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE
+    openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE
 
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        KEYSTONE_SERVICE=$(keystone service-create \
-            --name keystone \
+        KEYSTONE_SERVICE=$(openstack service create \
+            keystone \
             --type identity \
             --description "Keystone Identity Service" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $KEYSTONE_SERVICE \
             --region RegionOne \
-            --service_id $KEYSTONE_SERVICE \
             --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
             --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \
             --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
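
    Note: the hunk above swaps the keystone CLI for python-openstackclient one-for-one while
    keeping the same table-scraping idiom. A minimal sketch of the mapping, assuming devstack's
    get_field helper and the admin credentials exported by stack.sh:

        # keystone tenant-create --name X        ->  openstack project create X
        # keystone user-create --name X --pass P ->  openstack user create X --password P
        # keystone role-create --name X          ->  openstack role create X
        # keystone user-role-add --user-id U --role-id R --tenant-id T
        #                                        ->  openstack role add R --user U --project T
        # The " id " column is still scraped the same way:
        ADMIN_ROLE=$(openstack role create admin | grep " id " | get_field 2)
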
diff --git a/lib/marconi b/lib/marconi
index d1ab5f3..b6ce57a 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -68,7 +68,9 @@
 # cleanup_marconi() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_marconi() {
-    mongo marconi --eval "db.dropDatabase();"
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then
+        die $LINENO "Mongo DB did not start"
+    fi
 }
 
 # configure_marconiclient() - Set config files, create data dirs, etc
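
    Note: cleanup_marconi() now tolerates mongod still coming up by polling inside a timeout
    instead of failing on the first attempt. A sketch of the same wait-until-ready idiom,
    assuming devstack's die helper, $SERVICE_TIMEOUT, and that nc is installed (the probe
    command here is illustrative only):

        # Illustrative: block until mongod's TCP port answers, give up after $SERVICE_TIMEOUT
        if ! timeout $SERVICE_TIMEOUT sh -c "while ! nc -z $SERVICE_HOST 27017; do sleep 1; done"; then
            die $LINENO "mongod did not start"
        fi
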
@@ -89,10 +91,6 @@
     iniset $MARCONI_CONF DEFAULT verbose True
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0'
 
-    # Install the policy file for the API server
-    cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR
-    iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json
-
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
     iniset $MARCONI_CONF keystone_authtoken admin_user marconi
     iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -109,9 +107,16 @@
 function configure_mongodb() {
     # Set nssize to 2GB. This increases the number of namespaces supported
     # per database.
-    sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
-
-    restart_service mongod
+    if is_ubuntu; then
+        sudo sed -i -e "
+            s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1|
+            s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047|
+        " /etc/mongodb.conf
+        restart_service mongodb
+    elif is_fedora; then
+        sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+        restart_service mongod
+    fi
 }
 
 # init_marconi() - Initialize etc.
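
    Note: configure_mongodb() now edits the right file per distro: the Ubuntu branch uncomments
    and sets nssize in /etc/mongodb.conf, the Fedora branch injects --nssize into OPTIONS in
    /etc/sysconfig/mongod. A quick, hedged check of what the sed calls should leave behind:

        grep '^nssize' /etc/mongodb.conf           # Ubuntu: roughly "nssize = 2047"
        grep -- '--nssize' /etc/sysconfig/mongod   # Fedora: OPTIONS="--nssize 2047 ..."
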
@@ -148,25 +153,29 @@
 }
 
 function create_marconi_accounts() {
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    MARCONI_USER=$(get_id keystone user-create --name=marconi \
-                                                --pass="$SERVICE_PASSWORD" \
-                                                --tenant-id $SERVICE_TENANT \
-                                                --email=marconi@example.com)
-    keystone user-role-add --tenant-id $SERVICE_TENANT \
-                            --user-id $MARCONI_USER \
-                            --role-id $ADMIN_ROLE
+    MARCONI_USER=$(openstack user create \
+        marconi \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email marconi@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $MARCONI_USER
+
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        MARCONI_SERVICE=$(keystone service-create \
-            --name=marconi \
+        MARCONI_SERVICE=$(openstack service create \
+            marconi \
             --type=queuing \
             --description="Marconi Service" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $MARCONI_SERVICE \
             --region RegionOne \
-            --service_id $MARCONI_SERVICE \
             --publicurl "http://$SERVICE_HOST:8888" \
             --adminurl "http://$SERVICE_HOST:8888" \
             --internalurl "http://$SERVICE_HOST:8888"
diff --git a/lib/neutron b/lib/neutron
index 5bd38bc..df276c7 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -332,29 +332,29 @@
 # Migrated from keystone_data.sh
 function create_neutron_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-        NEUTRON_USER=$(keystone user-create \
-            --name=neutron \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=neutron@example.com \
+        NEUTRON_USER=$(openstack user create \
+            neutron \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email neutron@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $NEUTRON_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $NEUTRON_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            NEUTRON_SERVICE=$(keystone service-create \
-                --name=neutron \
+            NEUTRON_SERVICE=$(openstack service create \
+                neutron \
                 --type=network \
                 --description="Neutron Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NEUTRON_SERVICE \
                 --region RegionOne \
-                --service_id $NEUTRON_SERVICE \
                 --publicurl "http://$SERVICE_HOST:9696/" \
                 --adminurl "http://$SERVICE_HOST:9696/" \
                 --internalurl "http://$SERVICE_HOST:9696/"
@@ -363,7 +363,7 @@
 }
 
 function create_neutron_initial_network() {
-    TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+    TENANT_ID=$(openstack project list | grep " demo " | get_field 1)
     die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo"
 
     # Create a small network
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index ab4e347..4ceabe7 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -93,9 +93,9 @@
     # instead use its own config variable to indicate whether security
     # groups is enabled, and that will need to be set here instead.
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
     else
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
 
     # Since we enable the tunnel TypeDrivers, also enable a local_ip
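
    Note: the section name is lowercased so iniset writes firewall_driver into the
    [securitygroup] group the plugin reads, rather than creating a parallel [SECURITYGROUP]
    section. A hedged way to confirm where the value landed, assuming devstack's iniget helper:

        # Should print the configured firewall_driver from the lowercase section
        iniget /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
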
diff --git a/lib/nova b/lib/nova
index eaaaa62..fefeda1 100644
--- a/lib/nova
+++ b/lib/nova
@@ -324,41 +324,41 @@
 # Migrated from keystone_data.sh
 create_nova_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Nova
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-        NOVA_USER=$(keystone user-create \
-            --name=nova \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=nova@example.com \
+        NOVA_USER=$(openstack user create \
+            nova \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email nova@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $NOVA_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $NOVA_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            NOVA_SERVICE=$(keystone service-create \
-                --name=nova \
+            NOVA_SERVICE=$(openstack service create \
+                nova \
                 --type=compute \
                 --description="Nova Compute Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NOVA_SERVICE \
                 --region RegionOne \
-                --service_id $NOVA_SERVICE \
                 --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
-            NOVA_V3_SERVICE=$(keystone service-create \
-                --name=novav3 \
+            NOVA_V3_SERVICE=$(openstack service create \
+                novav3 \
                 --type=computev3 \
                 --description="Nova Compute Service V3" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NOVA_V3_SERVICE \
                 --region RegionOne \
-                --service_id $NOVA_V3_SERVICE \
                 --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                 --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
@@ -513,12 +513,6 @@
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
     iniset_rpc_backend nova $NOVA_CONF DEFAULT
     iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
-
-    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # File injection is being disabled by default in the near future -
-        # disable it here for now to avoid surprises later.
-        iniset $NOVA_CONF libvirt inject_partition '-2'
-    fi
 }
 
 function init_nova_cells() {
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index cdd9317..b5df19d 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -32,7 +32,7 @@
 DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
 
 DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest}
-DOCKER_IMAGE_NAME=cirros
+DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME
 DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest}
 DOCKER_REGISTRY_IMAGE_NAME=registry
 DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 42d3af1..415244f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -23,6 +23,9 @@
 # Defaults
 # --------
 
+# File injection is disabled by default in Nova.  This will turn it back on.
+ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+
 
 # Entry Points
 # ------------
@@ -116,6 +119,19 @@
     if is_arch "ppc64"; then
         iniset $NOVA_CONF DEFAULT vnc_enabled "false"
     fi
+
+    ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION)
+    if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then
+        # When libguestfs is available for file injection, enable using
+        # libguestfs to inspect the image and figure out the proper
+        # partition to inject into.
+        iniset $NOVA_CONF libvirt inject_partition '-1'
+        iniset $NOVA_CONF libvirt inject_key 'true'
+    else
+        # File injection is being disabled by default in the near future -
+        # disable it here for now to avoid surprises later.
+        iniset $NOVA_CONF libvirt inject_partition '-2'
+    fi
 }
 
 # install_nova_hypervisor() - Install external components
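
    Note: nova's file-injection setting now lives with the libvirt hypervisor plugin and
    defaults to off: inject_partition stays at -2 (disabled) unless the operator opts back in,
    in which case -1 lets libguestfs pick the partition. To re-enable it, set the flag before
    stacking, e.g.:

        # localrc
        ENABLE_FILE_INJECTION=True
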
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index f47994f..9843261 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -56,6 +56,34 @@
     # Need to avoid crash due to new firewall support
     XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
     iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
+
+    local dom0_ip
+    dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
+
+    local ssh_dom0
+    ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
+
+    # install nova plugins to dom0
+    tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ |
+        $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*'
+
+    # install console logrotate script
+    tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
+        $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest'
+
+    # Create a cron job that will rotate guest logs
+    $ssh_dom0 crontab - << CRONTAB
+* * * * * /root/rotate_xen_guest_logs.sh
+CRONTAB
+
+    # Create directories for kernels and images
+    {
+        echo "set -eux"
+        cat $TOP_DIR/tools/xen/functions
+        echo "create_directory_for_images"
+        echo "create_directory_for_kernels"
+    } | $ssh_dom0
+
 }
 
 # install_nova_hypervisor() - Install external components
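
    Note: configure_nova_hypervisor() now streams the xapi plugins and the guest-log rotation
    script straight into dom0 over the $ssh_dom0 connection (ssh run as $DOMZERO_USER, logging
    in to dom0 as root). A quick, hedged sanity check after it runs, reusing the same wrapper:

        # Confirm the plugins, the logrotate script and its cron entry landed in dom0
        $ssh_dom0 'ls -l /etc/xapi.d/plugins/ /root/rotate_xen_guest_logs.sh; crontab -l'
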
diff --git a/lib/oslo b/lib/oslo
index f644ed7..b089842 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -20,9 +20,13 @@
 
 # Defaults
 # --------
+CLIFF_DIR=$DEST/cliff
 OSLOCFG_DIR=$DEST/oslo.config
 OSLOMSG_DIR=$DEST/oslo.messaging
 OSLORWRAP_DIR=$DEST/oslo.rootwrap
+PYCADF_DIR=$DEST/pycadf
+STEVEDORE_DIR=$DEST/stevedore
+TASKFLOW_DIR=$DEST/taskflow
 
 # Entry Points
 # ------------
@@ -33,6 +37,9 @@
     # for a smoother transition of existing users.
     cleanup_oslo
 
+    git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
+    setup_develop $CLIFF_DIR
+
     git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
     setup_develop $OSLOCFG_DIR
 
@@ -41,6 +48,15 @@
 
     git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
     setup_develop $OSLORWRAP_DIR
+
+    git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
+    setup_develop $PYCADF_DIR
+
+    git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH
+    setup_develop $STEVEDORE_DIR
+
+    git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH
+    setup_develop $TASKFLOW_DIR
 }
 
 # cleanup_oslo() - purge possibly old versions of oslo
diff --git a/lib/savanna b/lib/savanna
index 6f42311..43c5e38 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -54,29 +54,29 @@
 # service     savanna    admin
 function create_savanna_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SAVANNA_USER=$(keystone user-create \
-        --name=savanna \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant-id $SERVICE_TENANT \
-        --email=savanna@example.com \
+    SAVANNA_USER=$(openstack user create \
+        savanna \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email savanna@example.com \
         | grep " id " | get_field 2)
-    keystone user-role-add \
-        --tenant-id $SERVICE_TENANT \
-        --user-id $SAVANNA_USER \
-        --role-id $ADMIN_ROLE
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SAVANNA_USER
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SAVANNA_SERVICE=$(keystone service-create \
-            --name=savanna \
+        SAVANNA_SERVICE=$(openstack service create \
+            savanna \
             --type=data_processing \
             --description="Savanna Data Processing" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $SAVANNA_SERVICE \
             --region RegionOne \
-            --service_id $SAVANNA_SERVICE \
             --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
index 7713a78..691b23f 100644
--- a/lib/savanna-dashboard
+++ b/lib/savanna-dashboard
@@ -37,8 +37,9 @@
 
 function configure_savanna_dashboard() {
 
-    echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
 
     if is_service_enabled neutron; then
         echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
diff --git a/lib/swift b/lib/swift
index be25c81..df586ab 100644
--- a/lib/swift
+++ b/lib/swift
@@ -527,39 +527,53 @@
 
     KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \
-        --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2)
-    keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE
+    SWIFT_USER=$(openstack user create \
+        swift \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email=swift@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SWIFT_USER
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \
-            --description="Swift Service" | grep " id " | get_field 2)
-        keystone endpoint-create \
+        SWIFT_SERVICE=$(openstack service create \
+            swift \
+            --type="object-store" \
+            --description="Swift Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+            $SWIFT_SERVICE \
             --region RegionOne \
-            --service_id $SWIFT_SERVICE \
             --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
             --adminurl "http://$SERVICE_HOST:8080" \
             --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
+    SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
-    SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=$SWIFTUSERTEST1_PASSWORD --email=test@example.com | grep " id " | get_field 2)
+    SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \
+        --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
-    keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
+    openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE
 
-    SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=$SWIFTUSERTEST3_PASSWORD --email=test3@example.com | grep " id " | get_field 2)
+    SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \
+        --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
-    keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
+    openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE
 
-    SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
+    SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
-    SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=$SWIFTUSERTEST2_PASSWORD --email=test2@example.com | grep " id " | get_field 2)
+
+    SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \
+        --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
-    keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
+    openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE
 }
 
 # init_swift() - Initialize rings
diff --git a/lib/tempest b/lib/tempest
index 76da170..c8eebfc 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -348,9 +348,6 @@
         fi
     done
 
-    echo "Created tempest configuration file:"
-    cat $TEMPEST_CONFIG
-
     # Restore IFS
     IFS=$ifs
     #Restore errexit
diff --git a/lib/trove b/lib/trove
index bb45491..6834149 100644
--- a/lib/trove
+++ b/lib/trove
@@ -71,28 +71,29 @@
 
 create_trove_accounts() {
     # Trove
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
-        TROVE_USER=$(keystone user-create \
-            --name=trove \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=trove@example.com \
+        TROVE_USER=$(openstack user create \
+            trove \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email trove@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add --tenant-id $SERVICE_TENANT \
-            --user-id $TROVE_USER \
-            --role-id $SERVICE_ROLE
+        openstack role add \
+            $SERVICE_ROLE \
+            --project $SERVICE_TENANT \
+            --user $TROVE_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            TROVE_SERVICE=$(keystone service-create \
-                --name=trove \
+            TROVE_SERVICE=$(openstack service create \
+                trove \
                 --type=database \
                 --description="Trove Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $TROVE_SERVICE \
                 --region RegionOne \
-                --service_id $TROVE_SERVICE \
                 --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
diff --git a/openrc b/openrc
index 784b00e..fc066ad 100644
--- a/openrc
+++ b/openrc
@@ -67,7 +67,7 @@
 # Identity API version
 export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
 
-# Authenticating against an Openstack cloud using Keystone returns a **Token**
+# Authenticating against an OpenStack cloud using Keystone returns a **Token**
 # and **Service Catalog**.  The catalog contains the endpoints for all services
 # the user/tenant has access to - including nova, glance, keystone, swift, ...
 # We currently recommend using the 2.0 *identity api*.
diff --git a/stack.sh b/stack.sh
index e45707b..4a55225 100755
--- a/stack.sh
+++ b/stack.sh
@@ -925,6 +925,9 @@
     # Do the keystone-specific bits from keystone_data.sh
     export OS_SERVICE_TOKEN=$SERVICE_TOKEN
     export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
+    # Add temporarily to make openstackclient work
+    export OS_TOKEN=$SERVICE_TOKEN
+    export OS_URL=$SERVICE_ENDPOINT
     create_keystone_accounts
     create_nova_accounts
     create_cinder_accounts
@@ -938,6 +941,10 @@
         create_swift_accounts
     fi
 
+    if is_service_enabled heat; then
+        create_heat_accounts
+    fi
+
     # ``keystone_data.sh`` creates services, admin and demo users, and roles.
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
@@ -947,6 +954,7 @@
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
+    unset OS_TOKEN OS_URL
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
     export OS_USERNAME=admin
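
    Note: the extra exports exist because the openstack client expects token auth via
    OS_TOKEN/OS_URL, whereas the keystone CLI used OS_SERVICE_TOKEN/OS_SERVICE_ENDPOINT; the
    token pair is set only for the bootstrap phase and dropped once password credentials work.
    In isolation the sequence is roughly:

        # Bootstrap with the service token (no users exist yet) ...
        export OS_TOKEN=$SERVICE_TOKEN
        export OS_URL=$SERVICE_ENDPOINT
        openstack project list
        # ... then drop token auth once the admin user can be used
        unset OS_TOKEN OS_URL
        export OS_AUTH_URL=$SERVICE_ENDPOINT
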
@@ -1102,6 +1110,47 @@
     start_glance
 fi
 
+# Install Images
+# ==============
+
+# Upload an image to glance.
+#
+# The default image is cirros, a small testing image which lets you login as **root**
+# cirros has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
+#
+# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
+#  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
+
+if is_service_enabled g-reg; then
+    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
+
+    if is_baremetal; then
+        echo_summary "Creating and uploading baremetal images"
+
+        # build and upload separate deploy kernel & ramdisk
+        upload_baremetal_deploy $TOKEN
+
+        # upload images, separating out the kernel & ramdisk for PXE boot
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_baremetal_image $image_url $TOKEN
+        done
+    else
+        echo_summary "Uploading images"
+
+        # Option to upload legacy ami-tty, which works with xenserver
+        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
+        fi
+
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_image $image_url $TOKEN
+        done
+    fi
+fi
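
    Note: image upload now runs as soon as g-reg is available instead of near the end of
    stack.sh. As the comment above says, the image set can be overridden, e.g. in localrc:

        # localrc: upload a precise UEC image instead of the default cirros
        IMAGE_URLS="http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz"
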
+
 # Create an access key and secret key for nova ec2 register image
 if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
     NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
@@ -1181,7 +1230,7 @@
 
 # Configure and launch heat engine, api and metadata
 if is_service_enabled heat; then
-    # Initialize heat, including replacing nova flavors
+    # Initialize heat
     echo_summary "Configuring Heat"
     init_heat
     echo_summary "Starting Heat"
@@ -1207,47 +1256,6 @@
 fi
 
 
-# Install Images
-# ==============
-
-# Upload an image to glance.
-#
-# The default image is cirros, a small testing image which lets you login as **root**
-# cirros has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
-#
-# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
-#  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
-
-if is_service_enabled g-reg; then
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
-    if is_baremetal; then
-        echo_summary "Creating and uploading baremetal images"
-
-        # build and upload separate deploy kernel & ramdisk
-        upload_baremetal_deploy $TOKEN
-
-        # upload images, separating out the kernel & ramdisk for PXE boot
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_baremetal_image $image_url $TOKEN
-        done
-    else
-        echo_summary "Uploading images"
-
-        # Option to upload legacy ami-tty, which works with xenserver
-        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-        fi
-
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_image $image_url $TOKEN
-        done
-    fi
-fi
-
 # If we are running nova with baremetal driver, there are a few
 # last-mile configuration bits to attend to, which must happen
 # after n-api and n-sch have started.
@@ -1350,11 +1358,6 @@
     echo "Horizon is now available at http://$SERVICE_HOST/"
 fi
 
-# Warn that the default flavors have been changed by Heat
-if is_service_enabled heat; then
-    echo "Heat has replaced the default flavors. View by running: nova flavor-list"
-fi
-
 # If Keystone is present you can point ``nova`` cli to this server
 if is_service_enabled key; then
     echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
diff --git a/stackrc b/stackrc
index 165196c..0b081c4 100644
--- a/stackrc
+++ b/stackrc
@@ -151,6 +151,10 @@
 OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
 OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
 
+# cliff command line framework
+CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
+CLIFF_BRANCH=${CLIFF_BRANCH:-master}
+
 # oslo.config
 OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
 OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
@@ -163,6 +167,18 @@
 OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
 OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
 
+# pycadf auditing library
+PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
+PYCADF_BRANCH=${PYCADF_BRANCH:-master}
+
+# stevedore plugin manager
+STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
+STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master}
+
+# taskflow task and flow management library
+TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
+TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master}
+
 # pbr drives the setuptools configs
 PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
 PBR_BRANCH=${PBR_BRANCH:-master}
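
    Note: each of the newly added libraries follows the stackrc REPO/BRANCH convention, so any
    of them can be repointed from localrc without editing stackrc. For example (the values
    below are illustrative placeholders, not real mirrors or branches):

        # localrc
        STEVEDORE_REPO=https://example.com/mirrors/stevedore.git   # hypothetical mirror
        TASKFLOW_BRANCH=my-feature-branch                           # hypothetical branch
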
@@ -256,6 +272,10 @@
     xenserver)
         # Xen config common to nova and neutron
         XENAPI_USER=${XENAPI_USER:-"root"}
+        # This user will be used for dom0 - domU communication
+        #   should be able to log in to dom0 without a password
+        #   will be used to install the plugins
+        DOMZERO_USER=${DOMZERO_USER:-"domzero"}
         ;;
     *)
         ;;
@@ -291,6 +311,9 @@
     openvz)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
+    docker)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros}
+        IMAGE_URLS=${IMAGE_URLS:-};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index d9c93cc..a2d0c52 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -199,7 +199,7 @@
 export S3_URL="$S3_URL"
 # OpenStack USER ID = $user_id
 export OS_USERNAME="$user_name"
-# Openstack Tenant ID = $tenant_id
+# OpenStack Tenant ID = $tenant_id
 export OS_TENANT_NAME="$tenant_name"
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
index b9e1b24..27c8c82 100755
--- a/tools/docker/install_docker.sh
+++ b/tools/docker/install_docker.sh
@@ -30,15 +30,19 @@
 # Install Docker Service
 # ======================
 
-# Stop the auto-repo updates and do it when required here
-NO_UPDATE_REPOS=True
+if is_fedora; then
+    install_package docker-io socat
+else
+    # Stop the auto-repo updates and do it when required here
+    NO_UPDATE_REPOS=True
 
-# Set up home repo
-curl https://get.docker.io/gpg | sudo apt-key add -
-install_package python-software-properties && \
-    sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
-apt_get update
-install_package --force-yes lxc-docker socat
+    # Set up home repo
+    curl https://get.docker.io/gpg | sudo apt-key add -
+    install_package python-software-properties && \
+        sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
+    apt_get update
+    install_package --force-yes lxc-docker socat
+fi
 
 # Start the daemon - restart just in case the package ever auto-starts...
 restart_service docker
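
    Note: Fedora hosts now get docker-io (and socat) from the distro repos instead of the
    get.docker.io apt repository used on Ubuntu. A quick, hedged check that the daemon came
    back after restart_service:

        sudo docker version
        sudo docker info
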
diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md
index 371017d..3586da9 100644
--- a/tools/jenkins/README.md
+++ b/tools/jenkins/README.md
@@ -1,6 +1,6 @@
 Getting Started With Jenkins and Devstack
 =========================================
-This little corner of devstack is to show how to get an Openstack jenkins
+This little corner of devstack is to show how to get an OpenStack Jenkins
 environment up and running quickly, using the rcb configuration methodology.
 
 
diff --git a/tools/xen/README.md b/tools/xen/README.md
index ee1abcc..712782b 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,11 +1,11 @@
 # Getting Started With XenServer and Devstack
 
 The purpose of the code in this directory is to help developers bootstrap a
-XenServer 6.2 (older versions may also work) + Openstack development
+XenServer 6.2 (older versions may also work) + OpenStack development
 environment. This file gives some pointers on how to get started.
 
 XenServer is a Type 1 hypervisor, so it is best installed on bare metal.  The
-Openstack services are configured to run within a virtual machine (called OS
+OpenStack services are configured to run within a virtual machine (called OS
 domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with
 the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`).
 
diff --git a/tools/xen/functions b/tools/xen/functions
index 97c56bc..ab0be84 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -336,3 +336,11 @@
     xe vm-param-set uuid=$vm VCPUs-max=$cpu_count
     xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
 }
+
+function get_domid() {
+    local vm_name_label
+
+    vm_name_label="$1"
+
+    xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
+}
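
    Note: get_domid() wraps the xe name-label lookup so callers resolve a VM's dom-id in one
    place; install_os_domU.sh below uses it, for example, to find the guest's VNC console port:

        domid=$(get_domid "$GUEST_NAME")
        port=$(xenstore-read /local/domain/$domid/console/vnc-port)
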
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index d0d81a2..7b59bae 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -67,21 +67,6 @@
 
 # Install plugins
 
-## Nova plugins
-NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)}
-EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL")
-install_xapi_plugins_from "$EXTRACTED_NOVA"
-
-LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print)
-if [ -n "$LOGROT_SCRIPT" ]; then
-    mkdir -p "/var/log/xen/guest"
-    cp "$LOGROT_SCRIPT" /root/consolelogrotate
-    chmod +x /root/consolelogrotate
-    echo "* * * * * /root/consolelogrotate" | crontab
-fi
-
-rm -rf "$EXTRACTED_NOVA"
-
 ## Install the netwrap xapi plugin to support agent control of dom0 networking
 if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
     NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)}
@@ -90,9 +75,6 @@
     rm -rf "$EXTRACTED_NEUTRON"
 fi
 
-create_directory_for_kernels
-create_directory_for_images
-
 #
 # Configure Networking
 #
@@ -188,7 +170,7 @@
     set +x
     echo "Waiting for the VM to halt.  Progress in-VM can be checked with vncviewer:"
     mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
-    domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true)
+    domid=$(get_domid "$GUEST_NAME")
     port=$(xenstore-read /local/domain/$domid/console/vnc-port)
     echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
     while true; do
@@ -359,6 +341,37 @@
     fi
 fi
 
+# Create an ssh-keypair, and set it up for dom0 user
+rm -f /root/dom0key /root/dom0key.pub
+ssh-keygen -f /root/dom0key -P "" -C "dom0"
+DOMID=$(get_domid "$GUEST_NAME")
+
+xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)"
+xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID
+
+function run_on_appliance() {
+    ssh \
+        -i /root/dom0key \
+        -o UserKnownHostsFile=/dev/null \
+        -o StrictHostKeyChecking=no \
+        -o BatchMode=yes \
+        "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@"
+}
+
+# Wait until we can log in to the appliance
+while ! run_on_appliance true; do
+    sleep 1
+done
+
+# Remove the authorized_keys updater cronjob
+echo "" | run_on_appliance crontab -
+
+# Generate a passwordless ssh key for domzero user
+echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance
+
+# Authenticate that user to dom0
+run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+
 # If we have copied our ssh credentials, use ssh to monitor while the installation runs
 WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
 COPYENV=${COPYENV:-1}
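
    Note: the key handoff set up above is two-way: dom0 generates a key and publishes its
    public half in xenstore, where the per-user cron job installed by prepare_guest.sh (below)
    copies it into the domU user's authorized_keys; once the wait loop succeeds, that user's
    own key is appended to dom0 root's authorized_keys so the plugins can be managed from
    inside the appliance. A trivial check once the loop exits:

        # dom0 -> appliance now works without a password
        run_on_appliance echo "reachable as $DOMZERO_USER"
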
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 05ac86c..0946126 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -18,6 +18,57 @@
 GUEST_PASSWORD="$1"
 XS_TOOLS_PATH="$2"
 STACK_USER="$3"
+DOMZERO_USER="$4"
+
+
+function setup_domzero_user() {
+    local username
+
+    username="$1"
+
+    local key_updater_script
+    local sudoers_file
+    key_updater_script="/home/$username/update_authorized_keys.sh"
+    sudoers_file="/etc/sudoers.d/allow_$username"
+
+    # Create user
+    adduser --disabled-password --quiet "$username" --gecos "$username"
+
+    # Give passwordless sudo
+    cat > $sudoers_file << EOF
+    $username ALL = NOPASSWD: ALL
+EOF
+    chmod 0440 $sudoers_file
+
+    # A script to populate this user's authorized_keys from xenstore
+    cat > $key_updater_script << EOF
+#!/bin/bash
+set -eux
+
+DOMID=\$(sudo xenstore-read domid)
+sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username
+sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value
+cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys
+EOF
+
+    # Give the key updater to the user
+    chown $username:$username $key_updater_script
+    chmod 0700 $key_updater_script
+
+    # Setup the .ssh folder
+    mkdir -p /home/$username/.ssh
+    chown $username:$username /home/$username/.ssh
+    chmod 0700 /home/$username/.ssh
+    touch /home/$username/.ssh/authorized_keys
+    chown $username:$username /home/$username/.ssh/authorized_keys
+    chmod 0600 /home/$username/.ssh/authorized_keys
+
+    # Setup the key updater as a cron job
+    crontab -u $username - << EOF
+* * * * * $key_updater_script
+EOF
+
+}
 
 # Install basics
 apt-get update
@@ -48,6 +99,8 @@
 echo $STACK_USER:$GUEST_PASSWORD | chpasswd
 echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
 
+setup_domzero_user "$DOMZERO_USER"
+
 # Add an udev rule, so that new block devices could be written by stack user
 cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF
 KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660"
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index 4fa70d3..eaab2fe 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -86,7 +86,7 @@
 cat <<EOF >$STAGING_DIR/etc/rc.local
 #!/bin/sh -e
 bash /opt/stack/prepare_guest.sh \\
-    "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\
+    "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" "$DOMZERO_USER" \\
     > /opt/stack/prepare_guest.log 2>&1
 EOF