Merge "test for adding crazy branches"
diff --git a/README.md b/README.md
index 9914b1e..a0f5b26 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@
 This is a recent change (Oct 2013) from the previous behaviour of
 automatically creating a ``stack`` user.  Automatically creating
 user accounts is not the right response to running as root, so
-that bit is now an explicit step using ``tools/create-stack-user.sh``. 
+that bit is now an explicit step using ``tools/create-stack-user.sh``.
 Run that (as root!) or just check it out to see what DevStack's
 expectations are for the account it runs under.  Many people simply
 use their usual login (the default 'ubuntu' login on a UEC image
@@ -253,10 +253,6 @@
 
 If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
 
-# DevStack on Docker
-
-If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`.
-
 # Additional Projects
 
 DevStack has a hook mechanism to call out to a dispatch script at specific
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index f679669..dff8e7a 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,9 +44,6 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/euca.sh b/exercises/euca.sh
index ad852a4..3768b56 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -40,9 +40,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8dc44ef..1416d4d 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -40,9 +40,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -178,6 +175,10 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
     die $LINENO "Failure deleting security group rule from $SECGROUP"
 
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+    die $LINENO "Security group rule not deleted from $SECGROUP"
+fi
+
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
diff --git a/exercises/savanna.sh b/exercises/sahara.sh
similarity index 88%
rename from exercises/savanna.sh
rename to exercises/sahara.sh
index fc3f976..867920e 100755
--- a/exercises/savanna.sh
+++ b/exercises/sahara.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-# **savanna.sh**
+# **sahara.sh**
 
-# Sanity check that Savanna started if enabled
+# Sanity check that Sahara started if enabled
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -33,9 +33,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-is_service_enabled savanna || exit 55
+is_service_enabled sahara || exit 55
 
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!"
+curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index d71a1e0..5f8b0a4 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -37,9 +37,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 
 # Testing Security Groups
 # =======================
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 83d25c7..0d556df 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -41,9 +41,6 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
new file mode 100644
index 0000000..80e07ff
--- /dev/null
+++ b/extras.d/70-sahara.sh
@@ -0,0 +1,37 @@
+# sahara.sh - DevStack extras script to install Sahara
+
+if is_service_enabled sahara; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/sahara
+        source $TOP_DIR/lib/sahara-dashboard
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing sahara"
+        install_sahara
+        cleanup_sahara
+        if is_service_enabled horizon; then
+            install_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring sahara"
+        configure_sahara
+        create_sahara_accounts
+        if is_service_enabled horizon; then
+            configure_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing sahara"
+        start_sahara
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_sahara
+        if is_service_enabled horizon; then
+            cleanup_sahara_dashboard
+        fi
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_sahara
+    fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
deleted file mode 100644
index edc1376..0000000
--- a/extras.d/70-savanna.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-# savanna.sh - DevStack extras script to install Savanna
-
-if is_service_enabled savanna; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/savanna
-        source $TOP_DIR/lib/savanna-dashboard
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Savanna"
-        install_savanna
-        cleanup_savanna
-        if is_service_enabled horizon; then
-            install_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Savanna"
-        configure_savanna
-        create_savanna_accounts
-        if is_service_enabled horizon; then
-            configure_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Savanna"
-        start_savanna
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_savanna
-        if is_service_enabled horizon; then
-            cleanup_savanna_dashboard
-        fi
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_savanna
-    fi
-fi
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
new file mode 100644
index 0000000..57b4328
--- /dev/null
+++ b/extras.d/80-opendaylight.sh
@@ -0,0 +1,69 @@
+# opendaylight.sh - DevStack extras script
+
+if is_service_enabled odl-server odl-compute; then
+    # Initial source
+    [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
+fi
+
+if is_service_enabled odl-server; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight-compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        create_nova_conf_neutron
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing OpenDaylight"
+        ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+        ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
+        read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+        sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
+        sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        sudo ovs-vsctl del-manager
+        BRIDGES=$(sudo ovs-vsctl list-br)
+        for bridge in $BRIDGES ; do
+            sudo ovs-vsctl del-controller $bridge
+        done
+
+        stop_opendaylight-compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index 71007ba..f1b692a 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,5 +1,5 @@
-python-pymongo
-mongodb-server
+python-pymongo #NOPRIME
+mongodb-server #NOPRIME
 libnspr4-dev
 pkg-config
 libxml2-dev
diff --git a/files/apts/general b/files/apts/general
index 32d31f0..995c0c6 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,8 +9,6 @@
 lsof # useful when debugging
 openssh-server
 openssl
-vim-nox
-locate # useful when debugging
 python-virtualenv
 python-unittest2
 iputils-ping
diff --git a/files/apts/opendaylight b/files/apts/opendaylight
new file mode 100644
index 0000000..ec3cc9d
--- /dev/null
+++ b/files/apts/opendaylight
@@ -0,0 +1,2 @@
+openvswitch-datapath-dkms # NOPRIME
+openvswitch-switch # NOPRIME
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 9a34c76..fc1e813 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -53,41 +53,6 @@
         --role ResellerAdmin
 fi
 
-# Heat
-if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
-    keystone user-create --name=heat \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=heat@example.com
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user heat \
-        --role service
-    # heat_stack_user role is for users created by Heat
-    keystone role-create --name heat_stack_user
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=heat-cfn \
-            --type=cloudformation \
-            --description="Heat CloudFormation Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat-cfn \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
-        keystone service-create \
-            --name=heat \
-            --type=orchestration \
-            --description="Heat Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
-    fi
-fi
-
 # Glance
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
     keystone user-create \
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
new file mode 100644
index 0000000..61f73ee
--- /dev/null
+++ b/files/rpms-suse/baremetal
@@ -0,0 +1 @@
+dnsmasq
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 704947e..ff27a3a 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,23 +1,22 @@
+bc
 bridge-utils
 ca-certificates-mozilla
 curl
 euca2ools
 git-core
 iputils
+libopenssl-devel # to rebuild pyOpenSSL if needed
+lsof # useful when debugging
+make
 openssh
 openssl
 psmisc
-python-setuptools # instead of python-distribute; dist:sle11sp2
 python-cmd2 # dist:opensuse-12.3
 python-pylint
+python-setuptools # instead of python-distribute; dist:sle11sp2
 python-unittest2
 screen
 tar
 tcpdump
 unzip
-vim-enhanced
 wget
-bc
-
-findutils-locate # useful when debugging
-lsof # useful when debugging
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index dd68ac0..d9844e9 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -8,5 +8,6 @@
 python-eventlet
 python-greenlet
 python-iso8601
+python-pyOpenSSL
 python-wsgiref
 python-xattr
diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight
new file mode 100644
index 0000000..d6c7146
--- /dev/null
+++ b/files/rpms-suse/opendaylight
@@ -0,0 +1,4 @@
+openvswitch # NOPRIME
+openvswitch-controller # NOPRIME
+openvswitch-switch # NOPRIME
+
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
 python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c91bac3..9cf580d 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,4 +1,4 @@
 selinux-policy-targeted
-mongodb-server
-pymongo
+mongodb-server #NOPRIME
+pymongo # NOPRIME
 mongodb # NOPRIME
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 199ae10..423d57c 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -4,4 +4,4 @@
 python-devel
 postgresql-devel
 iscsi-initiator-utils
-python-lxml         #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
diff --git a/files/rpms/glance b/files/rpms/glance
index 25c5d39..2007e2e 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -9,10 +9,10 @@
 python-devel
 python-eventlet
 python-greenlet
-python-lxml         #dist:f18,f19,f20,rhel7
-python-paste-deploy #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
+python-paste-deploy #dist:f19,f20,rhel7
 python-routes
 python-sqlalchemy
-python-wsgiref
+python-wsgiref      #dist:f18,f19,f20
 pyxattr
 zlib-devel          # testonly
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 59503cc..2dd24e0 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,8 +16,8 @@
 python-migrate
 python-mox
 python-nose
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
 python-routes
 python-sphinx
 python-sqlalchemy
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 99e8524..7182091 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,9 +1,9 @@
 python-greenlet
 libxslt-devel       # dist:f20
-python-lxml         #dist:f18,f19,f20
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
-python-paste-script #dist:f18,f19,f20
+python-lxml         #dist:f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
+python-paste-script #dist:f19,f20
 python-routes
 python-sqlalchemy
 python-webob
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 42d7f68..06ea0ea 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
 python-iso8601
 python-kombu
 #rhel6 gets via pip
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/nova b/files/rpms/nova
index a607d92..45d6e0b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,11 +28,11 @@
 python-lockfile
 python-migrate
 python-mox
-python-paramiko # dist:f18,f19,f20,rhel7
+python-paramiko # dist:f19,f20,rhel7
 # ^ on RHEL6, brings in python-crypto which conflicts with version from
 # pip we need
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight
new file mode 100644
index 0000000..98aaaf4
--- /dev/null
+++ b/files/rpms/opendaylight
@@ -0,0 +1 @@
+openvswitch # NOPRIME
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/rpms/swift b/files/rpms/swift
index 72253f7..bf29ea2 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -9,7 +9,7 @@
 python-greenlet
 python-netifaces
 python-nose
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-simplejson
 python-webob
 pyxattr
diff --git a/functions b/functions
index ab8319b..1d30922 100644
--- a/functions
+++ b/functions
@@ -163,38 +163,37 @@
             if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
                             `" Filename provided: ${IMAGE_NAME}"
-            else
-                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-                flat_path="${image_url:0:$path_len}"
-                descriptor_url=$flat_path$descriptor_fname
-                warn $LINENO "$descriptor_data_pair_msg"`
-                                `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
-                if [[ $flat_path != file* ]]; then
-                    if [[ ! -f $FILES/$descriptor_fname || \
-                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
-                        wget -c $descriptor_url -O $FILES/$descriptor_fname
-                        if [[ $? -ne 0 ]]; then
-                            warn $LINENO "Descriptor not found $descriptor_url"
-                            descriptor_found=false
-                        fi
-                    fi
-                    descriptor_url="$FILES/$descriptor_fname"
-                else
-                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
-                    if [[ ! -f $descriptor_url || \
-                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+            fi
+
+            descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+            flat_path="${image_url:0:$path_len}"
+            descriptor_url=$flat_path$descriptor_fname
+            warn $LINENO "$descriptor_data_pair_msg"`
+                            `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+            if [[ $flat_path != file* ]]; then
+                if [[ ! -f $FILES/$descriptor_fname || \
+                "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                    wget -c $descriptor_url -O $FILES/$descriptor_fname
+                    if [[ $? -ne 0 ]]; then
                         warn $LINENO "Descriptor not found $descriptor_url"
                         descriptor_found=false
                     fi
                 fi
-                if $descriptor_found; then
-                    vmdk_adapter_type="$(head -25 $descriptor_url |"`
-                    `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
-                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
-                    vmdk_adapter_type="${vmdk_adapter_type%?}"
+                descriptor_url="$FILES/$descriptor_fname"
+            else
+                descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                if [[ ! -f $descriptor_url || \
+                "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                    warn $LINENO "Descriptor not found $descriptor_url"
+                    descriptor_found=false
                 fi
             fi
+            if $descriptor_found; then
+                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
+                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                vmdk_adapter_type="${vmdk_adapter_type%?}"
+            fi
             vmdk_disktype="preallocated"
         else
             vmdk_disktype="preallocated"
diff --git a/functions-common b/functions-common
index 0db3ff3..c6fd5c7 100644
--- a/functions-common
+++ b/functions-common
@@ -517,12 +517,14 @@
     GIT_DEST=$2
     GIT_REF=$3
     RECLONE=$(trueorfalse False $RECLONE)
+    local orig_dir=`pwd`
 
     if [[ "$OFFLINE" = "True" ]]; then
         echo "Running in offline mode, clones already exist"
         # print out the results so we know what change was used in the logs
         cd $GIT_DEST
         git show --oneline | head -1
+        cd $orig_dir
         return
     fi
 
@@ -572,6 +574,7 @@
     # print out the results so we know what change was used in the logs
     cd $GIT_DEST
     git show --oneline | head -1
+    cd $orig_dir
 }
 
 # git can sometimes get itself infinitely stuck with transient network
@@ -938,9 +941,24 @@
     [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    # The manual check for missing packages is because yum -y assumes
+    # missing packages are OK.  See
+    # https://bugzilla.redhat.com/show_bug.cgi?id=965567
     $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
         no_proxy=$no_proxy \
-        yum install -y "$@"
+        yum install -y "$@" 2>&1 | \
+        awk '
+            BEGIN { fail=0 }
+            /No package/ { fail=1 }
+            { print }
+            END { exit fail }' || \
+                die $LINENO "Missing packages detected"
+
+    # also ensure we catch a yum failure
+    if [[ ${PIPESTATUS[0]} != 0 ]]; then
+        die $LINENO "Yum install failure"
+    fi
 }
 
 # zypper wrapper to set arguments correctly
@@ -1233,7 +1251,7 @@
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements = "changed" ]]; then
+    if [[ $update_requirements != "changed" ]]; then
         (cd $REQUIREMENTS_DIR; \
             $SUDO_CMD python update.py $project_dir)
     fi
@@ -1249,7 +1267,7 @@
     # a variable that tells us whether or not we should UNDO the requirements
     # changes (this will be set to False in the OpenStack ci gate)
     if [ $UNDO_REQUIREMENTS = "True" ]; then
-        if [[ $update_requirements = "changed" ]]; then
+        if [[ $update_requirements != "changed" ]]; then
             (cd $project_dir && git reset --hard)
         fi
     fi
diff --git a/lib/ceilometer b/lib/ceilometer
index 04c1a34..6aaddce 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -106,7 +106,9 @@
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer {
-    mongo ceilometer --eval "db.dropDatabase();"
+    if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+        mongo ceilometer --eval "db.dropDatabase();"
+    fi
 }
 
 # configure_ceilometerclient() - Set config files, create data dirs, etc
@@ -129,6 +131,7 @@
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
+    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -163,14 +166,27 @@
 }
 
 function configure_mongodb {
+    # server package is the same on all
+    local packages=mongodb-server
+
     if is_fedora; then
-        # install mongodb client
-        install_package mongodb
+        # mongodb client + python bindings
+        packages="${packages} mongodb pymongo"
+    else
+        packages="${packages} python-pymongo"
+    fi
+
+    install_package ${packages}
+
+    if is_fedora; then
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
         restart_service mongod
     fi
+
+    # give mongodb time to start-up
+    sleep 5
 }
 
 # init_ceilometer() - Initialize etc.
diff --git a/lib/heat b/lib/heat
index d0c0302..2d9d863 100644
--- a/lib/heat
+++ b/lib/heat
@@ -197,8 +197,62 @@
 }
 
 # create_heat_accounts() - Set up common required heat accounts
-# Note this is in addition to what is in files/keystone_data.sh
 function create_heat_accounts {
+    # migrated from files/keystone_data.sh
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    HEAT_USER=$(openstack user create \
+        heat \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email heat@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $HEAT_USER
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        HEAT_SERVICE=$(openstack service create \
+            heat \
+            --type=orchestration \
+            --description="Heat Orchestration Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+        HEAT_CFN_SERVICE=$(openstack service create \
+            heat \
+            --type=cloudformation \
+            --description="Heat CloudFormation Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_CFN_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+    fi
+
+    # heat_stack_user role is for users created by Heat
+    openstack role create heat_stack_user
+
+    # heat_stack_owner role is given to users who create Heat stacks,
+    # it's the default role used by heat to delegate to the heat service
+    # user (for performing deferred operations via trusts), see heat.conf
+    HEAT_OWNER_ROLE=$(openstack role create \
+        heat_stack_owner \
+        | grep " id " | get_field 2)
+
+    # Give the role to the demo and admin users so they can create stacks
+    # in either of the projects created by devstack
+    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+
     # Note we have to pass token/endpoint here because the current endpoint and
     # version negotiation in OSC means just --os-identity-api-version=3 won't work
     KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
diff --git a/lib/ironic b/lib/ironic
index 4e5edc9..b346de1 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -124,7 +124,7 @@
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
-    iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
diff --git a/lib/ldap b/lib/ldap
index 51d0251..efe2f09 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -154,7 +154,7 @@
 
 # clear_ldap_state() - Clear LDAP State
 function clear_ldap_state {
-    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
+    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || :
 }
 
 # Restore xtrace
diff --git a/lib/marconi b/lib/marconi
index 29ae386..3c4547f 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -34,7 +34,8 @@
 MARCONICLIENT_DIR=$DEST/python-marconiclient
 MARCONI_CONF_DIR=/etc/marconi
 MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_API_LOG_DIR=/var/log/marconi
+MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
 MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
 
 # Support potential entry-points console scripts
@@ -96,6 +97,7 @@
 
     iniset $MARCONI_CONF DEFAULT verbose True
     iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+    iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
@@ -104,8 +106,12 @@
     iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
 
-    if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
-        iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
+        iniset $MARCONI_CONF drivers storage sqlalchemy
+        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
+    else
+        iniset $MARCONI_CONF drivers storage mongodb
+        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
         configure_mongodb
         cleanup_marconi
     fi
@@ -148,7 +154,7 @@
 
 # start_marconi() - Start running processes, including screen
 function start_marconi {
-    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1"
     echo "Waiting for Marconi to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
         die $LINENO "Marconi did not start"
diff --git a/lib/neutron b/lib/neutron
index bb591ab..84e8277 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 # nova vif driver that all plugins should use
 NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
+
+    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -754,6 +761,16 @@
         iniset $NEUTRON_CONF DEFAULT ${I/=/ }
     done
 
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
+    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
     # Configure plugin
     neutron_plugin_configure_service
 }
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 7728eb1..a1b089e 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -23,7 +23,7 @@
 # Specify ncclient package information
 NCCLIENT_DIR=$DEST/ncclient
 NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
-NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git}
+NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git}
 NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
 
 # This routine put a prefix on an existing function name
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
new file mode 100644
index 0000000..3649f39
--- /dev/null
+++ b/lib/neutron_plugins/nuage
@@ -0,0 +1,69 @@
+# Nuage Neutron Plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function neutron_plugin_create_nova_conf {
+    NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
+    iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+}
+
+function neutron_plugin_install_agent_packages {
+    :
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage
+    Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini
+    Q_DB_NAME="nuage_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin"
+    Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions
+    #Nuage specific Neutron defaults. Actual value must be set and sourced
+    NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'}
+    NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'}
+    NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'}
+    NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'}
+    NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'}
+    NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'}
+    NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''}
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    :
+}
+
+function neutron_plugin_configure_l3_agent {
+    :
+}
+
+function neutron_plugin_configure_plugin_agent {
+    :
+}
+
+function neutron_plugin_configure_service {
+    iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/
+    iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH
+    iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION
+    iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS
+    iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE
+    iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME
+}
+
+function has_neutron_plugin_security_group {
+    # 1 means False here
+    return 1
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
new file mode 100644
index 0000000..0aebff6
--- /dev/null
+++ b/lib/neutron_plugins/oneconvergence
@@ -0,0 +1,76 @@
+# Neutron One Convergence plugin
+# ---------------------------
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+Q_L3_ENABLED=true
+Q_L3_ROUTER_PER_TENANT=true
+Q_USE_NAMESPACE=true
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+# Configure common parameters
+function neutron_plugin_configure_common {
+
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
+    Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
+    Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
+    Q_DB_NAME='oc_nvsd_neutron'
+}
+
+# Configure plugin specific information
+function neutron_plugin_configure_service {
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group {
+    # 0 (True) means this plugin supports security groups
+    return 0
+}
+
+function setup_integration_bridge {
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    setup_integration_bridge
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
+
+    _neutron_ovs_base_configure_firewall_driver
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
+        setup_integration_bridge
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 424a900..b2c1b61 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -18,14 +18,8 @@
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
 
-# configure_ryu can be called multiple times as neutron_pluing/ryu may call
-# this function for neutron-ryu-agent
-_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu {
-    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
-        setup_develop $RYU_DIR
-        _RYU_CONFIGURED=True
-    fi
+    :
 }
 
 function init_ryu {
@@ -63,6 +57,7 @@
 function install_ryu {
     if [[ "$_RYU_INSTALLED" == "False" ]]; then
         git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+        export PYTHONPATH=$RYU_DIR:$PYTHONPATH
         _RYU_INSTALLED=True
     fi
 }
diff --git a/lib/nova b/lib/nova
index 583a592..b01d107 100644
--- a/lib/nova
+++ b/lib/nova
@@ -308,7 +308,7 @@
     # Rebuild the config file from scratch
     create_nova_conf
 
-    if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         # Configure hypervisor plugin
         configure_nova_hypervisor
     fi
@@ -665,17 +665,6 @@
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # Enable client side traces for libvirt
-        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
-        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-        # Enable server side traces for libvirtd
-        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
@@ -726,19 +715,28 @@
     start_nova_rest
 }
 
-# stop_nova() - Stop running processes (non-screen)
-function stop_nova {
-    # Kill the nova screen windows
-    # Some services are listed here twice since more than one instance
-    # of a service may be running in certain configs.
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
-        screen_stop $serv
-    done
+function stop_nova_compute {
+    screen_stop n-cpu
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         stop_nova_hypervisor
     fi
 }
 
+function stop_nova_rest {
+    # Kill the nova screen windows
+    # Some services are listed here twice since more than one instance
+    # of a service may be running in certain configs.
+    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+        screen_stop $serv
+    done
+}
+
+# stop_nova() - Stop running processes (non-screen)
+function stop_nova {
+    stop_nova_rest
+    stop_nova_compute
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
deleted file mode 100644
index cdbc4d1..0000000
--- a/lib/nova_plugins/hypervisor-docker
+++ /dev/null
@@ -1,124 +0,0 @@
-# lib/nova_plugins/docker
-# Configure the Docker hypervisor
-
-# Enable with:
-#
-#   VIRT_DRIVER=docker
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``nova`` and ``glance`` configurations
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-DOCKER_DIR=$DEST/docker
-
-DOCKER_UNIX_SOCKET=/var/run/docker.sock
-DOCKER_PID_FILE=/var/run/docker.pid
-DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
-
-DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest}
-DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME
-DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest}
-DOCKER_REGISTRY_IMAGE_NAME=registry
-DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
-
-DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
-    stop_service docker
-
-    # Clean out work area
-    sudo rm -rf /var/lib/docker
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
-    iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
-    iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
-    # So far this is Ubuntu only
-    if ! is_ubuntu; then
-        die $LINENO "Docker is only supported on Ubuntu at this time"
-    fi
-
-    # Make sure Docker is installed
-    if ! is_package_installed lxc-docker; then
-        die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
-    fi
-
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running"
-    fi
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running, start the daemon"
-    fi
-
-    # Start the Docker registry container
-    docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \
-        -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \
-        -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \
-        -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \
-        -e OS_AUTH_URL=${OS_AUTH_URL} \
-        $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh
-
-    echo "Waiting for docker registry to start..."
-    DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then
-        die $LINENO "docker-registry did not start"
-    fi
-
-    # Tag image if not already tagged
-    if ! docker images | grep $DOCKER_REPOSITORY_NAME; then
-        docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME
-    fi
-
-    # Make sure we copied the image in Glance
-    if ! (glance image-show "$DOCKER_IMAGE"); then
-        docker push $DOCKER_REPOSITORY_NAME
-    fi
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
-    # Stop the docker registry container
-    docker kill $(docker ps | grep docker-registry | cut -d' ' -f1)
-}
-
-
-# Restore xtrace
-$MY_XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index bbf6554..5a51f33 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -25,6 +25,8 @@
 
 # File injection is disabled by default in Nova.  This will turn it back on.
 ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+# if we should turn on massive libvirt debugging
+DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT)
 
 
 # Entry Points
@@ -103,6 +105,18 @@
     fi
     add_user_to_group $STACK_USER $LIBVIRT_GROUP
 
+    # Enable server side traces for libvirtd
+    if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then
+        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+    fi
+
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
diff --git a/lib/opendaylight b/lib/opendaylight
new file mode 100644
index 0000000..ca81c20
--- /dev/null
+++ b/lib/opendaylight
@@ -0,0 +1,167 @@
+# lib/opendaylight
+# Functions to control the configuration and operation of the opendaylight service
+
+# Dependencies:
+#
+# - ``functions`` file
+# # ``DEST`` must be defined
+# # ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_opendaylight_enabled
+# - is_opendaylight-compute_enabled
+# - install_opendaylight
+# - install_opendaylight-compute
+# - configure_opendaylight
+# - init_opendaylight
+# - start_opendaylight
+# - stop_opendaylight-compute
+# - stop_opendaylight
+# - cleanup_opendaylight
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# For OVS_BRIDGE and PUBLIC_BRIDGE
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Defaults
+# --------
+
+# The IP address of ODL. Set this in local.conf.
+# ODL_MGR_IP=
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# <define global variables here that belong to this project>
+ODL_DIR=$DEST/opendaylight
+
+# The OpenDaylight Package, currently using 'Hydrogen' release
+ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip}
+
+# The OpenDaylight URL
+ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1}
+
+# Default arguments for OpenDaylight. This is typically used to set
+# Java memory options.
+#   ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m
+ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"}
+
+# How long to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60}
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# Test if OpenDaylight is enabled
+# is_opendaylight_enabled
+function is_opendaylight_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+# cleanup_opendaylight() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    :
+}
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    # Remove simple forwarder
+    rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding*
+
+    # Configure OpenFlow 1.3
+    echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    local _pwd=$(pwd)
+
+    if is_ubuntu; then
+        install_package maven openjdk-7-jre openjdk-7-jdk
+    else
+        yum_install maven java-1.7.0-openjdk
+    fi
+
+    # Download OpenDaylight
+    mkdir -p $ODL_DIR
+    cd $ODL_DIR
+    wget -N $ODL_URL/$ODL_PKG
+    unzip -u $ODL_PKG
+}
+
+# install_opendaylight-compute - Make sure OVS is installed
+function install_opendaylight-compute {
+    local kernel_version
+    # Install deps
+    # FIXME add to ``files/apts/neutron``, but don't install if not needed!
+    if is_ubuntu; then
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        install_package openvswitch
+        restart_service openvswitch-switch
+        restart_service openvswitch-controller
+    fi
+}
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    if is_ubuntu; then
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64
+    else
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk
+    fi
+
+    # The flags to ODL have the following meaning:
+    #   -of13: runs ODL using OpenFlow 1.3 protocol support.
+    #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+
+    # Sleep a bit to let OpenDaylight finish starting up
+    sleep $ODL_BOOT_WAIT
+}
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    screen_stop odl-server
+}
+
+# stop_opendaylight-compute() - Remove OVS bridges
+function stop_opendaylight-compute {
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Neutron
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/sahara b/lib/sahara
new file mode 100644
index 0000000..4cb04ec
--- /dev/null
+++ b/lib/sahara
@@ -0,0 +1,177 @@
+# lib/sahara
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_sahara
+# configure_sahara
+# start_sahara
+# stop_sahara
+# cleanup_sahara
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
+SAHARA_BRANCH=${SAHARA_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DIR=$DEST/sahara
+SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
+SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
+SAHARA_DEBUG=${SAHARA_DEBUG:-True}
+
+SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
+SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
+SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
+
+# Support entry points installation of console scripts
+if [[ -d $SAHARA_DIR/bin ]]; then
+    SAHARA_BIN_DIR=$SAHARA_DIR/bin
+else
+    SAHARA_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Tell Tempest this project is present
+TEMPEST_SERVICES+=,sahara
+
+# For backward compatibility with current tests in Tempest
+TEMPEST_SERVICES+=,savanna
+
+
+# Functions
+# ---------
+
+# create_sahara_accounts() - Set up common required sahara accounts
+#
+# Tenant      User       Roles
+# ------------------------------
+# service     sahara    admin
+function create_sahara_accounts {
+
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    SAHARA_USER=$(openstack user create \
+        sahara \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email sahara@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SAHARA_USER
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        SAHARA_SERVICE=$(openstack service create \
+            sahara \
+            --type=data_processing \
+            --description="Sahara Data Processing" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+            $SAHARA_SERVICE \
+            --region RegionOne \
+            --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+    fi
+}
+
+# cleanup_sahara() - Remove residual data files, anything left over from
+# previous runs that would need to clean up.
+function cleanup_sahara {
+
+    # Cleanup auth cache dir
+    sudo rm -rf $SAHARA_AUTH_CACHE_DIR
+}
+
+# configure_sahara() - Set config files, create data dirs, etc
+function configure_sahara {
+
+    if [[ ! -d $SAHARA_CONF_DIR ]]; then
+        sudo mkdir -p $SAHARA_CONF_DIR
+    fi
+    sudo chown $STACK_USER $SAHARA_CONF_DIR
+
+    # Copy over sahara configuration file and configure common parameters.
+    # TODO(slukjanov): rename when sahara internals will be updated
+    cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE
+
+    # Create auth cache dir
+    sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
+    rm -rf $SAHARA_AUTH_CACHE_DIR/*
+
+    # Set obsolete keystone auth configs for backward compatibility
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+    # Set actual keystone auth configs
+    iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
+    iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
+    iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG
+
+    iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
+
+    if is_service_enabled neutron; then
+        iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
+        iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
+    fi
+
+    if is_service_enabled heat; then
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
+    else
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
+    fi
+
+    iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
+    recreate_database sahara utf8
+    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
+}
+
+# install_sahara() - Collect source and prepare
+function install_sahara {
+    git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
+    setup_develop $SAHARA_DIR
+}
+
+# start_sahara() - Start running processes, including screen
+function start_sahara {
+    screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
+}
+
+# stop_sahara() - Stop running processes
+function stop_sahara {
+    # Kill the Sahara screen windows
+    screen -S $SCREEN_NAME -p sahara -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard
new file mode 100644
index 0000000..a81df0f
--- /dev/null
+++ b/lib/sahara-dashboard
@@ -0,0 +1,72 @@
+# lib/sahara-dashboard
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_HOST``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_sahara_dashboard
+# - configure_sahara_dashboard
+# - cleanup_sahara_dashboard
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/horizon
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git}
+SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master}
+
+SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
+SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard
+SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
+
+# Functions
+# ---------
+
+function configure_sahara_dashboard {
+
+    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+
+    if is_service_enabled neutron; then
+        echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    fi
+}
+
+# install_sahara_dashboard() - Collect source and prepare
+function install_sahara_dashboard {
+    install_python_saharaclient
+    git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH
+    setup_develop $SAHARA_DASHBOARD_DIR
+}
+
+function install_python_saharaclient {
+    git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
+    setup_develop $SAHARA_PYTHONCLIENT_DIR
+}
+
+# Cleanup file settings.py from Sahara
+function cleanup_sahara_dashboard {
+    sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/savanna b/lib/savanna
deleted file mode 100644
index 2cb092c..0000000
--- a/lib/savanna
+++ /dev/null
@@ -1,173 +0,0 @@
-# lib/savanna
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_savanna
-# configure_savanna
-# start_savanna
-# stop_savanna
-# cleanup_savanna
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git}
-SAVANNA_BRANCH=${SAVANNA_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DIR=$DEST/savanna
-SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
-SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf
-SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
-
-SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
-SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
-SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
-
-# Support entry points installation of console scripts
-if [[ -d $SAVANNA_DIR/bin ]]; then
-    SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
-else
-    SAVANNA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,savanna
-
-
-# Functions
-# ---------
-
-# create_savanna_accounts() - Set up common required savanna accounts
-#
-# Tenant      User       Roles
-# ------------------------------
-# service     savanna    admin
-function create_savanna_accounts {
-
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
-    SAVANNA_USER=$(openstack user create \
-        savanna \
-        --password "$SERVICE_PASSWORD" \
-        --project $SERVICE_TENANT \
-        --email savanna@example.com \
-        | grep " id " | get_field 2)
-    openstack role add \
-        $ADMIN_ROLE \
-        --project $SERVICE_TENANT \
-        --user $SAVANNA_USER
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SAVANNA_SERVICE=$(openstack service create \
-            savanna \
-            --type=data_processing \
-            --description="Savanna Data Processing" \
-            | grep " id " | get_field 2)
-        openstack endpoint create \
-            $SAVANNA_SERVICE \
-            --region RegionOne \
-            --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-    fi
-}
-
-# cleanup_savanna() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_savanna {
-
-    # Cleanup auth cache dir
-    sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
-}
-
-# configure_savanna() - Set config files, create data dirs, etc
-function configure_savanna {
-
-    if [[ ! -d $SAVANNA_CONF_DIR ]]; then
-        sudo mkdir -p $SAVANNA_CONF_DIR
-    fi
-    sudo chown $STACK_USER $SAVANNA_CONF_DIR
-
-    # Copy over savanna configuration file and configure common parameters.
-    cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
-
-    # Create auth cache dir
-    sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
-    rm -rf $SAVANNA_AUTH_CACHE_DIR/*
-
-    # Set obsolete keystone auth configs for backward compatibility
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
-
-    # Set actual keystone auth configs
-    iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
-    iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-
-    iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
-
-    iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
-
-    if is_service_enabled neutron; then
-        iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true
-        iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
-    fi
-
-    if is_service_enabled heat; then
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
-    else
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
-    fi
-
-    iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
-    recreate_database savanna utf8
-    $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head
-}
-
-# install_savanna() - Collect source and prepare
-function install_savanna {
-    git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
-    setup_develop $SAVANNA_DIR
-}
-
-# start_savanna() - Start running processes, including screen
-function start_savanna {
-    screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE"
-}
-
-# stop_savanna() - Stop running processes
-function stop_savanna {
-    # Kill the Savanna screen windows
-    screen -S $SCREEN_NAME -p savanna -X kill
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
deleted file mode 100644
index 6fe15a3..0000000
--- a/lib/savanna-dashboard
+++ /dev/null
@@ -1,72 +0,0 @@
-# lib/savanna-dashboard
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_HOST``
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_savanna_dashboard
-# - configure_savanna_dashboard
-# - cleanup_savanna_dashboard
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/horizon
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git}
-SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master}
-
-SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git}
-SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
-SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
-
-# Functions
-# ---------
-
-function configure_savanna_dashboard {
-
-    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-    echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-
-    if is_service_enabled neutron; then
-        echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    fi
-}
-
-# install_savanna_dashboard() - Collect source and prepare
-function install_savanna_dashboard {
-    install_python_savannaclient
-    git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
-    setup_develop $SAVANNA_DASHBOARD_DIR
-}
-
-function install_python_savannaclient {
-    git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
-    setup_develop $SAVANNA_PYTHONCLIENT_DIR
-}
-
-# Cleanup file settings.py from Savanna
-function cleanup_savanna_dashboard {
-    sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
-}
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
-
diff --git a/lib/swift b/lib/swift
index 5d4d4ef..b655440 100644
--- a/lib/swift
+++ b/lib/swift
@@ -67,8 +67,8 @@
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
-# Default is ``staticweb, tempurl, formpost``
-SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb}
+# Default is ``staticweb, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at
 # the end of the pipeline.
@@ -687,6 +687,11 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
+    # Maintain the iteration as screen_stop() has some desirable side-effects
+    for type in proxy object container account; do
+        screen_stop s-${type}
+    done
+    # Blast out any stragglers
     pkill -f swift-
 }
 
diff --git a/lib/tempest b/lib/tempest
index 16f8744..c74f00d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -149,8 +149,12 @@
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # See files/keystone_data.sh where alt_demo user
-    # and tenant are set up...
+    # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo
+    # user and tenant are set up...
+    ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+    ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+    TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo}
+    TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo}
     ALT_USERNAME=${ALT_USERNAME:-alt_demo}
     ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 
@@ -254,11 +258,16 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
+    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
     iniset $TEMPEST_CONFIG identity password "$password"
+    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
     iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG identity admin_password "$password"
+    iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
 
     # Image
     # for the gate we want to be able to override this variable so we aren't
@@ -285,7 +294,9 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
-    iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+    iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
+    iniset $TEMPEST_CONFIG "compute-admin" password "$password"
+    iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -293,7 +304,7 @@
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
-    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/stack.sh b/stack.sh
index ab1e8fe..a160464 100755
--- a/stack.sh
+++ b/stack.sh
@@ -142,7 +142,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -541,6 +541,7 @@
                     print
                     print > logfile
                     fflush("")
+                    fflush(logfile)
                 }' ) 2>&1
         # Set up a second fd for output
         exec 6> >( tee "${SUMFILE}" )
@@ -934,8 +935,7 @@
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \
-    HEAT_API_PORT=$HEAT_API_PORT \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
@@ -1419,3 +1419,9 @@
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo_summary "stack.sh completed in $SECONDS seconds."
+
+# Restore/close logging file descriptors
+exec 1>&3
+exec 2>&3
+exec 3>&-
+exec 6>&-
diff --git a/stackrc b/stackrc
index 6bb6f37..4566378 100644
--- a/stackrc
+++ b/stackrc
@@ -320,9 +320,6 @@
     openvz)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
-    docker)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros}
-        IMAGE_URLS=${IMAGE_URLS:-};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
@@ -335,7 +332,7 @@
         ;;
     vsphere)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
-        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
+        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};;
     xenserver)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
         IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
diff --git a/tools/docker/README.md b/tools/docker/README.md
deleted file mode 100644
index 976111f..0000000
--- a/tools/docker/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# DevStack on Docker
-
-Using Docker as Nova's hypervisor requries two steps:
-
-* Configure DevStack by adding the following to `localrc`::
-
-    VIRT_DRIVER=docker
-
-* Download and install the Docker service and images::
-
-    tools/docker/install_docker.sh
-
-After this, `stack.sh` should run as normal.
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
deleted file mode 100755
index 27c8c82..0000000
--- a/tools/docker/install_docker.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-# **install_docker.sh** - Do the initial Docker installation and configuration
-
-# install_docker.sh
-#
-# Install docker package and images
-# * downloads a base busybox image and a glance registry image if necessary
-# * install the images in Docker's image cache
-
-
-# Keep track of the current directory
-SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-FILES=$TOP_DIR/files
-
-# Get our defaults
-source $TOP_DIR/lib/nova_plugins/hypervisor-docker
-
-SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
-
-
-# Install Docker Service
-# ======================
-
-if is_fedora; then
-    install_package docker-io socat
-else
-    # Stop the auto-repo updates and do it when required here
-    NO_UPDATE_REPOS=True
-
-    # Set up home repo
-    curl https://get.docker.io/gpg | sudo apt-key add -
-    install_package python-software-properties && \
-        sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
-    apt_get update
-    install_package --force-yes lxc-docker socat
-fi
-
-# Start the daemon - restart just in case the package ever auto-starts...
-restart_service docker
-
-echo "Waiting for docker daemon to start..."
-DOCKER_GROUP=$(groups | cut -d' ' -f1)
-CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do
-    # Set the right group on docker unix socket before retrying
-    sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET
-    sudo chmod g+rw $DOCKER_UNIX_SOCKET
-    sleep 1
-done"
-if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then
-    die $LINENO "docker did not start"
-fi
-
-# Get guest container image
-docker pull $DOCKER_IMAGE
-docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME
-
-# Get docker-registry image
-docker pull $DOCKER_REGISTRY_IMAGE
-docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 440774e..2b5e418 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -73,7 +73,7 @@
 # Install basics
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr
+apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
 pip install xenapi
 
 # Install XenServer guest utilities