Merge "Do not restart libvirt if n-cpu is disabled"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8dc44ef..8b7b961 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -178,6 +178,10 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
     die $LINENO "Failure deleting security group rule from $SECGROUP"
 
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+    die $LINENO "Security group rule not deleted from $SECGROUP"
+fi
+
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
new file mode 100644
index 0000000..57b4328
--- /dev/null
+++ b/extras.d/80-opendaylight.sh
@@ -0,0 +1,69 @@
+# opendaylight.sh - DevStack extras script
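+# Called with the hook phase in $1 ("source", "stack", "unstack" or "clean") and,
+# for the "stack" phase, the sub-phase in $2 (install, post-config, extra, post-extra)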
+
+if is_service_enabled odl-server odl-compute; then
+    # Initial source
+    [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
+fi
+
+if is_service_enabled odl-server; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight-compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        create_nova_conf_neutron
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing OpenDaylight"
+        ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+        ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
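+        # Register this node's OVS with the ODL OVSDB manager and record its local_ip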
+        read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+        sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
+        sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        sudo ovs-vsctl del-manager
+        BRIDGES=$(sudo ovs-vsctl list-br)
+        for bridge in $BRIDGES ; do
+            sudo ovs-vsctl del-controller $bridge
+        done
+
+        stop_opendaylight-compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index 71007ba..f1b692a 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,5 +1,5 @@
-python-pymongo
-mongodb-server
+python-pymongo # NOPRIME
+mongodb-server # NOPRIME
 libnspr4-dev
 pkg-config
 libxml2-dev
diff --git a/files/apts/general b/files/apts/general
index 32d31f0..995c0c6 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,8 +9,6 @@
 lsof # useful when debugging
 openssh-server
 openssl
-vim-nox
-locate # useful when debugging
 python-virtualenv
 python-unittest2
 iputils-ping
diff --git a/files/apts/opendaylight b/files/apts/opendaylight
new file mode 100644
index 0000000..ec3cc9d
--- /dev/null
+++ b/files/apts/opendaylight
@@ -0,0 +1,2 @@
+openvswitch-datapath-dkms # NOPRIME
+openvswitch-switch # NOPRIME
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 9a34c76..fc1e813 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -53,41 +53,6 @@
         --role ResellerAdmin
 fi
 
-# Heat
-if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
-    keystone user-create --name=heat \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=heat@example.com
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user heat \
-        --role service
-    # heat_stack_user role is for users created by Heat
-    keystone role-create --name heat_stack_user
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=heat-cfn \
-            --type=cloudformation \
-            --description="Heat CloudFormation Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat-cfn \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
-        keystone service-create \
-            --name=heat \
-            --type=orchestration \
-            --description="Heat Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
-    fi
-fi
-
 # Glance
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
     keystone user-create \
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
new file mode 100644
index 0000000..61f73ee
--- /dev/null
+++ b/files/rpms-suse/baremetal
@@ -0,0 +1 @@
+dnsmasq
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 704947e..ff27a3a 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,23 +1,22 @@
+bc
 bridge-utils
 ca-certificates-mozilla
 curl
 euca2ools
 git-core
 iputils
+libopenssl-devel # to rebuild pyOpenSSL if needed
+lsof # useful when debugging
+make
 openssh
 openssl
 psmisc
-python-setuptools # instead of python-distribute; dist:sle11sp2
 python-cmd2 # dist:opensuse-12.3
 python-pylint
+python-setuptools # instead of python-distribute; dist:sle11sp2
 python-unittest2
 screen
 tar
 tcpdump
 unzip
-vim-enhanced
 wget
-bc
-
-findutils-locate # useful when debugging
-lsof # useful when debugging
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index dd68ac0..d9844e9 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -8,5 +8,6 @@
 python-eventlet
 python-greenlet
 python-iso8601
+python-pyOpenSSL
 python-wsgiref
 python-xattr
diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight
new file mode 100644
index 0000000..d6c7146
--- /dev/null
+++ b/files/rpms-suse/opendaylight
@@ -0,0 +1,4 @@
+openvswitch # NOPRIME
+openvswitch-controller # NOPRIME
+openvswitch-switch # NOPRIME
+
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
 python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c91bac3..9cf580d 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,4 +1,4 @@
 selinux-policy-targeted
-mongodb-server
-pymongo
+mongodb-server # NOPRIME
+pymongo # NOPRIME
 mongodb # NOPRIME
diff --git a/files/rpms/glance b/files/rpms/glance
index 25c5d39..534097a 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -13,6 +13,6 @@
 python-paste-deploy #dist:f18,f19,f20,rhel7
 python-routes
 python-sqlalchemy
-python-wsgiref
+python-wsgiref      #dist:f18,f19,f20
 pyxattr
 zlib-devel          # testonly
diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight
new file mode 100644
index 0000000..98aaaf4
--- /dev/null
+++ b/files/rpms/opendaylight
@@ -0,0 +1 @@
+openvswitch # NOPRIME
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/functions b/functions
index ab8319b..1d30922 100644
--- a/functions
+++ b/functions
@@ -163,38 +163,37 @@
             if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
                             `" Filename provided: ${IMAGE_NAME}"
-            else
-                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-                flat_path="${image_url:0:$path_len}"
-                descriptor_url=$flat_path$descriptor_fname
-                warn $LINENO "$descriptor_data_pair_msg"`
-                                `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
-                if [[ $flat_path != file* ]]; then
-                    if [[ ! -f $FILES/$descriptor_fname || \
-                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
-                        wget -c $descriptor_url -O $FILES/$descriptor_fname
-                        if [[ $? -ne 0 ]]; then
-                            warn $LINENO "Descriptor not found $descriptor_url"
-                            descriptor_found=false
-                        fi
-                    fi
-                    descriptor_url="$FILES/$descriptor_fname"
-                else
-                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
-                    if [[ ! -f $descriptor_url || \
-                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+            fi
+
+            descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+            flat_path="${image_url:0:$path_len}"
+            descriptor_url=$flat_path$descriptor_fname
+            warn $LINENO "$descriptor_data_pair_msg"`
+                            `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+            if [[ $flat_path != file* ]]; then
+                if [[ ! -f $FILES/$descriptor_fname || \
+                "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                    wget -c $descriptor_url -O $FILES/$descriptor_fname
+                    if [[ $? -ne 0 ]]; then
                         warn $LINENO "Descriptor not found $descriptor_url"
                         descriptor_found=false
                     fi
                 fi
-                if $descriptor_found; then
-                    vmdk_adapter_type="$(head -25 $descriptor_url |"`
-                    `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
-                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
-                    vmdk_adapter_type="${vmdk_adapter_type%?}"
+                descriptor_url="$FILES/$descriptor_fname"
+            else
+                descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                if [[ ! -f $descriptor_url || \
+                "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                    warn $LINENO "Descriptor not found $descriptor_url"
+                    descriptor_found=false
                 fi
             fi
+            if $descriptor_found; then
+                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
+                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                vmdk_adapter_type="${vmdk_adapter_type%?}"
+            fi
             vmdk_disktype="preallocated"
         else
             vmdk_disktype="preallocated"
diff --git a/functions-common b/functions-common
index 0db3ff3..90cd3df 100644
--- a/functions-common
+++ b/functions-common
@@ -938,9 +938,24 @@
     [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    # The manual check for missing packages is because yum -y assumes
+    # missing packages are OK.  See
+    # https://bugzilla.redhat.com/show_bug.cgi?id=965567
     $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
         no_proxy=$no_proxy \
-        yum install -y "$@"
+        yum install -y "$@" 2>&1 | \
+        awk '
+            BEGIN { fail=0 }
+            /No package/ { fail=1 }
+            { print }
+            END { exit fail }' || \
+                die $LINENO "Missing packages detected"
+
+    # also ensure we catch a yum failure
+    if [[ ${PIPESTATUS[0]} != 0 ]]; then
+        die $LINENO "Yum install failure"
+    fi
 }
 
 # zypper wrapper to set arguments correctly
@@ -1233,7 +1248,7 @@
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements = "changed" ]]; then
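+    # Only sync global requirements into trees that start out clean (no local diff)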
+    if [[ $update_requirements != "changed" ]]; then
         (cd $REQUIREMENTS_DIR; \
             $SUDO_CMD python update.py $project_dir)
     fi
@@ -1249,7 +1264,7 @@
     # a variable that tells us whether or not we should UNDO the requirements
     # changes (this will be set to False in the OpenStack ci gate)
     if [ $UNDO_REQUIREMENTS = "True" ]; then
-        if [[ $update_requirements = "changed" ]]; then
+        if [[ $update_requirements != "changed" ]]; then
             (cd $project_dir && git reset --hard)
         fi
     fi
diff --git a/lib/ceilometer b/lib/ceilometer
index 04c1a34..6aaddce 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -106,7 +106,9 @@
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer {
-    mongo ceilometer --eval "db.dropDatabase();"
+    if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+        mongo ceilometer --eval "db.dropDatabase();"
+    fi
 }
 
 # configure_ceilometerclient() - Set config files, create data dirs, etc
@@ -129,6 +131,7 @@
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
+    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -163,14 +166,27 @@
 }
 
 function configure_mongodb {
+    # server package is the same on all
+    local packages=mongodb-server
+
     if is_fedora; then
-        # install mongodb client
-        install_package mongodb
+        # mongodb client + python bindings
+        packages="${packages} mongodb pymongo"
+    else
+        packages="${packages} python-pymongo"
+    fi
+
+    install_package ${packages}
+
+    if is_fedora; then
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
         restart_service mongod
     fi
+
+    # give mongodb time to start-up
+    sleep 5
 }
 
 # init_ceilometer() - Initialize etc.
diff --git a/lib/heat b/lib/heat
index d0c0302..2d9d863 100644
--- a/lib/heat
+++ b/lib/heat
@@ -197,8 +197,62 @@
 }
 
 # create_heat_accounts() - Set up common required heat accounts
-# Note this is in addition to what is in files/keystone_data.sh
 function create_heat_accounts {
+    # migrated from files/keystone_data.sh
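+    # Look up the service tenant and admin role IDs needed for the heat service user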
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    HEAT_USER=$(openstack user create \
+        heat \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email heat@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $HEAT_USER
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        HEAT_SERVICE=$(openstack service create \
+            heat \
+            --type=orchestration \
+            --description="Heat Orchestration Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+        HEAT_CFN_SERVICE=$(openstack service create \
+            heat-cfn \
+            --type=cloudformation \
+            --description="Heat CloudFormation Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_CFN_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+    fi
+
+    # heat_stack_user role is for users created by Heat
+    openstack role create heat_stack_user
+
+    # heat_stack_owner role is given to users who create Heat stacks,
+    # it's the default role used by heat to delegate to the heat service
+    # user (for performing deferred operations via trusts), see heat.conf
+    HEAT_OWNER_ROLE=$(openstack role create \
+        heat_stack_owner \
+        | grep " id " | get_field 2)
+
+    # Give the role to the demo and admin users so they can create stacks
+    # in either of the projects created by devstack
+    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+
     # Note we have to pass token/endpoint here because the current endpoint and
     # version negotiation in OSC means just --os-identity-api-version=3 won't work
     KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
diff --git a/lib/ldap b/lib/ldap
index 51d0251..efe2f09 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -154,7 +154,7 @@
 
 # clear_ldap_state() - Clear LDAP State
 function clear_ldap_state {
-    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
+    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || :
 }
 
 # Restore xtrace
diff --git a/lib/marconi b/lib/marconi
index 29ae386..3c4547f 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -34,7 +34,8 @@
 MARCONICLIENT_DIR=$DEST/python-marconiclient
 MARCONI_CONF_DIR=/etc/marconi
 MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_API_LOG_DIR=/var/log/marconi
+MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
 MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
 
 # Support potential entry-points console scripts
@@ -96,6 +97,7 @@
 
     iniset $MARCONI_CONF DEFAULT verbose True
     iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+    iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
@@ -104,8 +106,12 @@
     iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
 
-    if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
-        iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
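+    # SQL backends use the sqlalchemy storage driver; anything else falls back to mongodb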
+    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
+        iniset $MARCONI_CONF drivers storage sqlalchemy
+        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
+    else
+        iniset $MARCONI_CONF drivers storage mongodb
+        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
         configure_mongodb
         cleanup_marconi
     fi
@@ -148,7 +154,7 @@
 
 # start_marconi() - Start running processes, including screen
 function start_marconi {
-    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1"
     echo "Waiting for Marconi to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
         die $LINENO "Marconi did not start"
diff --git a/lib/neutron b/lib/neutron
index bb591ab..84e8277 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 # nova vif driver that all plugins should use
 NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
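+# Notify Nova about Neutron port changes and make Nova wait for VIF plugging events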
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
+
+    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -754,6 +761,16 @@
         iniset $NEUTRON_CONF DEFAULT ${I/=/ }
     done
 
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
+    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
     # Configure plugin
     neutron_plugin_configure_service
 }
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
new file mode 100644
index 0000000..0aebff6
--- /dev/null
+++ b/lib/neutron_plugins/oneconvergence
@@ -0,0 +1,76 @@
+# Neutron One Convergence plugin
+# ---------------------------
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+Q_L3_ENABLED=true
+Q_L3_ROUTER_PER_TENANT=true
+Q_USE_NAMESPACE=true
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+# Configure common parameters
+function neutron_plugin_configure_common {
+
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
+    Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
+    Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
+    Q_DB_NAME='oc_nvsd_neutron'
+}
+
+# Configure plugin specific information
+function neutron_plugin_configure_service {
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group {
+    # 0 means True here
+    return 0
+}
+
+function setup_integration_bridge {
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    setup_integration_bridge
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
+
+    _neutron_ovs_base_configure_firewall_driver
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
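+    # Compute-only nodes (n-cpu without q-dhcp) have to create the integration bridge themselves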
+    if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
+        setup_integration_bridge
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 424a900..b2c1b61 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -18,14 +18,8 @@
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
 
-# configure_ryu can be called multiple times as neutron_pluing/ryu may call
-# this function for neutron-ryu-agent
-_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu {
-    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
-        setup_develop $RYU_DIR
-        _RYU_CONFIGURED=True
-    fi
+    :
 }
 
 function init_ryu {
@@ -63,6 +57,7 @@
 function install_ryu {
     if [[ "$_RYU_INSTALLED" == "False" ]]; then
         git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+        export PYTHONPATH=$RYU_DIR:$PYTHONPATH
         _RYU_INSTALLED=True
     fi
 }
diff --git a/lib/nova b/lib/nova
index 2f6d04d..a1f49dc 100644
--- a/lib/nova
+++ b/lib/nova
@@ -665,17 +665,6 @@
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # Enable client side traces for libvirt
-        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
-        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-        # Enable server side traces for libvirtd
-        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
@@ -731,7 +720,7 @@
     # Kill the nova screen windows
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
+    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
         screen_stop $serv
     done
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index bbf6554..5a51f33 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -25,6 +25,8 @@
 
 # File injection is disabled by default in Nova.  This will turn it back on.
 ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+# if we should turn on massive libvirt debugging
+DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT)
 
 
 # Entry Points
@@ -103,6 +105,18 @@
     fi
     add_user_to_group $STACK_USER $LIBVIRT_GROUP
 
+    # Enable server side traces for libvirtd
+    if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then
+        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+    fi
+
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
diff --git a/lib/opendaylight b/lib/opendaylight
new file mode 100644
index 0000000..ca81c20
--- /dev/null
+++ b/lib/opendaylight
@@ -0,0 +1,167 @@
+# lib/opendaylight
+# Functions to control the configuration and operation of the opendaylight service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST`` must be defined
+# - ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_opendaylight_enabled
+# - is_opendaylight-compute_enabled
+# - install_opendaylight
+# - install_opendaylight-compute
+# - configure_opendaylight
+# - init_opendaylight
+# - start_opendaylight
+# - stop_opendaylight-compute
+# - stop_opendaylight
+# - cleanup_opendaylight
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# For OVS_BRIDGE and PUBLIC_BRIDGE
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Defaults
+# --------
+
+# The IP address of ODL. Set this in local.conf.
+# ODL_MGR_IP=
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# <define global variables here that belong to this project>
+ODL_DIR=$DEST/opendaylight
+
+# The OpenDaylight Package, currently using 'Hydrogen' release
+ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip}
+
+# The OpenDaylight URL
+ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1}
+
+# Default arguments for OpenDaylight. This is typically used to set
+# Java memory options.
+#   ODL_ARGS="-Xmx1024m -XX:MaxPermSize=512m"
+ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"}
+
+# How long to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60}
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# Test if OpenDaylight is enabled
+# is_opendaylight_enabled
+function is_opendaylight_enabled {
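+    # True when any enabled service name starts with "odl-"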
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+# cleanup_opendaylight() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    :
+}
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    # Remove simple forwarder
+    rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding*
+
+    # Configure OpenFlow 1.3
+    echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    local _pwd=$(pwd)
+
+    if is_ubuntu; then
+        install_package maven openjdk-7-jre openjdk-7-jdk
+    else
+        yum_install maven java-1.7.0-openjdk
+    fi
+
+    # Download OpenDaylight
+    mkdir -p $ODL_DIR
+    cd $ODL_DIR
+    wget -N $ODL_URL/$ODL_PKG
+    unzip -u $ODL_PKG
+}
+
+# install_opendaylight-compute - Make sure OVS is installed
+function install_opendaylight-compute {
+    local kernel_version
+    # Install deps
+    # FIXME add to ``files/apts/neutron``, but don't install if not needed!
+    if is_ubuntu; then
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        install_package openvswitch
+        restart_service openvswitch-switch
+        restart_service openvswitch-controller
+    fi
+}
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    if is_ubuntu; then
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64
+    else
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk
+    fi
+
+    # The flags to ODL have the following meaning:
+    #   -of13: runs ODL using OpenFlow 1.3 protocol support.
+    #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+
+    # Sleep a bit to let OpenDaylight finish starting up
+    sleep $ODL_BOOT_WAIT
+}
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    screen_stop odl-server
+}
+
+# stop_opendaylight-compute() - Remove OVS bridges
+function stop_opendaylight-compute {
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Neutron
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/swift b/lib/swift
index 5d4d4ef..b655440 100644
--- a/lib/swift
+++ b/lib/swift
@@ -67,8 +67,8 @@
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
-# Default is ``staticweb, tempurl, formpost``
-SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb}
+# Default is ``staticweb, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at
 # the end of the pipeline.
@@ -687,6 +687,11 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
+    # Maintain the iteration as screen_stop() has some desirable side-effects
+    for type in proxy object container account; do
+        screen_stop s-${type}
+    done
+    # Blast out any stragglers
     pkill -f swift-
 }
 
diff --git a/lib/tempest b/lib/tempest
index 16f8744..c74f00d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -149,8 +149,12 @@
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # See files/keystone_data.sh where alt_demo user
-    # and tenant are set up...
+    # See files/keystone_data.sh and stack.sh where the admin, demo and alt_demo
+    # users and tenants are set up...
+    ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+    ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+    TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo}
+    TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo}
     ALT_USERNAME=${ALT_USERNAME:-alt_demo}
     ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 
@@ -254,11 +258,16 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
+    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
     iniset $TEMPEST_CONFIG identity password "$password"
+    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
     iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG identity admin_password "$password"
+    iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
 
     # Image
     # for the gate we want to be able to override this variable so we aren't
@@ -285,7 +294,9 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
-    iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+    iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
+    iniset $TEMPEST_CONFIG "compute-admin" password "$password"
+    iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -293,7 +304,7 @@
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
-    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/stack.sh b/stack.sh
index ab1e8fe..e76a55c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -142,7 +142,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -934,8 +934,7 @@
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \
-    HEAT_API_PORT=$HEAT_API_PORT \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
@@ -1419,3 +1418,9 @@
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo_summary "stack.sh completed in $SECONDS seconds."
+
+# Restore/close logging file descriptors
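+# (fds 3 and 6 were opened by the logging setup earlier in this script)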
+exec 1>&3
+exec 2>&3
+exec 3>&-
+exec 6>&-
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 440774e..2b5e418 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -73,7 +73,7 @@
 # Install basics
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr
+apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
 pip install xenapi
 
 # Install XenServer guest utilities