Merge "Add heat_stack_owner role for heat trusts usage"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8dc44ef..8b7b961 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -178,6 +178,10 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
     die $LINENO "Failure deleting security group rule from $SECGROUP"
 
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+    die $LINENO "Security group rule not deleted from $SECGROUP"
+fi
+
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
new file mode 100644
index 0000000..cc5c8de
--- /dev/null
+++ b/extras.d/80-opendaylight.sh
@@ -0,0 +1,67 @@
+# opendaylight.sh - DevStack extras script
+
+# Need this first to get the is_***_enabled for ODL
+source $TOP_DIR/lib/opendaylight
+
+if is_service_enabled odl-server; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight-compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        create_nova_conf_neutron
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing OpenDaylight"
+        ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+        ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
+        read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+        sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
+        sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        sudo ovs-vsctl del-manager
+        BRIDGES=$(sudo ovs-vsctl list-br)
+        for bridge in $BRIDGES ; do
+            sudo ovs-vsctl del-controller $bridge
+        done
+
+        stop_opendaylight-compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
diff --git a/files/apts/general b/files/apts/general
index 32d31f0..995c0c6 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,8 +9,6 @@
 lsof # useful when debugging
 openssh-server
 openssl
-vim-nox
-locate # useful when debugging
 python-virtualenv
 python-unittest2
 iputils-ping
diff --git a/files/apts/opendaylight b/files/apts/opendaylight
new file mode 100644
index 0000000..ec3cc9d
--- /dev/null
+++ b/files/apts/opendaylight
@@ -0,0 +1,2 @@
+openvswitch-datapath-dkms # NOPRIME
+openvswitch-switch # NOPRIME
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
new file mode 100644
index 0000000..61f73ee
--- /dev/null
+++ b/files/rpms-suse/baremetal
@@ -0,0 +1 @@
+dnsmasq
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 704947e..ff27a3a 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,23 +1,22 @@
+bc
 bridge-utils
 ca-certificates-mozilla
 curl
 euca2ools
 git-core
 iputils
+libopenssl-devel # to rebuild pyOpenSSL if needed
+lsof # useful when debugging
+make
 openssh
 openssl
 psmisc
-python-setuptools # instead of python-distribute; dist:sle11sp2
 python-cmd2 # dist:opensuse-12.3
 python-pylint
+python-setuptools # instead of python-distribute; dist:sle11sp2
 python-unittest2
 screen
 tar
 tcpdump
 unzip
-vim-enhanced
 wget
-bc
-
-findutils-locate # useful when debugging
-lsof # useful when debugging
diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight
new file mode 100644
index 0000000..d6c7146
--- /dev/null
+++ b/files/rpms-suse/opendaylight
@@ -0,0 +1,4 @@
+openvswitch # NOPRIME
+openvswitch-controller # NOPRIME
+openvswitch-switch # NOPRIME
+
diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight
new file mode 100644
index 0000000..98aaaf4
--- /dev/null
+++ b/files/rpms/opendaylight
@@ -0,0 +1 @@
+openvswitch # NOPRIME
diff --git a/functions b/functions
index a844b1c..1d30922 100644
--- a/functions
+++ b/functions
@@ -55,7 +55,7 @@
     mkdir -p $FILES/images
     IMAGE_FNAME=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
-        # Downloads the image (uec ami+akistyle), then extracts it.
+        # Downloads the image (uec ami+aki style), then extracts it.
         if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
             wget -c $image_url -O $FILES/$IMAGE_FNAME
             if [[ $? -ne 0 ]]; then
@@ -103,12 +103,12 @@
         vmdk_net_adapter=""
 
         # vmdk adapter type
-        vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)"
+        vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })"
         vmdk_adapter_type="${vmdk_adapter_type#*\"}"
         vmdk_adapter_type="${vmdk_adapter_type%?}"
 
         # vmdk disk type
-        vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)"
+        vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })"
         vmdk_create_type="${vmdk_create_type#*\"}"
         vmdk_create_type="${vmdk_create_type%\"*}"
 
@@ -119,7 +119,7 @@
         elif [[ "$vmdk_create_type" = "monolithicFlat" || \
         "$vmdk_create_type" = "vmfs" ]]; then
             # Attempt to retrieve the *-flat.vmdk
-            flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)"
+            flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })"
             flat_fname="${flat_fname#*\"}"
             flat_fname="${flat_fname%?}"
             if [[ -z "$flat_name" ]]; then
@@ -163,38 +163,37 @@
             if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
                             `" Filename provided: ${IMAGE_NAME}"
-            else
-                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-                flat_path="${image_url:0:$path_len}"
-                descriptor_url=$flat_path$descriptor_fname
-                warn $LINENO "$descriptor_data_pair_msg"`
-                                `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
-                if [[ $flat_path != file* ]]; then
-                    if [[ ! -f $FILES/$descriptor_fname || \
-                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
-                        wget -c $descriptor_url -O $FILES/$descriptor_fname
-                        if [[ $? -ne 0 ]]; then
-                            warn $LINENO "Descriptor not found $descriptor_url"
-                            descriptor_found=false
-                        fi
-                    fi
-                    descriptor_url="$FILES/$descriptor_fname"
-                else
-                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
-                    if [[ ! -f $descriptor_url || \
-                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+            fi
+
+            descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+            flat_path="${image_url:0:$path_len}"
+            descriptor_url=$flat_path$descriptor_fname
+            warn $LINENO "$descriptor_data_pair_msg"`
+                            `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+            if [[ $flat_path != file* ]]; then
+                if [[ ! -f $FILES/$descriptor_fname || \
+                "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                    wget -c $descriptor_url -O $FILES/$descriptor_fname
+                    if [[ $? -ne 0 ]]; then
                         warn $LINENO "Descriptor not found $descriptor_url"
                         descriptor_found=false
                     fi
                 fi
-                if $descriptor_found; then
-                    vmdk_adapter_type="$(head -25 $descriptor_url |"`
-                    `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
-                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
-                    vmdk_adapter_type="${vmdk_adapter_type%?}"
+                descriptor_url="$FILES/$descriptor_fname"
+            else
+                descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                if [[ ! -f $descriptor_url || \
+                "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                    warn $LINENO "Descriptor not found $descriptor_url"
+                    descriptor_found=false
                 fi
             fi
+            if $descriptor_found; then
+                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
+                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                vmdk_adapter_type="${vmdk_adapter_type%?}"
+            fi
             vmdk_disktype="preallocated"
         else
             vmdk_disktype="preallocated"
@@ -203,7 +202,7 @@
         # NOTE: For backwards compatibility reasons, colons may be used in place
         # of semi-colons for property delimiters but they are not permitted
         # characters in NTFS filesystems.
-        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'`
+        property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }`
         IFS=':;' read -a props <<< "$property_string"
         vmdk_disktype="${props[0]:-$vmdk_disktype}"
         vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
diff --git a/lib/baremetal b/lib/baremetal
index 473de0d..1d02e1e 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -77,14 +77,6 @@
 # These should be customized to your environment and hardware
 # -----------------------------------------------------------
 
-# whether to create a fake environment, eg. for devstack-gate
-BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
-
-# Extra options to pass to bm_poseur
-# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
-# change the virtualization type: --engine qemu
-BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
-
 # To provide PXE, configure nova-network's dnsmasq rather than run the one
 # dedicated to baremetal. When enabling this, make sure these conditions are
 # fulfilled:
@@ -97,15 +89,10 @@
 BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
 
 # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
-if [ "$BM_USE_FAKE_ENV" ]; then
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
-else
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
-    # if testing on a physical network,
-    # BM_DNSMASQ_RANGE must be changed to suit your network
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
-fi
+BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+# if testing on a physical network,
+# BM_DNSMASQ_RANGE must be changed to suit your network
+BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
 
 # BM_DNSMASQ_DNS provide dns server to bootstrap clients
 BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-}
@@ -143,7 +130,6 @@
 # Below this, we set some path and filenames.
 # Defaults are probably sufficient.
 BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
-BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur}
 
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
@@ -177,7 +163,6 @@
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
     git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
-    git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
 
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
@@ -196,27 +181,6 @@
     fi
 }
 
-# set up virtualized environment for devstack-gate testing
-function create_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    # TODO(deva): add support for >1 VM
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm
-    BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
-
-    # NOTE: there is currently a limitation in baremetal driver
-    # that requires second MAC even if it is not used.
-    # Passing a fake value allows this to work.
-    # TODO(deva): remove this after driver issue is fixed.
-    BM_SECOND_MAC='12:34:56:78:90:12'
-}
-
-function cleanup_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
-}
-
 # prepare various directories needed by baremetal hypervisor
 function configure_baremetal_nova_dirs {
     # ensure /tftpboot is prepared
diff --git a/lib/ceilometer b/lib/ceilometer
index 2e6e7c5..b0899e2 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -129,6 +129,7 @@
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
+    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -209,7 +210,7 @@
     screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 
     # only die on API if it was actually intended to be turned on
-    if service_enabled ceilometer-api; then
+    if is_service_enabled ceilometer-api; then
         echo "Waiting for ceilometer-api to start..."
         if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
             die $LINENO "ceilometer-api did not start"
diff --git a/lib/cinder b/lib/cinder
index d003f5d..dd2956a 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -491,10 +491,7 @@
         sudo rm -f /etc/tgt/conf.d/stack.conf
         _configure_tgt_for_config_d
         if is_ubuntu; then
-            # tgt in oneiric doesn't restart properly if tgtd isn't running
-            # do it in two steps
-            sudo stop tgt || true
-            sudo start tgt
+            sudo service tgt restart
         elif is_fedora; then
             if [[ $DISTRO =~ (rhel6) ]]; then
                 sudo /sbin/service tgtd restart
diff --git a/lib/ironic b/lib/ironic
index 4e5edc9..b346de1 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -124,7 +124,7 @@
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
-    iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
diff --git a/lib/ldap b/lib/ldap
index 51d0251..efe2f09 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -154,7 +154,7 @@
 
 # clear_ldap_state() - Clear LDAP State
 function clear_ldap_state {
-    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
+    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || :
 }
 
 # Restore xtrace
diff --git a/lib/marconi b/lib/marconi
index 29ae386..1e0cc7d 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -104,8 +104,12 @@
     iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
 
-    if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
-        iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
+        iniset $MARCONI_CONF drivers storage sqlalchemy
+        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
+    else
+        iniset $MARCONI_CONF drivers storage mongodb
+        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
         configure_mongodb
         cleanup_marconi
     fi
diff --git a/lib/neutron b/lib/neutron
index 7ca66a5..bb591ab 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -586,11 +586,9 @@
     # If additional config files exist, copy them over to neutron configuration
     # directory
     if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then
-        mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
         local f
         for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
             Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
-            cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
         done
     fi
 
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 7728eb1..a1b089e 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -23,7 +23,7 @@
 # Specify ncclient package information
 NCCLIENT_DIR=$DEST/ncclient
 NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
-NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git}
+NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git}
 NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
 
 # This routine put a prefix on an existing function name
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
new file mode 100644
index 0000000..22c8578
--- /dev/null
+++ b/lib/neutron_plugins/ibm
@@ -0,0 +1,133 @@
+# Neutron IBM SDN-VE plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+
+function _neutron_interface_setup {
+    # Setup one interface on the integration bridge if needed
+    # The plugin agent to be used if more than one interface is used
+    local bridge=$1
+    local interface=$2
+    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
+}
+
+function neutron_setup_integration_bridge {
+    # Setup integration bridge if needed
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        neutron_ovs_base_cleanup
+        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
+        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
+            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
+        fi
+    fi
+
+    # Set controller to SDNVE controller (1st of list) if exists
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        # Get the first controller
+        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
+        SDNVE_IP=${controllers[0]}
+        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
+    fi
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    # if n-cpu is enabled, then setup integration bridge
+    if is_service_enabled n-cpu; then
+        neutron_setup_integration_bridge
+    fi
+}
+
+function is_neutron_ovs_base_plugin {
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        # Yes, we use OVS.
+        return 0
+    else
+        # No, we do not use OVS.
+        return 1
+    fi
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
+    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
+    Q_DB_NAME="sdnve_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
+}
+
+function neutron_plugin_configure_service {
+    # Define extra "SDNVE" configuration options when q-svc is configured
+
+    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
+    fi
+
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
+    fi
+
+    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
+    fi
+
+    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
+    fi
+
+    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
+    fi
+
+    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
+    fi
+
+
+    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
+
+}
+
+function neutron_plugin_configure_plugin_agent {
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_setup_interface_driver {
+    return 0
+}
+
+function has_neutron_plugin_security_group {
+    # Does not support Security Groups
+    return 1
+}
+
+function neutron_ovs_base_cleanup {
+    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
+        # remove all OVS ports that look like Neutron created ports
+        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+            sudo ovs-vsctl del-port ${port}
+        done
+
+        # remove integration bridge created by Neutron
+        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
+            sudo ovs-vsctl del-br ${bridge}
+        done
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
new file mode 100644
index 0000000..724df41
--- /dev/null
+++ b/lib/neutron_plugins/ofagent_agent
@@ -0,0 +1,94 @@
+# OpenFlow Agent plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+source $TOP_DIR/lib/neutron_thirdparty/ryu  # for RYU_DIR, install_ryu, etc
+
+function neutron_plugin_create_nova_conf {
+    _neutron_ovs_base_configure_nova_vif_driver
+}
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+
+    # This agent uses ryu to talk with switches
+    install_package $(get_packages "ryu")
+    install_ryu
+    configure_ryu
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    _neutron_ovs_base_configure_firewall_driver
+
+    # Check a supported openflow version
+    OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2`
+    if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then
+        die $LINENO "This agent requires OpenFlow 1.3+ capable switch."
+    fi
+
+    # Enable tunnel networks if selected
+    if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+        # Verify tunnels are supported
+        # REVISIT - also check kernel module support for GRE and patch ports
+        OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"`
+        if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
+        fi
+        iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True
+        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+    fi
+
+    # Setup physical network bridge mappings.  Override
+    # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+    # complex physical network configurations.
+    if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+        # Configure bridge manually with physical interface as port for multi-node
+        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+    fi
+    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
+    fi
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent"
+
+    # Define extra "AGENT" configuration options when q-agt is configured by
+    # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
+    done
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT ovs_use_veth True
+}
+
+function neutron_plugin_check_adv_test_requirements {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index cdbc4d1..fd3c4fe 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -57,6 +57,18 @@
     iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
 }
 
+# is_docker_running - Return 0 (true) if Docker is running, otherwise 1
+function is_docker_running {
+    local docker_pid
+    if [ -f "$DOCKER_PID_FILE" ]; then
+        docker_pid=$(cat "$DOCKER_PID_FILE")
+    fi
+    if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then
+        return 1
+    fi
+    return 0
+}
+
 # install_nova_hypervisor() - Install external components
 function install_nova_hypervisor {
     # So far this is Ubuntu only
@@ -69,19 +81,15 @@
         die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
     fi
 
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
+    if ! (is_docker_running); then
         die $LINENO "Docker not running"
     fi
 }
 
 # start_nova_hypervisor - Start any required external services
 function start_nova_hypervisor {
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running, start the daemon"
+    if ! (is_docker_running); then
+        die $LINENO "Docker not running"
     fi
 
     # Start the Docker registry container
diff --git a/lib/opendaylight b/lib/opendaylight
new file mode 100644
index 0000000..ca81c20
--- /dev/null
+++ b/lib/opendaylight
@@ -0,0 +1,167 @@
+# lib/opendaylight
+# Functions to control the configuration and operation of the opendaylight service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST`` must be defined
+# - ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_opendaylight_enabled
+# - is_opendaylight-compute_enabled
+# - install_opendaylight
+# - install_opendaylight-compute
+# - configure_opendaylight
+# - init_opendaylight
+# - start_opendaylight
+# - stop_opendaylight-compute
+# - stop_opendaylight
+# - cleanup_opendaylight
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# For OVS_BRIDGE and PUBLIC_BRIDGE
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Defaults
+# --------
+
+# The IP address of ODL. Set this in local.conf.
+# ODL_MGR_IP=
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# <define global variables here that belong to this project>
+ODL_DIR=$DEST/opendaylight
+
+# The OpenDaylight Package, currently using 'Hydrogen' release
+ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip}
+
+# The OpenDaylight URL
+ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1}
+
+# Default arguments for OpenDaylight. This is typically used to set
+# Java memory options.
+#   ODL_ARGS="-Xmx1024m -XX:MaxPermSize=512m"
+ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"}
+
+# How long to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60}
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# Test if OpenDaylight is enabled
+# is_opendaylight_enabled
+function is_opendaylight_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+# cleanup_opendaylight() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    :
+}
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    # Remove simple forwarder
+    rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding*
+
+    # Configure OpenFlow 1.3
+    echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    local _pwd=$(pwd)
+
+    if is_ubuntu; then
+        install_package maven openjdk-7-jre openjdk-7-jdk
+    else
+        yum_install maven java-1.7.0-openjdk
+    fi
+
+    # Download OpenDaylight
+    mkdir -p $ODL_DIR
+    cd $ODL_DIR
+    wget -N $ODL_URL/$ODL_PKG
+    unzip -u $ODL_PKG
+}
+
+# install_opendaylight-compute - Make sure OVS is installed
+function install_opendaylight-compute {
+    local kernel_version
+    # Install deps
+    # FIXME add to ``files/apts/neutron``, but don't install if not needed!
+    if is_ubuntu; then
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        install_package openvswitch
+        restart_service openvswitch-switch
+        restart_service openvswitch-controller
+    fi
+}
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    if is_ubuntu; then
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64
+    else
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk
+    fi
+
+    # The flags to ODL have the following meaning:
+    #   -of13: runs ODL using OpenFlow 1.3 protocol support.
+    #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+
+    # Sleep a bit to let OpenDaylight finish starting up
+    sleep $ODL_BOOT_WAIT
+}
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    screen_stop odl-server
+}
+
+# stop_opendaylight-compute() - Remove OVS bridges
+function stop_opendaylight-compute {
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Neutron
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/swift b/lib/swift
index 5d4d4ef..b8bc1b6 100644
--- a/lib/swift
+++ b/lib/swift
@@ -687,6 +687,11 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
+    # Maintain the iteration as screen_stop() has some desirable side-effects
+    for type in proxy object container account; do
+        screen_stop s-${type}
+    done
+    # Blast out any stragglers
     pkill -f swift-
 }
 
diff --git a/lib/tempest b/lib/tempest
index 16f8744..c74f00d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -149,8 +149,12 @@
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # See files/keystone_data.sh where alt_demo user
-    # and tenant are set up...
+    # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo
+    # user and tenant are set up...
+    ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+    ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+    TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo}
+    TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo}
     ALT_USERNAME=${ALT_USERNAME:-alt_demo}
     ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 
@@ -254,11 +258,16 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
+    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
     iniset $TEMPEST_CONFIG identity password "$password"
+    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
     iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG identity admin_password "$password"
+    iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
 
     # Image
     # for the gate we want to be able to override this variable so we aren't
@@ -285,7 +294,9 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
-    iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+    iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
+    iniset $TEMPEST_CONFIG "compute-admin" password "$password"
+    iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -293,7 +304,7 @@
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
-    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/stack.sh b/stack.sh
index ec8de2d..c990a1c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1052,9 +1052,6 @@
     echo_summary "Preparing for nova baremetal"
     prepare_baremetal_toolchain
     configure_baremetal_nova_dirs
-    if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-        create_fake_baremetal_env
-    fi
 fi
 
 
diff --git a/stackrc b/stackrc
index f235ccc..6bb6f37 100644
--- a/stackrc
+++ b/stackrc
@@ -229,12 +229,6 @@
 BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
 BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
 
-# bm_poseur
-# Used to simulate a hardware environment for baremetal
-# Only used if BM_USE_FAKE_ENV is set
-BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
-BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
-
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
 NOVNC_BRANCH=${NOVNC_BRANCH:-master}
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index ea943e1..ee3790f 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -1,7 +1,20 @@
 #!/usr/bin/python
-import urllib
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import json
 import sys
+import urllib
 
 
 def print_usage():
@@ -42,4 +55,4 @@
                                        'logUrl': log_url,
                                        'healthReport': config['healthReport']})
 
-print json.dumps(results)
+print(json.dumps(results))
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
index 5b845d8..1d994a6 100644
--- a/tools/uec/meta.py
+++ b/tools/uec/meta.py
@@ -1,10 +1,23 @@
-import sys
-from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
-from SimpleHTTPServer import SimpleHTTPRequestHandler
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
-def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
-         ServerClass = HTTPServer, protocol="HTTP/1.0"):
-    """simple http server that listens on a give address:port"""
+import BaseHTTPServer
+import SimpleHTTPServer
+import sys
+
+
+def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
+         ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
+    """simple http server that listens on a given address:port."""
 
     server_address = (host, port)
 
@@ -12,7 +25,7 @@
     httpd = ServerClass(server_address, HandlerClass)
 
     sa = httpd.socket.getsockname()
-    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    print("Serving HTTP on", sa[0], "port", sa[1], "...")
     httpd.serve_forever()
 
 if __name__ == '__main__':
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 440774e..2b5e418 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -73,7 +73,7 @@
 # Install basics
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr
+apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
 pip install xenapi
 
 # Install XenServer guest utilities
diff --git a/unstack.sh b/unstack.sh
index 6351fe0..a5e7b87 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -127,11 +127,6 @@
     killall stud
 fi
 
-# baremetal might have created a fake environment
-if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-    cleanup_fake_baremetal_env
-fi
-
 SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
 
 # Get the iSCSI volumes