Merge "Clarify deprecation of EXTRA_xxx_OPTS"
diff --git a/README.md b/README.md
index 9304240..89e3855 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@
 This is a recent change (Oct 2013) from the previous behaviour of
 automatically creating a ``stack`` user.  Automatically creating
 user accounts is not the right response to running as root, so
-that bit is now an explicit step using ``tools/create-stack-user.sh``. 
+that bit is now an explicit step using ``tools/create-stack-user.sh``.
 Run that (as root!) or just check it out to see what DevStack's
 expectations are for the account it runs under.  Many people simply
 use their usual login (the default 'ubuntu' login on a UEC image
@@ -271,10 +271,6 @@
 
 If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
 
-# DevStack on Docker
-
-If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`.
-
 # Additional Projects
 
 DevStack has a hook mechanism to call out to a dispatch script at specific
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index f679669..dff8e7a 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,9 +44,6 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/euca.sh b/exercises/euca.sh
index ad852a4..3768b56 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -40,9 +40,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8dc44ef..1416d4d 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -40,9 +40,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -178,6 +175,10 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
     die $LINENO "Failure deleting security group rule from $SECGROUP"
 
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+    die $LINENO "Security group rule not deleted from $SECGROUP"
+fi
+
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
diff --git a/exercises/savanna.sh b/exercises/sahara.sh
similarity index 88%
rename from exercises/savanna.sh
rename to exercises/sahara.sh
index fc3f976..867920e 100755
--- a/exercises/savanna.sh
+++ b/exercises/sahara.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-# **savanna.sh**
+# **sahara.sh**
 
-# Sanity check that Savanna started if enabled
+# Sanity check that Sahara started if enabled
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -33,9 +33,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-is_service_enabled savanna || exit 55
+is_service_enabled sahara || exit 55
 
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!"
+curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index d71a1e0..5f8b0a4 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -37,9 +37,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 
 # Testing Security Groups
 # =======================
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 83d25c7..0d556df 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -41,9 +41,6 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh
index 9e61dc5..3b8e3d5 100644
--- a/extras.d/50-ironic.sh
+++ b/extras.d/50-ironic.sh
@@ -24,10 +24,17 @@
         # Start the ironic API and ironic taskmgr components
         echo_summary "Starting Ironic"
         start_ironic
+
+        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
+            prepare_baremetal_basic_ops
+        fi
     fi
 
     if [[ "$1" == "unstack" ]]; then
         stop_ironic
+        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
+            cleanup_baremetal_basic_ops
+        fi
     fi
 
     if [[ "$1" == "clean" ]]; then
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
new file mode 100644
index 0000000..80e07ff
--- /dev/null
+++ b/extras.d/70-sahara.sh
@@ -0,0 +1,37 @@
+# sahara.sh - DevStack extras script to install Sahara
+
+if is_service_enabled sahara; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/sahara
+        source $TOP_DIR/lib/sahara-dashboard
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing sahara"
+        install_sahara
+        cleanup_sahara
+        if is_service_enabled horizon; then
+            install_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring sahara"
+        configure_sahara
+        create_sahara_accounts
+        if is_service_enabled horizon; then
+            configure_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing sahara"
+        start_sahara
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_sahara
+        if is_service_enabled horizon; then
+            cleanup_sahara_dashboard
+        fi
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_sahara
+    fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
deleted file mode 100644
index edc1376..0000000
--- a/extras.d/70-savanna.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-# savanna.sh - DevStack extras script to install Savanna
-
-if is_service_enabled savanna; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/savanna
-        source $TOP_DIR/lib/savanna-dashboard
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Savanna"
-        install_savanna
-        cleanup_savanna
-        if is_service_enabled horizon; then
-            install_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Savanna"
-        configure_savanna
-        create_savanna_accounts
-        if is_service_enabled horizon; then
-            configure_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Savanna"
-        start_savanna
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_savanna
-        if is_service_enabled horizon; then
-            cleanup_savanna_dashboard
-        fi
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_savanna
-    fi
-fi
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
new file mode 100644
index 0000000..57b4328
--- /dev/null
+++ b/extras.d/80-opendaylight.sh
@@ -0,0 +1,69 @@
+# opendaylight.sh - DevStack extras script
+
+if is_service_enabled odl-server odl-compute; then
+    # Initial source
+    [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
+fi
+
+if is_service_enabled odl-server; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight-compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        create_nova_conf_neutron
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing OpenDaylight"
+        ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+        ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
+        read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+        sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
+        sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        sudo ovs-vsctl del-manager
+        BRIDGES=$(sudo ovs-vsctl list-br)
+        for bridge in $BRIDGES ; do
+            sudo ovs-vsctl del-controller $bridge
+        done
+
+        stop_opendaylight-compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index 71007ba..f1b692a 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,5 +1,5 @@
-python-pymongo
-mongodb-server
+python-pymongo # NOPRIME
+mongodb-server # NOPRIME
 libnspr4-dev
 pkg-config
 libxml2-dev
diff --git a/files/apts/general b/files/apts/general
index 32d31f0..995c0c6 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,8 +9,6 @@
 lsof # useful when debugging
 openssh-server
 openssl
-vim-nox
-locate # useful when debugging
 python-virtualenv
 python-unittest2
 iputils-ping
diff --git a/files/apts/ironic b/files/apts/ironic
new file mode 100644
index 0000000..a749ad7
--- /dev/null
+++ b/files/apts/ironic
@@ -0,0 +1,10 @@
+libguestfs0
+libvirt-bin
+openssh-client
+openvswitch-switch
+openvswitch-datapath-dkms
+python-libguestfs
+python-libvirt
+syslinux
+tftpd-hpa
+xinetd
diff --git a/files/apts/opendaylight b/files/apts/opendaylight
new file mode 100644
index 0000000..ec3cc9d
--- /dev/null
+++ b/files/apts/opendaylight
@@ -0,0 +1,2 @@
+openvswitch-datapath-dkms # NOPRIME
+openvswitch-switch # NOPRIME
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 9a34c76..fc1e813 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -53,41 +53,6 @@
         --role ResellerAdmin
 fi
 
-# Heat
-if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
-    keystone user-create --name=heat \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=heat@example.com
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user heat \
-        --role service
-    # heat_stack_user role is for users created by Heat
-    keystone role-create --name heat_stack_user
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=heat-cfn \
-            --type=cloudformation \
-            --description="Heat CloudFormation Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat-cfn \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
-        keystone service-create \
-            --name=heat \
-            --type=orchestration \
-            --description="Heat Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
-    fi
-fi
-
 # Glance
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
     keystone user-create \
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
new file mode 100644
index 0000000..61f73ee
--- /dev/null
+++ b/files/rpms-suse/baremetal
@@ -0,0 +1 @@
+dnsmasq
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 704947e..ff27a3a 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,23 +1,22 @@
+bc
 bridge-utils
 ca-certificates-mozilla
 curl
 euca2ools
 git-core
 iputils
+libopenssl-devel # to rebuild pyOpenSSL if needed
+lsof # useful when debugging
+make
 openssh
 openssl
 psmisc
-python-setuptools # instead of python-distribute; dist:sle11sp2
 python-cmd2 # dist:opensuse-12.3
 python-pylint
+python-setuptools # instead of python-distribute; dist:sle11sp2
 python-unittest2
 screen
 tar
 tcpdump
 unzip
-vim-enhanced
 wget
-bc
-
-findutils-locate # useful when debugging
-lsof # useful when debugging
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index dd68ac0..d9844e9 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -8,5 +8,6 @@
 python-eventlet
 python-greenlet
 python-iso8601
+python-pyOpenSSL
 python-wsgiref
 python-xattr
diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight
new file mode 100644
index 0000000..d6c7146
--- /dev/null
+++ b/files/rpms-suse/opendaylight
@@ -0,0 +1,4 @@
+openvswitch # NOPRIME
+openvswitch-controller # NOPRIME
+openvswitch-switch # NOPRIME
+
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
 python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c91bac3..9cf580d 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,4 +1,4 @@
 selinux-policy-targeted
-mongodb-server
-pymongo
+mongodb-server # NOPRIME
+pymongo # NOPRIME
 mongodb # NOPRIME
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 199ae10..423d57c 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -4,4 +4,4 @@
 python-devel
 postgresql-devel
 iscsi-initiator-utils
-python-lxml         #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
diff --git a/files/rpms/glance b/files/rpms/glance
index 25c5d39..2007e2e 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -9,10 +9,10 @@
 python-devel
 python-eventlet
 python-greenlet
-python-lxml         #dist:f18,f19,f20,rhel7
-python-paste-deploy #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
+python-paste-deploy #dist:f19,f20,rhel7
 python-routes
 python-sqlalchemy
-python-wsgiref
+python-wsgiref      #dist:f18,f19,f20
 pyxattr
 zlib-devel          # testonly
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 59503cc..2dd24e0 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,8 +16,8 @@
 python-migrate
 python-mox
 python-nose
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
 python-routes
 python-sphinx
 python-sqlalchemy
diff --git a/files/rpms/ironic b/files/rpms/ironic
new file mode 100644
index 0000000..54b9829
--- /dev/null
+++ b/files/rpms/ironic
@@ -0,0 +1,9 @@
+libguestfs
+libvirt
+libvirt-python
+openssh-clients
+openvswitch
+python-libguestfs
+syslinux
+tftp-server
+xinetd
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 99e8524..7182091 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,9 +1,9 @@
 python-greenlet
 libxslt-devel       # dist:f20
-python-lxml         #dist:f18,f19,f20
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
-python-paste-script #dist:f18,f19,f20
+python-lxml         #dist:f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
+python-paste-script #dist:f19,f20
 python-routes
 python-sqlalchemy
 python-webob
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 42d7f68..06ea0ea 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
 python-iso8601
 python-kombu
 #rhel6 gets via pip
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/nova b/files/rpms/nova
index a607d92..45d6e0b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,11 +28,11 @@
 python-lockfile
 python-migrate
 python-mox
-python-paramiko # dist:f18,f19,f20,rhel7
+python-paramiko # dist:f19,f20,rhel7
 # ^ on RHEL6, brings in python-crypto which conflicts with version from
 # pip we need
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight
new file mode 100644
index 0000000..98aaaf4
--- /dev/null
+++ b/files/rpms/opendaylight
@@ -0,0 +1 @@
+openvswitch # NOPRIME
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/rpms/swift b/files/rpms/swift
index 72253f7..bf29ea2 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -9,7 +9,7 @@
 python-greenlet
 python-netifaces
 python-nose
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-simplejson
 python-webob
 pyxattr
diff --git a/functions b/functions
index a844b1c..e0d2b01 100644
--- a/functions
+++ b/functions
@@ -55,7 +55,7 @@
     mkdir -p $FILES/images
     IMAGE_FNAME=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
-        # Downloads the image (uec ami+aki style), then extracts it.
+        # Downloads the image (uec ami+aki style), then extracts it.
         if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
             wget -c $image_url -O $FILES/$IMAGE_FNAME
             if [[ $? -ne 0 ]]; then
@@ -103,12 +103,12 @@
         vmdk_net_adapter=""
 
         # vmdk adapter type
-        vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =' $IMAGE)"
+        vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })"
         vmdk_adapter_type="${vmdk_adapter_type#*\"}"
         vmdk_adapter_type="${vmdk_adapter_type%?}"
 
         # vmdk disk type
-        vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)"
+        vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })"
         vmdk_create_type="${vmdk_create_type#*\"}"
         vmdk_create_type="${vmdk_create_type%\"*}"
 
@@ -119,10 +119,10 @@
         elif [[ "$vmdk_create_type" = "monolithicFlat" || \
         "$vmdk_create_type" = "vmfs" ]]; then
             # Attempt to retrieve the *-flat.vmdk
-            flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)"
+            flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })"
             flat_fname="${flat_fname#*\"}"
             flat_fname="${flat_fname%?}"
-            if [[ -z "$flat_name" ]]; then
+            if [[ -z "$flat_fname" ]]; then
                 flat_fname="$IMAGE_NAME-flat.vmdk"
             fi
             path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
@@ -133,27 +133,16 @@
                 if [[ ! -f $FILES/$flat_fname || \
                 "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
                     wget -c $flat_url -O $FILES/$flat_fname
-                    if [[ $? -ne 0 ]]; then
-                        echo "Flat disk not found: $flat_url"
-                        flat_found=false
-                    fi
                 fi
-                if $flat_found; then
-                    IMAGE="$FILES/${flat_fname}"
-                fi
+                IMAGE="$FILES/${flat_fname}"
             else
                 IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
                 if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
                     echo "Flat disk not found: $flat_url"
-                    flat_found=false
-                fi
-                if ! $flat_found; then
-                    IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+                    return 1
                 fi
             fi
-            if $flat_found; then
-                IMAGE_NAME="${flat_fname}"
-            fi
+            IMAGE_NAME="${flat_fname}"
             vmdk_disktype="preallocated"
         elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then
             vmdk_disktype="streamOptimized"
@@ -174,26 +163,19 @@
                     if [[ ! -f $FILES/$descriptor_fname || \
                     "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
                         wget -c $descriptor_url -O $FILES/$descriptor_fname
-                        if [[ $? -ne 0 ]]; then
-                            warn $LINENO "Descriptor not found $descriptor_url"
-                            descriptor_found=false
-                        fi
                     fi
                     descriptor_url="$FILES/$descriptor_fname"
                 else
                     descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
                     if [[ ! -f $descriptor_url || \
                     "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
-                        warn $LINENO "Descriptor not found $descriptor_url"
-                        descriptor_found=false
+                        echo "Descriptor not found: $descriptor_url"
+                        return 1
                     fi
                 fi
-                if $descriptor_found; then
-                    vmdk_adapter_type="$(head -25 $descriptor_url |"`
-                    `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
-                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
-                    vmdk_adapter_type="${vmdk_adapter_type%?}"
-                fi
+                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
+                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                vmdk_adapter_type="${vmdk_adapter_type%?}"
             fi
             vmdk_disktype="preallocated"
         else
@@ -203,7 +185,7 @@
         # NOTE: For backwards compatibility reasons, colons may be used in place
         # of semi-colons for property delimiters but they are not permitted
         # characters in NTFS filesystems.
-        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'`
+        property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }`
         IFS=':;' read -a props <<< "$property_string"
         vmdk_disktype="${props[0]:-$vmdk_disktype}"
         vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
diff --git a/functions-common b/functions-common
index 0db3ff3..c6fd5c7 100644
--- a/functions-common
+++ b/functions-common
@@ -517,12 +517,14 @@
     GIT_DEST=$2
     GIT_REF=$3
     RECLONE=$(trueorfalse False $RECLONE)
+    local orig_dir=`pwd`
 
     if [[ "$OFFLINE" = "True" ]]; then
         echo "Running in offline mode, clones already exist"
         # print out the results so we know what change was used in the logs
         cd $GIT_DEST
         git show --oneline | head -1
+        cd $orig_dir
         return
     fi
 
@@ -572,6 +574,7 @@
     # print out the results so we know what change was used in the logs
     cd $GIT_DEST
     git show --oneline | head -1
+    cd $orig_dir
 }
 
 # git can sometimes get itself infinitely stuck with transient network
@@ -938,9 +941,24 @@
     [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    # The manual check for missing packages is because yum -y assumes
+    # missing packages are OK.  See
+    # https://bugzilla.redhat.com/show_bug.cgi?id=965567
     $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
         no_proxy=$no_proxy \
-        yum install -y "$@"
+        yum install -y "$@" 2>&1 | \
+        awk '
+            BEGIN { fail=0 }
+            /No package/ { fail=1 }
+            { print }
+            END { exit fail }' || \
+                die $LINENO "Missing packages detected"
+
+    # also ensure we catch a yum failure
+    if [[ ${PIPESTATUS[0]} != 0 ]]; then
+        die $LINENO "Yum install failure"
+    fi
 }
 
 # zypper wrapper to set arguments correctly
@@ -1233,7 +1251,7 @@
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements = "changed" ]]; then
+    if [[ $update_requirements != "changed" ]]; then
         (cd $REQUIREMENTS_DIR; \
             $SUDO_CMD python update.py $project_dir)
     fi
@@ -1249,7 +1267,7 @@
     # a variable that tells us whether or not we should UNDO the requirements
     # changes (this will be set to False in the OpenStack ci gate)
     if [ $UNDO_REQUIREMENTS = "True" ]; then
-        if [[ $update_requirements = "changed" ]]; then
+        if [[ $update_requirements != "changed" ]]; then
             (cd $project_dir && git reset --hard)
         fi
     fi
diff --git a/lib/baremetal b/lib/baremetal
index 473de0d..eda92f9 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -77,14 +77,6 @@
 # These should be customized to your environment and hardware
 # -----------------------------------------------------------
 
-# whether to create a fake environment, eg. for devstack-gate
-BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
-
-# Extra options to pass to bm_poseur
-# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
-# change the virtualization type: --engine qemu
-BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
-
 # To provide PXE, configure nova-network's dnsmasq rather than run the one
 # dedicated to baremetal. When enable this, make sure these conditions are
 # fulfilled:
@@ -97,15 +89,10 @@
 BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
 
 # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
-if [ "$BM_USE_FAKE_ENV" ]; then
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
-else
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
-    # if testing on a physical network,
-    # BM_DNSMASQ_RANGE must be changed to suit your network
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
-fi
+BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+# if testing on a physical network,
+# BM_DNSMASQ_RANGE must be changed to suit your network
+BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
 
 # BM_DNSMASQ_DNS provide dns server to bootstrap clients
 BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-}
@@ -143,7 +130,6 @@
 # Below this, we set some path and filenames.
 # Defaults are probably sufficient.
 BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
-BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur}
 
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
@@ -154,7 +140,10 @@
 
 # If you need to add any extra flavors to the deploy ramdisk image
 # eg, specific network drivers, specify them here
-BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-}
+#
+# NOTE(deva): this will be moved to lib/ironic in a future patch
+#             for now, set the default to a suitable value for Ironic's needs
+BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic}
 
 # set URL and version for google shell-in-a-box
 BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz}
@@ -177,7 +166,6 @@
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
     git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
-    git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
 
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
@@ -196,27 +184,6 @@
     fi
 }
 
-# set up virtualized environment for devstack-gate testing
-function create_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    # TODO(deva): add support for >1 VM
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm
-    BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
-
-    # NOTE: there is currently a limitation in baremetal driver
-    # that requires second MAC even if it is not used.
-    # Passing a fake value allows this to work.
-    # TODO(deva): remove this after driver issue is fixed.
-    BM_SECOND_MAC='12:34:56:78:90:12'
-}
-
-function cleanup_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
-}
-
 # prepare various directories needed by baremetal hypervisor
 function configure_baremetal_nova_dirs {
     # ensure /tftpboot is prepared
@@ -256,7 +223,7 @@
         BM_DEPLOY_KERNEL=bm-deploy.kernel
         BM_DEPLOY_RAMDISK=bm-deploy.initramfs
         if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then
-            $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
+            $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \
                 -o $TOP_DIR/files/bm-deploy
         fi
     fi
diff --git a/lib/ceilometer b/lib/ceilometer
index 2e6e7c5..6aaddce 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -106,7 +106,9 @@
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer {
-    mongo ceilometer --eval "db.dropDatabase();"
+    if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+        mongo ceilometer --eval "db.dropDatabase();"
+    fi
 }
 
 # configure_ceilometerclient() - Set config files, create data dirs, etc
@@ -129,6 +131,7 @@
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
+    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -163,14 +166,27 @@
 }
 
 function configure_mongodb {
+    # the mongodb server package name is the same on all distros
+    local packages=mongodb-server
+
     if is_fedora; then
-        # install mongodb client
-        install_package mongodb
+        # mongodb client + python bindings
+        packages="${packages} mongodb pymongo"
+    else
+        packages="${packages} python-pymongo"
+    fi
+
+    install_package ${packages}
+
+    if is_fedora; then
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
         restart_service mongod
     fi
+
+    # give mongodb time to start-up
+    sleep 5
 }
 
 # init_ceilometer() - Initialize etc.
@@ -209,7 +225,7 @@
     screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 
     # only die on API if it was actually intended to be turned on
-    if service_enabled ceilometer-api; then
+    if is_service_enabled ceilometer-api; then
         echo "Waiting for ceilometer-api to start..."
         if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
             die $LINENO "ceilometer-api did not start"
diff --git a/lib/cinder b/lib/cinder
index d003f5d..dd2956a 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -491,10 +491,7 @@
         sudo rm -f /etc/tgt/conf.d/stack.conf
         _configure_tgt_for_config_d
         if is_ubuntu; then
-            # tgt in oneiric doesn't restart properly if tgtd isn't running
-            # do it in two steps
-            sudo stop tgt || true
-            sudo start tgt
+            sudo service tgt restart
         elif is_fedora; then
             if [[ $DISTRO =~ (rhel6) ]]; then
                 sudo /sbin/service tgtd restart
diff --git a/lib/heat b/lib/heat
index d0c0302..902333e 100644
--- a/lib/heat
+++ b/lib/heat
@@ -45,6 +45,13 @@
 # Functions
 # ---------
 
+# Test if any Heat services are enabled
+# is_heat_enabled
+function is_heat_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0
+    return 1
+}
+
 # cleanup_heat() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_heat {
@@ -197,8 +204,62 @@
 }
 
 # create_heat_accounts() - Set up common required heat accounts
-# Note this is in addition to what is in files/keystone_data.sh
 function create_heat_accounts {
+    # migrated from files/keystone_data.sh
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    HEAT_USER=$(openstack user create \
+        heat \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email heat@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $HEAT_USER
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        HEAT_SERVICE=$(openstack service create \
+            heat \
+            --type=orchestration \
+            --description="Heat Orchestration Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+        HEAT_CFN_SERVICE=$(openstack service create \
+            heat \
+            --type=cloudformation \
+            --description="Heat CloudFormation Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_CFN_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+    fi
+
+    # heat_stack_user role is for users created by Heat
+    openstack role create heat_stack_user
+
+    # heat_stack_owner role is given to users who create Heat stacks,
+    # it's the default role used by heat to delegate to the heat service
+    # user (for performing deferred operations via trusts), see heat.conf
+    HEAT_OWNER_ROLE=$(openstack role create \
+        heat_stack_owner \
+        | grep " id " | get_field 2)
+
+    # Give the role to the demo and admin users so they can create stacks
+    # in either of the projects created by devstack
+    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+
     # Note we have to pass token/endpoint here because the current endpoint and
     # version negotiation in OSC means just --os-identity-api-version=3 won't work
     KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
diff --git a/lib/ironic b/lib/ironic
index 4e5edc9..c6fa563 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -18,16 +18,19 @@
 # - stop_ironic
 # - cleanup_ironic
 
-# Save trace setting
+# Save trace and pipefail settings
 XTRACE=$(set +o | grep xtrace)
+PIPEFAIL=$(set +o | grep pipefail)
 set +o xtrace
-
+set +o pipefail
 
 # Defaults
 # --------
 
 # Set up default directories
 IRONIC_DIR=$DEST/ironic
+IRONIC_DATA_DIR=$DATA_DIR/ironic
+IRONIC_STATE_PATH=/var/lib/ironic
 IRONICCLIENT_DIR=$DEST/python-ironicclient
 IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic}
 IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
@@ -35,6 +38,28 @@
 IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
 IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
 
+# Set up defaults for functional / integration testing
+IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts}
+IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates}
+IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS)
+IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`}
+IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys}
+IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key}
+IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME
+IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh}
+IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot}
+IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-2222}
+IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
+IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
+IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
+IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256}
+IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
+IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
+IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
+IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24}
+IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv}
+IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys}
+
 # Support entry points installation of console scripts
 IRONIC_BIN_DIR=$(get_python_exec_prefix)
 
@@ -86,8 +111,8 @@
     iniset $IRONIC_CONF_FILE DEFAULT debug True
     inicomment $IRONIC_CONF_FILE DEFAULT log_file
     iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic`
+    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
     iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
-
     # Configure Ironic conductor, if it was enabled.
     if is_service_enabled ir-cond; then
         configure_ironic_conductor
@@ -97,6 +122,10 @@
     if is_service_enabled ir-api; then
         configure_ironic_api
     fi
+
+    if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then
+        configure_ironic_auxiliary
+    fi
 }
 
 # configure_ironic_api() - Is used by configure_ironic(). Performs
@@ -124,7 +153,11 @@
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
-    iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE conductor api_url http://$SERVICE_HOST:6385
+    iniset $IRONIC_CONF_FILE pxe tftp_server $SERVICE_HOST
+    iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
+    iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
@@ -225,9 +258,233 @@
     screen -S $SCREEN_NAME -p ir-cond -X kill
 }
 
+function is_ironic {
+    if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then
+        return 0
+    fi
+    return 1
+}
 
-# Restore xtrace
+function configure_ironic_dirs {
+    sudo mkdir -p $IRONIC_DATA_DIR
+    sudo mkdir -p $IRONIC_STATE_PATH
+    sudo mkdir -p $IRONIC_TFTPBOOT_DIR
+    sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
+    if is_ubuntu; then
+        PXEBIN=/usr/lib/syslinux/pxelinux.0
+    elif is_fedora; then
+        PXEBIN=/usr/share/syslinux/pxelinux.0
+    fi
+    if [ ! -f $PXEBIN ]; then
+        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+    fi
+
+    cp $PXEBIN $IRONIC_TFTPBOOT_DIR
+    mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+}
+
+function ironic_ensure_libvirt_group {
+    groups $STACK_USER | grep -q $LIBVIRT_GROUP || adduser $STACK_USER $LIBVIRT_GROUP
+}
+
+function create_bridge_and_vms {
+    ironic_ensure_libvirt_group
+
+    # Call libvirt setup scripts in a new shell to ensure any new group membership
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
+
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
+        $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
+        amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE
+
+}
+
+function enroll_vms {
+
+    CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
+    IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1)
+    local idx=0
+
+    # workaround: we need to know which netns neutron uses for the private network
+    neutron port-create private
+
+    while read MAC; do
+
+        NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \
+            -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
+            -i ssh_address=$IRONIC_VM_SSH_ADDRESS \
+            -i ssh_port=$IRONIC_VM_SSH_PORT \
+            -i ssh_username=$IRONIC_SSH_USERNAME \
+            -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \
+            -p cpus=$IRONIC_VM_SPECS_CPU \
+            -p memory_mb=$IRONIC_VM_SPECS_RAM \
+            -p local_gb=$IRONIC_VM_SPECS_DISK \
+            -p cpu_arch=x86_64 \
+            | grep " uuid " | get_field 2)
+
+        ironic port-create --address $MAC --node_uuid $NODE_ID
+
+        idx=$((idx+1))
+
+    done < $IRONIC_VM_MACS_CSV_FILE
+
+    # create the nova flavor
+    nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU
+    nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$BM_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$BM_DEPLOY_RAMDISK_ID"
+
+    # intentional sleep to make sure the tag has been set to port
+    sleep 10
+    TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
+    TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+
+    # make sure veth pair is not existing, otherwise delete its links
+    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
+    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
+    # create veth pair for future interconnection between br-int and brbm
+    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
+    sudo ip link set dev brbm-tap1 up
+    sudo ip link set dev ovs-tap1 up
+
+    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID
+    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
+}
+
+function configure_tftpd {
+    # enable tftp NAT helper modules to allow connections to SERVICE_HOST's tftp server
+    sudo modprobe nf_conntrack_tftp
+    sudo modprobe nf_nat_tftp
+
+    if is_ubuntu; then
+        PXEBIN=/usr/lib/syslinux/pxelinux.0
+    elif is_fedora; then
+        PXEBIN=/usr/share/syslinux/pxelinux.0
+    fi
+    if [ ! -f $PXEBIN ]; then
+        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+    fi
+
+    # stop tftpd and setup serving via xinetd
+    stop_service tftpd-hpa || true
+    [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override
+    sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp
+    sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp
+
+    # setup tftp file mapping to satisfy requests at the root (booting) and
+    # /tftpboot/ sub-dir (as per deploy-ironic elements)
+    echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file
+    echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file
+
+    chmod -R 0755 $IRONIC_TFTPBOOT_DIR
+    restart_service xinetd
+}
+
+function configure_ironic_ssh_keypair {
+    # Generating ssh key pair for stack user
+    if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then
+        mkdir -p $IRONIC_SSH_KEY_DIR
+    fi
+    if [[ ! -d $HOME/.ssh ]]; then
+        mkdir -p $HOME/.ssh
+        chmod 700 $HOME/.ssh
+    fi
+    echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE
+    cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE
+}
+
+function ironic_ssh_check {
+    local KEY_FILE=$1
+    local FLOATING_IP=$2
+    local PORT=$3
+    local DEFAULT_INSTANCE_USER=$4
+    local ACTIVE_TIMEOUT=$5
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
+        die $LINENO "server didn't become ssh-able!"
+    fi
+}
+
+function configure_ironic_sshd {
+    # Ensure sshd server accepts connections from localhost only
+
+    SSH_CONFIG=/etc/ssh/sshd_config
+    HOST_PORT=$IRONIC_VM_SSH_ADDRESS:$IRONIC_VM_SSH_PORT
+    if ! sudo grep ListenAddress $SSH_CONFIG | grep $HOST_PORT; then
+        echo "ListenAddress $HOST_PORT" | sudo tee -a $SSH_CONFIG
+    fi
+
+    SSH_SERVICE_NAME=sshd
+    if is_ubuntu; then
+        SSH_SERVICE_NAME=ssh
+    fi
+
+    restart_service $SSH_SERVICE_NAME
+    # to ensure ssh service is up and running
+    sleep 3
+    ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
+
+}
+
+function configure_ironic_auxiliary {
+    configure_ironic_dirs
+    configure_ironic_ssh_keypair
+    configure_ironic_sshd
+}
+
+function prepare_baremetal_basic_ops {
+
+    # install diskimage-builder
+    git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+
+    # make sure all needed services are enabled
+    for srv in nova glance key neutron; do
+        if ! is_service_enabled "$srv"; then
+            die $LINENO "$srv should be enabled for ironic tests"
+        fi
+    done
+
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+    # stop all nova services
+    stop_nova || true
+
+    # remove any nova services failure status
+    find $SERVICE_DIR/$SCREEN_NAME -name 'n-*.failure' -exec rm -f '{}' \;
+
+    # start them again
+    start_nova_api
+    start_nova
+
+    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
+
+    echo_summary "Creating and uploading baremetal images for ironic"
+
+    # build and upload separate deploy kernel & ramdisk
+    upload_baremetal_deploy $TOKEN
+
+    create_bridge_and_vms
+    enroll_vms
+    configure_tftpd
+}
+
+function cleanup_baremetal_basic_ops {
+    rm -f $IRONIC_VM_MACS_CSV_FILE
+    if [ -f $IRONIC_KEY_FILE ]; then
+        KEY=`cat $IRONIC_KEY_FILE.pub`
+        # remove public key from authorized_keys
+        grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
+        chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
+    fi
+    sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE"
+    sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override
+    restart_service xinetd
+}
+
+# Restore xtrace + pipefail
 $XTRACE
+$PIPEFAIL
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/ldap b/lib/ldap
index 51d0251..efe2f09 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -154,7 +154,7 @@
 
 # clear_ldap_state() - Clear LDAP State
 function clear_ldap_state {
-    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
+    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || :
 }
 
 # Restore xtrace
diff --git a/lib/marconi b/lib/marconi
index 29ae386..fd1c351 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -34,7 +34,8 @@
 MARCONICLIENT_DIR=$DEST/python-marconiclient
 MARCONI_CONF_DIR=/etc/marconi
 MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_API_LOG_DIR=/var/log/marconi
+MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
 MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
 
 # Support potential entry-points console scripts
@@ -96,6 +97,7 @@
 
     iniset $MARCONI_CONF DEFAULT verbose True
     iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+    iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
@@ -104,8 +106,12 @@
     iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
 
-    if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
-        iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
+        iniset $MARCONI_CONF drivers storage sqlalchemy
+        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
+    else
+        iniset $MARCONI_CONF drivers storage mongodb
+        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
         configure_mongodb
         cleanup_marconi
     fi
diff --git a/lib/neutron b/lib/neutron
index 7ca66a5..84e8277 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 # nova vif driver that all plugins should use
 NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_CHANGE:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
+
+    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -586,11 +593,9 @@
     # If additional config files exist, copy them over to neutron configuration
     # directory
     if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then
-        mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
         local f
         for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
             Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
-            cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
         done
     fi
 
@@ -756,6 +761,16 @@
         iniset $NEUTRON_CONF DEFAULT ${I/=/ }
     done
 
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova $NOVA_USER
+    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
     # Configure plugin
     neutron_plugin_configure_service
 }
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 7728eb1..a1b089e 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -23,7 +23,7 @@
 # Specify ncclient package information
 NCCLIENT_DIR=$DEST/ncclient
 NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
-NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git}
+NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git}
 NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
 
 # This routine put a prefix on an existing function name
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
new file mode 100644
index 0000000..22c8578
--- /dev/null
+++ b/lib/neutron_plugins/ibm
@@ -0,0 +1,133 @@
+# Neutron IBM SDN-VE plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+
+function _neutron_interface_setup {
+    # Setup one interface on the integration bridge if needed
+    # The plugin agent to be used if more than one interface is used
+    local bridge=$1
+    local interface=$2
+    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
+}
+
+function neutron_setup_integration_bridge {
+    # Setup integration bridge if needed
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        neutron_ovs_base_cleanup
+        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
+        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
+            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
+        fi
+    fi
+
+    # Set controller to SDNVE controller (1st of list) if exists
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        # Get the first controller
+        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
+        SDNVE_IP=${controllers[0]}
+        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
+    fi
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    # if n-cpu is enabled, then setup integration bridge
+    if is_service_enabled n-cpu; then
+        neutron_setup_integration_bridge
+    fi
+}
+
+function is_neutron_ovs_base_plugin {
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        # Yes, we use OVS.
+        return 0
+    else
+        # No, we do not use OVS.
+        return 1
+    fi
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
+    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
+    Q_DB_NAME="sdnve_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
+}
+
+function neutron_plugin_configure_service {
+    # Define extra "SDNVE" configuration options when q-svc is configured
+
+    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
+    fi
+
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
+    fi
+
+    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
+    fi
+
+    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
+    fi
+
+    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
+    fi
+
+    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
+    fi
+
+
+    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
+
+}
+
+function neutron_plugin_configure_plugin_agent {
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_setup_interface_driver {
+    return 0
+}
+
+function has_neutron_plugin_security_group {
+    # Does not support Security Groups
+    return 1
+}
+
+function neutron_ovs_base_cleanup {
+    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
+        # remove all OVS ports that look like Neutron created ports
+        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+            sudo ovs-vsctl del-port ${port}
+        done
+
+        # remove integration bridge created by Neutron
+        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
+            sudo ovs-vsctl del-br ${bridge}
+        done
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
new file mode 100644
index 0000000..3649f39
--- /dev/null
+++ b/lib/neutron_plugins/nuage
@@ -0,0 +1,69 @@
+# Nuage Neutron Plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function neutron_plugin_create_nova_conf {
+    NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
+    iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+}
+
+function neutron_plugin_install_agent_packages {
+    :
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage
+    Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini
+    Q_DB_NAME="nuage_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin"
+    Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions
+    #Nuage specific Neutron defaults. Actual value must be set and sourced
+    NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'}
+    NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'}
+    NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'}
+    NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'}
+    NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'}
+    NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'}
+    NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''}
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    :
+}
+
+function neutron_plugin_configure_l3_agent {
+    :
+}
+
+function neutron_plugin_configure_plugin_agent {
+    :
+}
+
+function neutron_plugin_configure_service {
+    iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/
+    iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH
+    iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION
+    iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS
+    iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE
+    iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME
+}
+
+function has_neutron_plugin_security_group {
+    # 1 means False here
+    return 1
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
new file mode 100644
index 0000000..724df41
--- /dev/null
+++ b/lib/neutron_plugins/ofagent_agent
@@ -0,0 +1,94 @@
+# OpenFlow Agent plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+source $TOP_DIR/lib/neutron_thirdparty/ryu  # for RYU_DIR, install_ryu, etc
+
+function neutron_plugin_create_nova_conf {
+    _neutron_ovs_base_configure_nova_vif_driver
+}
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+
+    # This agent uses ryu to talk with switches
+    install_package $(get_packages "ryu")
+    install_ryu
+    configure_ryu
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    _neutron_ovs_base_configure_firewall_driver
+
+    # Check a supported openflow version
+    OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2`
+    if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then
+        die $LINENO "This agent requires OpenFlow 1.3+ capable switch."
+    fi
+
+    # Enable tunnel networks if selected
+    if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+        # Verify tunnels are supported
+        # REVISIT - also check kernel module support for GRE and patch ports
+        OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"`
+        if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
+        fi
+        iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True
+        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+    fi
+
+    # Setup physical network bridge mappings.  Override
+    # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+    # complex physical network configurations.
+    if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+        # Configure bridge manually with physical interface as port for multi-node
+        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+    fi
+    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
+    fi
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent"
+
+    # Define extra "AGENT" configuration options when q-agt is configured by defining
+    # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
+    done
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT ovs_use_veth True
+}
+
+function neutron_plugin_check_adv_test_requirements {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
new file mode 100644
index 0000000..0aebff6
--- /dev/null
+++ b/lib/neutron_plugins/oneconvergence
@@ -0,0 +1,76 @@
+# Neutron One Convergence plugin
+# ------------------------------
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+Q_L3_ENABLED=true
+Q_L3_ROUTER_PER_TENANT=true
+Q_USE_NAMESPACE=true
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+# Configure common parameters
+function neutron_plugin_configure_common {
+
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
+    Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
+    Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
+    Q_DB_NAME='oc_nvsd_neutron'
+}
+
+# Configure plugin specific information
+function neutron_plugin_configure_service {
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group {
+    # Return 0 (shell true): this plugin supports the security group extension
+    return 0
+}
+
+function setup_integration_bridge {
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    setup_integration_bridge
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
+
+    _neutron_ovs_base_configure_firewall_driver
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
+        setup_integration_bridge
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 424a900..b2c1b61 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -18,14 +18,8 @@
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
 
-# configure_ryu can be called multiple times as neutron_pluing/ryu may call
-# this function for neutron-ryu-agent
-_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu {
-    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
-        setup_develop $RYU_DIR
-        _RYU_CONFIGURED=True
-    fi
+    :
 }
 
 function init_ryu {
@@ -63,6 +57,7 @@
 function install_ryu {
     if [[ "$_RYU_INSTALLED" == "False" ]]; then
         git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+        export PYTHONPATH=$RYU_DIR:$PYTHONPATH
         _RYU_INSTALLED=True
     fi
 }
diff --git a/lib/nova b/lib/nova
index 583a592..b01d107 100644
--- a/lib/nova
+++ b/lib/nova
@@ -308,7 +308,7 @@
     # Rebuild the config file from scratch
     create_nova_conf
 
-    if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         # Configure hypervisor plugin
         configure_nova_hypervisor
     fi
@@ -665,17 +665,6 @@
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # Enable client side traces for libvirt
-        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
-        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-        # Enable server side traces for libvirtd
-        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
@@ -726,19 +715,28 @@
     start_nova_rest
 }
 
-# stop_nova() - Stop running processes (non-screen)
-function stop_nova {
-    # Kill the nova screen windows
-    # Some services are listed here twice since more than one instance
-    # of a service may be running in certain configs.
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
-        screen_stop $serv
-    done
+function stop_nova_compute {
+    screen_stop n-cpu
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         stop_nova_hypervisor
     fi
 }
 
+function stop_nova_rest {
+    # Kill the nova screen windows
+    # Some services are listed here twice since more than one instance
+    # of a service may be running in certain configs.
+    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+        screen_stop $serv
+    done
+}
+
+# stop_nova() - Stop running processes (non-screen)
+function stop_nova {
+    stop_nova_rest
+    stop_nova_compute
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
deleted file mode 100644
index cdbc4d1..0000000
--- a/lib/nova_plugins/hypervisor-docker
+++ /dev/null
@@ -1,124 +0,0 @@
-# lib/nova_plugins/docker
-# Configure the Docker hypervisor
-
-# Enable with:
-#
-#   VIRT_DRIVER=docker
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``nova`` and ``glance`` configurations
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-DOCKER_DIR=$DEST/docker
-
-DOCKER_UNIX_SOCKET=/var/run/docker.sock
-DOCKER_PID_FILE=/var/run/docker.pid
-DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
-
-DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest}
-DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME
-DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest}
-DOCKER_REGISTRY_IMAGE_NAME=registry
-DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
-
-DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
-    stop_service docker
-
-    # Clean out work area
-    sudo rm -rf /var/lib/docker
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
-    iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
-    iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
-    # So far this is Ubuntu only
-    if ! is_ubuntu; then
-        die $LINENO "Docker is only supported on Ubuntu at this time"
-    fi
-
-    # Make sure Docker is installed
-    if ! is_package_installed lxc-docker; then
-        die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
-    fi
-
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running"
-    fi
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running, start the daemon"
-    fi
-
-    # Start the Docker registry container
-    docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \
-        -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \
-        -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \
-        -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \
-        -e OS_AUTH_URL=${OS_AUTH_URL} \
-        $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh
-
-    echo "Waiting for docker registry to start..."
-    DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then
-        die $LINENO "docker-registry did not start"
-    fi
-
-    # Tag image if not already tagged
-    if ! docker images | grep $DOCKER_REPOSITORY_NAME; then
-        docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME
-    fi
-
-    # Make sure we copied the image in Glance
-    if ! (glance image-show "$DOCKER_IMAGE"); then
-        docker push $DOCKER_REPOSITORY_NAME
-    fi
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
-    # Stop the docker registry container
-    docker kill $(docker ps | grep docker-registry | cut -d' ' -f1)
-}
-
-
-# Restore xtrace
-$MY_XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
new file mode 100644
index 0000000..5af7c0b
--- /dev/null
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -0,0 +1,75 @@
+# lib/nova_plugins/hypervisor-ironic
+# Configure the ironic hypervisor
+
+# Enable with:
+# VIRT_DRIVER=ironic
+
+# Dependencies:
+# ``functions`` file
+# ``nova`` configuration
+
+# install_nova_hypervisor - install any external requirements
+# configure_nova_hypervisor - make configuration changes, including those to other services
+# start_nova_hypervisor - start any external services
+# stop_nova_hypervisor - stop any external services
+# cleanup_nova_hypervisor - remove transient data and cache
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Entry Points
+# ------------
+
+# clean_nova_hypervisor - Clean up an installation
+function cleanup_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+# configure_nova_hypervisor - Set config files, create data dirs, etc
+function configure_nova_hypervisor {
+    iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm`
+    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+    iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver
+    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+    iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager
+    iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+    iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
+    # ironic section
+    iniset $NOVA_CONF ironic admin_username admin
+    iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
+    iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+    iniset $NOVA_CONF ironic admin_tenant_name demo
+    iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6385/v1
+}
+
+# install_nova_hypervisor() - Install external components
+function install_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+# start_nova_hypervisor - Start any required external services
+function start_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+# stop_nova_hypervisor - Stop any external services
+function stop_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index bbf6554..5a51f33 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -25,6 +25,8 @@
 
 # File injection is disabled by default in Nova.  This will turn it back on.
 ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+# if we should turn on massive libvirt debugging
+DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT)
 
 
 # Entry Points
@@ -103,6 +105,18 @@
     fi
     add_user_to_group $STACK_USER $LIBVIRT_GROUP
 
+    # Enable server side traces for libvirtd
+    if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then
+        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+    fi
+
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
diff --git a/lib/opendaylight b/lib/opendaylight
new file mode 100644
index 0000000..ca81c20
--- /dev/null
+++ b/lib/opendaylight
@@ -0,0 +1,167 @@
+# lib/opendaylight
+# Functions to control the configuration and operation of the opendaylight service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST`` must be defined
+# - ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_opendaylight_enabled
+# - is_opendaylight-compute_enabled
+# - install_opendaylight
+# - install_opendaylight-compute
+# - configure_opendaylight
+# - init_opendaylight
+# - start_opendaylight
+# - stop_opendaylight-compute
+# - stop_opendaylight
+# - cleanup_opendaylight
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# For OVS_BRIDGE and PUBLIC_BRIDGE
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Defaults
+# --------
+
+# The IP address of ODL. Set this in local.conf.
+# ODL_MGR_IP=
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# <define global variables here that belong to this project>
+ODL_DIR=$DEST/opendaylight
+
+# The OpenDaylight Package, currently using 'Hydrogen' release
+ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip}
+
+# The OpenDaylight URL
+ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1}
+
+# Default arguments for OpenDaylight. This is typically used to set
+# Java memory options.
+#   ODL_ARGS=Xmx1024m -XX:MaxPermSize=512m
+ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"}
+
+# How long to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60}
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# Test if OpenDaylight is enabled
+# is_opendaylight_enabled
+function is_opendaylight_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+# cleanup_opendaylight() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    :
+}
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    # Remove simple forwarder
+    rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding*
+
+    # Configure OpenFlow 1.3
+    echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    local _pwd=$(pwd)
+
+    if is_ubuntu; then
+        install_package maven openjdk-7-jre openjdk-7-jdk
+    else
+        yum_install maven java-1.7.0-openjdk
+    fi
+
+    # Download OpenDaylight
+    mkdir -p $ODL_DIR
+    cd $ODL_DIR
+    wget -N $ODL_URL/$ODL_PKG
+    unzip -u $ODL_PKG
+}
+
+# install_opendaylight-compute - Make sure OVS is installed
+function install_opendaylight-compute {
+    local kernel_version
+    # Install deps
+    # FIXME add to ``files/apts/neutron``, but don't install if not needed!
+    if is_ubuntu; then
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        install_package openvswitch
+        restart_service openvswitch-switch
+        restart_service openvswitch-controller
+    fi
+}
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    if is_ubuntu; then
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64
+    else
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk
+    fi
+
+    # The flags to ODL have the following meaning:
+    #   -of13: runs ODL using OpenFlow 1.3 protocol support.
+    #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+
+    # Sleep a bit to let OpenDaylight finish starting up
+    sleep $ODL_BOOT_WAIT
+}
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    screen_stop odl-server
+}
+
+# stop_opendaylight-compute() - Remove OVS bridges
+function stop_opendaylight-compute {
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Neutron
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/rpc_backend b/lib/rpc_backend
index a0424b1..e922daa 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -186,7 +186,7 @@
         fi
     elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
-        iniset $file $section rabbit_host $RABBIT_HOST
+        iniset $file $section rabbit_hosts $RABBIT_HOST
         iniset $file $section rabbit_password $RABBIT_PASSWORD
     fi
 }
diff --git a/lib/sahara b/lib/sahara
new file mode 100644
index 0000000..4cb04ec
--- /dev/null
+++ b/lib/sahara
@@ -0,0 +1,177 @@
+# lib/sahara
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_sahara
+# configure_sahara
+# start_sahara
+# stop_sahara
+# cleanup_sahara
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
+SAHARA_BRANCH=${SAHARA_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DIR=$DEST/sahara
+SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
+SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
+SAHARA_DEBUG=${SAHARA_DEBUG:-True}
+
+SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
+SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
+SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
+
+# Support entry points installation of console scripts
+if [[ -d $SAHARA_DIR/bin ]]; then
+    SAHARA_BIN_DIR=$SAHARA_DIR/bin
+else
+    SAHARA_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Tell Tempest this project is present
+TEMPEST_SERVICES+=,sahara
+
+# For backward compatibility with current tests in Tempest
+TEMPEST_SERVICES+=,savanna
+
+
+# Functions
+# ---------
+
+# create_sahara_accounts() - Set up common required sahara accounts
+#
+# Tenant      User       Roles
+# ------------------------------
+# service     sahara    admin
+function create_sahara_accounts {
+
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    SAHARA_USER=$(openstack user create \
+        sahara \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email sahara@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SAHARA_USER
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        SAHARA_SERVICE=$(openstack service create \
+            sahara \
+            --type=data_processing \
+            --description="Sahara Data Processing" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+            $SAHARA_SERVICE \
+            --region RegionOne \
+            --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+    fi
+}
+
+# cleanup_sahara() - Remove residual data files, anything left over from
+# previous runs that would need to clean up.
+function cleanup_sahara {
+
+    # Cleanup auth cache dir
+    sudo rm -rf $SAHARA_AUTH_CACHE_DIR
+}
+
+# configure_sahara() - Set config files, create data dirs, etc
+function configure_sahara {
+
+    if [[ ! -d $SAHARA_CONF_DIR ]]; then
+        sudo mkdir -p $SAHARA_CONF_DIR
+    fi
+    sudo chown $STACK_USER $SAHARA_CONF_DIR
+
+    # Copy over sahara configuration file and configure common parameters.
+    # TODO(slukjanov): rename when sahara internals will be updated
+    cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE
+
+    # Create auth cache dir
+    sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
+    rm -rf $SAHARA_AUTH_CACHE_DIR/*
+
+    # Set obsolete keystone auth configs for backward compatibility
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+    # Set actual keystone auth configs
+    iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
+    iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
+    iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG
+
+    iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
+
+    if is_service_enabled neutron; then
+        iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
+        iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
+    fi
+
+    if is_service_enabled heat; then
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
+    else
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
+    fi
+
+    iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
+    recreate_database sahara utf8
+    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
+}
+
+# install_sahara() - Collect source and prepare
+function install_sahara {
+    git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
+    setup_develop $SAHARA_DIR
+}
+
+# start_sahara() - Start running processes, including screen
+function start_sahara {
+    screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
+}
+
+# stop_sahara() - Stop running processes
+function stop_sahara {
+    # Kill the Sahara screen windows
+    screen -S $SCREEN_NAME -p sahara -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard
new file mode 100644
index 0000000..a81df0f
--- /dev/null
+++ b/lib/sahara-dashboard
@@ -0,0 +1,72 @@
+# lib/sahara-dashboard
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_HOST``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_sahara_dashboard
+# - configure_sahara_dashboard
+# - cleanup_sahara_dashboard
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/horizon
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git}
+SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master}
+
+SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
+SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard
+SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
+
+# Functions
+# ---------
+
+function configure_sahara_dashboard {
+
+    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+
+    if is_service_enabled neutron; then
+        echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    fi
+}
+
+# install_sahara_dashboard() - Collect source and prepare
+function install_sahara_dashboard {
+    install_python_saharaclient
+    git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH
+    setup_develop $SAHARA_DASHBOARD_DIR
+}
+
+function install_python_saharaclient {
+    git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
+    setup_develop $SAHARA_PYTHONCLIENT_DIR
+}
+
+# Cleanup file settings.py from Sahara
+function cleanup_sahara_dashboard {
+    sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/savanna b/lib/savanna
deleted file mode 100644
index 2cb092c..0000000
--- a/lib/savanna
+++ /dev/null
@@ -1,173 +0,0 @@
-# lib/savanna
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_savanna
-# configure_savanna
-# start_savanna
-# stop_savanna
-# cleanup_savanna
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git}
-SAVANNA_BRANCH=${SAVANNA_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DIR=$DEST/savanna
-SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
-SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf
-SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
-
-SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
-SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
-SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
-
-# Support entry points installation of console scripts
-if [[ -d $SAVANNA_DIR/bin ]]; then
-    SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
-else
-    SAVANNA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,savanna
-
-
-# Functions
-# ---------
-
-# create_savanna_accounts() - Set up common required savanna accounts
-#
-# Tenant      User       Roles
-# ------------------------------
-# service     savanna    admin
-function create_savanna_accounts {
-
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
-    SAVANNA_USER=$(openstack user create \
-        savanna \
-        --password "$SERVICE_PASSWORD" \
-        --project $SERVICE_TENANT \
-        --email savanna@example.com \
-        | grep " id " | get_field 2)
-    openstack role add \
-        $ADMIN_ROLE \
-        --project $SERVICE_TENANT \
-        --user $SAVANNA_USER
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SAVANNA_SERVICE=$(openstack service create \
-            savanna \
-            --type=data_processing \
-            --description="Savanna Data Processing" \
-            | grep " id " | get_field 2)
-        openstack endpoint create \
-            $SAVANNA_SERVICE \
-            --region RegionOne \
-            --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-    fi
-}
-
-# cleanup_savanna() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_savanna {
-
-    # Cleanup auth cache dir
-    sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
-}
-
-# configure_savanna() - Set config files, create data dirs, etc
-function configure_savanna {
-
-    if [[ ! -d $SAVANNA_CONF_DIR ]]; then
-        sudo mkdir -p $SAVANNA_CONF_DIR
-    fi
-    sudo chown $STACK_USER $SAVANNA_CONF_DIR
-
-    # Copy over savanna configuration file and configure common parameters.
-    cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
-
-    # Create auth cache dir
-    sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
-    rm -rf $SAVANNA_AUTH_CACHE_DIR/*
-
-    # Set obsolete keystone auth configs for backward compatibility
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
-
-    # Set actual keystone auth configs
-    iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
-    iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-
-    iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
-
-    iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
-
-    if is_service_enabled neutron; then
-        iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true
-        iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
-    fi
-
-    if is_service_enabled heat; then
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
-    else
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
-    fi
-
-    iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
-    recreate_database savanna utf8
-    $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head
-}
-
-# install_savanna() - Collect source and prepare
-function install_savanna {
-    git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
-    setup_develop $SAVANNA_DIR
-}
-
-# start_savanna() - Start running processes, including screen
-function start_savanna {
-    screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE"
-}
-
-# stop_savanna() - Stop running processes
-function stop_savanna {
-    # Kill the Savanna screen windows
-    screen -S $SCREEN_NAME -p savanna -X kill
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
deleted file mode 100644
index 6fe15a3..0000000
--- a/lib/savanna-dashboard
+++ /dev/null
@@ -1,72 +0,0 @@
-# lib/savanna-dashboard
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_HOST``
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_savanna_dashboard
-# - configure_savanna_dashboard
-# - cleanup_savanna_dashboard
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/horizon
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git}
-SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master}
-
-SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git}
-SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
-SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
-
-# Functions
-# ---------
-
-function configure_savanna_dashboard {
-
-    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-    echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-
-    if is_service_enabled neutron; then
-        echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    fi
-}
-
-# install_savanna_dashboard() - Collect source and prepare
-function install_savanna_dashboard {
-    install_python_savannaclient
-    git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
-    setup_develop $SAVANNA_DASHBOARD_DIR
-}
-
-function install_python_savannaclient {
-    git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
-    setup_develop $SAVANNA_PYTHONCLIENT_DIR
-}
-
-# Cleanup file settings.py from Savanna
-function cleanup_savanna_dashboard {
-    sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
-}
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
-
diff --git a/lib/swift b/lib/swift
index 5d4d4ef..b655440 100644
--- a/lib/swift
+++ b/lib/swift
@@ -67,8 +67,8 @@
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
-# Default is ``staticweb, tempurl, formpost``
-SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb}
+# Default is ``staticweb, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at
 # the end of the pipeline.
@@ -687,6 +687,11 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
+    # Maintain the iteration as screen_stop() has some desirable side-effects
+    for type in proxy object container account; do
+        screen_stop s-${type}
+    done
+    # Blast out any stragglers
     pkill -f swift-
 }
 
diff --git a/lib/tempest b/lib/tempest
index 16f8744..c74f00d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -149,8 +149,12 @@
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # See files/keystone_data.sh where alt_demo user
-    # and tenant are set up...
+    # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo
+    # user and tenant are set up...
+    ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+    ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+    TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo}
+    TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo}
     ALT_USERNAME=${ALT_USERNAME:-alt_demo}
     ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 
@@ -254,11 +258,16 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
+    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
     iniset $TEMPEST_CONFIG identity password "$password"
+    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
     iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG identity admin_password "$password"
+    iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
 
     # Image
     # for the gate we want to be able to override this variable so we aren't
@@ -285,7 +294,9 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
-    iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+    iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
+    iniset $TEMPEST_CONFIG "compute-admin" password "$password"
+    iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -293,7 +304,7 @@
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
-    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/run_tests.sh b/run_tests.sh
index a0bfbee..685b203 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -27,3 +27,16 @@
 echo "Running bash8..."
 
 ./tools/bash8.py -v $FILES
+
+
+# Test that no one is trying to land crazy refs as branches
+
+echo "Ensuring we don't have crazy refs"
+
+REFS=`grep BRANCH stackrc | grep -v -- '-master'`
+rc=$?
+if [[ $rc -eq 0 ]]; then
+    echo "Branch defaults must be master. Found:"
+    echo $REFS
+    exit 1
+fi
diff --git a/stack.sh b/stack.sh
index 988fda5..09e9eec 100755
--- a/stack.sh
+++ b/stack.sh
@@ -142,7 +142,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -541,6 +541,7 @@
                     print
                     print > logfile
                     fflush("")
+                    fflush(logfile)
                 }' ) 2>&1
         # Set up a second fd for output
         exec 6> >( tee "${SUMFILE}" )
@@ -934,8 +935,7 @@
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \
-    HEAT_API_PORT=$HEAT_API_PORT \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
@@ -1053,9 +1053,6 @@
     echo_summary "Preparing for nova baremetal"
     prepare_baremetal_toolchain
     configure_baremetal_nova_dirs
-    if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-        create_fake_baremetal_env
-    fi
 fi
 
 
@@ -1464,3 +1461,9 @@
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo_summary "stack.sh completed in $SECONDS seconds."
+
+# Restore/close logging file descriptors
+exec 1>&3
+exec 2>&3
+exec 3>&-
+exec 6>&-
diff --git a/stackrc b/stackrc
index f235ccc..4a997bf 100644
--- a/stackrc
+++ b/stackrc
@@ -229,12 +229,6 @@
 BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
 BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
 
-# bm_poseur
-# Used to simulate a hardware environment for baremetal
-# Only used if BM_USE_FAKE_ENV is set
-BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
-BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
-
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
 NOVNC_BRANCH=${NOVNC_BRANCH:-master}
@@ -273,7 +267,7 @@
 is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
 VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
 case "$VIRT_DRIVER" in
-    libvirt)
+    ironic|libvirt)
         LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
         if [[ "$os_VENDOR" =~ (Debian) ]]; then
             LIBVIRT_GROUP=libvirt
@@ -326,9 +320,6 @@
     openvz)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
-    docker)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros}
-        IMAGE_URLS=${IMAGE_URLS:-};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
@@ -341,7 +332,7 @@
         ;;
     vsphere)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
-        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
+        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};;
     xenserver)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
         IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
diff --git a/tools/docker/README.md b/tools/docker/README.md
deleted file mode 100644
index 976111f..0000000
--- a/tools/docker/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# DevStack on Docker
-
-Using Docker as Nova's hypervisor requries two steps:
-
-* Configure DevStack by adding the following to `localrc`::
-
-    VIRT_DRIVER=docker
-
-* Download and install the Docker service and images::
-
-    tools/docker/install_docker.sh
-
-After this, `stack.sh` should run as normal.
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
deleted file mode 100755
index 27c8c82..0000000
--- a/tools/docker/install_docker.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-# **install_docker.sh** - Do the initial Docker installation and configuration
-
-# install_docker.sh
-#
-# Install docker package and images
-# * downloads a base busybox image and a glance registry image if necessary
-# * install the images in Docker's image cache
-
-
-# Keep track of the current directory
-SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-FILES=$TOP_DIR/files
-
-# Get our defaults
-source $TOP_DIR/lib/nova_plugins/hypervisor-docker
-
-SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
-
-
-# Install Docker Service
-# ======================
-
-if is_fedora; then
-    install_package docker-io socat
-else
-    # Stop the auto-repo updates and do it when required here
-    NO_UPDATE_REPOS=True
-
-    # Set up home repo
-    curl https://get.docker.io/gpg | sudo apt-key add -
-    install_package python-software-properties && \
-        sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
-    apt_get update
-    install_package --force-yes lxc-docker socat
-fi
-
-# Start the daemon - restart just in case the package ever auto-starts...
-restart_service docker
-
-echo "Waiting for docker daemon to start..."
-DOCKER_GROUP=$(groups | cut -d' ' -f1)
-CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do
-    # Set the right group on docker unix socket before retrying
-    sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET
-    sudo chmod g+rw $DOCKER_UNIX_SOCKET
-    sleep 1
-done"
-if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then
-    die $LINENO "docker did not start"
-fi
-
-# Get guest container image
-docker pull $DOCKER_IMAGE
-docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME
-
-# Get docker-registry image
-docker pull $DOCKER_REGISTRY_IMAGE
-docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 0c65fd9..9651083 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -55,7 +55,13 @@
 # ================
 
 # Install package requirements
-install_package $(get_packages general $ENABLED_SERVICES)
+PACKAGES=$(get_packages general $ENABLED_SERVICES)
+if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
+    # ensure headers for the running kernel are installed for any DKMS builds
+    PACKAGES="$PACKAGES linux-headers-$(uname -r)"
+fi
+
+install_package $PACKAGES
 
 if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
     if is_ubuntu || is_fedora; then
diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes
new file mode 100755
index 0000000..dc5a19d
--- /dev/null
+++ b/tools/ironic/scripts/cleanup-nodes
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# **cleanup-nodes**
+
+# Cleans up baremetal poseur nodes and volumes created during ironic setup
+# Assumes calling user has proper libvirt group membership and access.
+
+set -exu
+
+LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+
+VM_COUNT=$1
+NETWORK_BRIDGE=$2
+
+for (( idx=0; idx<$VM_COUNT; idx++ )); do
+    NAME="baremetal${NETWORK_BRIDGE}_${idx}"
+    VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2"
+    virsh list | grep -q $NAME && virsh destroy $NAME
+    virsh list --inactive | grep -q $NAME && virsh undefine $NAME
+
+    if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then
+      virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
+          virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL
+    fi
+done
diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm
new file mode 100755
index 0000000..9936b76
--- /dev/null
+++ b/tools/ironic/scripts/configure-vm
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+import argparse
+import os.path
+
+import libvirt
+
+templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)),
+                           'templates')
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Configure a kvm virtual machine for the seed image.")
+    parser.add_argument('--name', default='seed',
+                        help='the name to give the machine in libvirt.')
+    parser.add_argument('--image',
+                        help='Use a custom image file (must be qcow2).')
+    parser.add_argument('--engine', default='qemu',
+                        help='The virtualization engine to use')
+    parser.add_argument('--arch', default='i686',
+                        help='The architecture to use')
+    parser.add_argument('--memory', default='2097152',
+                        help="Maximum memory for the VM in KB.")
+    parser.add_argument('--cpus', default='1',
+                        help="CPU count for the VM.")
+    parser.add_argument('--bootdev', default='hd',
+                        help="What boot device to use (hd/network).")
+    parser.add_argument('--network', default="brbm",
+                        help='The libvirt network name to use')
+    parser.add_argument('--libvirt-nic-driver', default='e1000',
+                        help='The libvirt network driver to use')
+    parser.add_argument('--emulator', default=None,
+                        help='Path to emulator bin for vm template')
+    args = parser.parse_args()
+    with file(templatedir + '/vm.xml', 'rb') as f:
+        source_template = f.read()
+    params = {
+        'name': args.name,
+        'imagefile': args.image,
+        'engine': args.engine,
+        'arch': args.arch,
+        'memory': args.memory,
+        'cpus': args.cpus,
+        'bootdev': args.bootdev,
+        'network': args.network,
+        'emulator': args.emulator,
+    }
+
+    if args.emulator:
+        params['emulator'] = args.emulator
+    else:
+        if os.path.exists("/usr/bin/kvm"):  # Debian
+            params['emulator'] = "/usr/bin/kvm"
+        elif os.path.exists("/usr/bin/qemu-kvm"):  # Redhat
+            params['emulator'] = "/usr/bin/qemu-kvm"
+
+    nicparams = {
+        'nicdriver': args.libvirt_nic_driver,
+        'network': args.network,
+    }
+
+    params['bm_network'] = """
+<!-- neutron friendly 'bare metal' network -->
+<interface type='network'>
+  <source network='%(network)s'/>
+  <virtualport type='openvswitch'/>
+  <model type='%(nicdriver)s'/>
+  <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+</interface>""" % nicparams
+
+    libvirt_template = source_template % params
+    conn = libvirt.open("qemu:///system")
+    a = conn.defineXML(libvirt_template)
+    print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes
new file mode 100755
index 0000000..3232b50
--- /dev/null
+++ b/tools/ironic/scripts/create-nodes
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# **create-nodes**
+
+# Creates baremetal poseur nodes for ironic testing purposes
+
+set -exu
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+CPU=$1
+MEM=$(( 1024 * $2 ))
+# Add an extra GB to allow fuzz for the partition table: the flavor size and
+# the registered size need to differ from the actual disk size.
+DISK=$(( $3 + 1))
+
+case $4 in
+    i386) ARCH='i686' ;;
+    amd64) ARCH='x86_64' ;;
+    *) echo "Unsupported arch $4!" ; exit 1 ;;
+esac
+
+TOTAL=$(($5 - 1))
+BRIDGE=$6
+EMULATOR=$7
+
+LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"}
+LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+
+if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
+    virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2
+    virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2
+    virsh pool-start $LIBVIRT_STORAGE_POOL >&2
+fi
+
+pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
+if [ "$pool_state" != "running" ] ; then
+  [ ! -d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images
+  virsh pool-start $LIBVIRT_STORAGE_POOL >&2
+fi
+
+PREALLOC=
+if [ -f /etc/debian_version ]; then
+    PREALLOC="--prealloc-metadata"
+fi
+
+DOMS=""
+for idx in $(seq 0 $TOTAL) ; do
+    NAME="baremetal${BRIDGE}_${idx}"
+    DOMS="$DOMS $NAME"
+    VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2"
+    (virsh list --all | grep -q $NAME) && continue
+
+    virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
+        virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2
+    virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2
+    volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME)
+    # Pre-touch the VM to set +C, as it can only be set on empty files.
+    sudo touch "$volume_path"
+    sudo chattr +C "$volume_path" || true
+    $TOP_DIR/scripts/configure-vm --bootdev network --name $NAME --image "$volume_path" --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER --emulator $EMULATOR --network $BRIDGE >&2
+done
+
+for dom in $DOMS ; do
+    # echo mac
+    virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2
+done
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
new file mode 100755
index 0000000..8c3ea90
--- /dev/null
+++ b/tools/ironic/scripts/setup-network
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# **setup-network**
+
+# Sets up an openvswitch libvirt network suitable for
+# running baremetal poseur nodes for ironic testing purposes
+
+set -exu
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+BRIDGE_SUFFIX=${1:-''}
+BRIDGE_NAME=brbm$BRIDGE_SUFFIX
+
+# Only add bridge if missing
+(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
+
+# remove bridge before replacing it.
+(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME}
+(virsh net-list --inactive  | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME}
+
+virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml)
+virsh net-autostart ${BRIDGE_NAME}
+virsh net-start ${BRIDGE_NAME}
diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml
new file mode 100644
index 0000000..0769d3f
--- /dev/null
+++ b/tools/ironic/templates/brbm.xml
@@ -0,0 +1,6 @@
+<network>
+  <name>brbm</name>
+  <forward mode='bridge'/>
+  <bridge name='brbm'/>
+  <virtualport type='openvswitch'/>
+</network>
diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template
new file mode 100644
index 0000000..7b9b0f8
--- /dev/null
+++ b/tools/ironic/templates/tftpd-xinetd.template
@@ -0,0 +1,11 @@
+service tftp
+{
+  protocol        = udp
+  port            = 69
+  socket_type     = dgram
+  wait            = yes
+  user            = root
+  server          = /usr/sbin/in.tftpd
+  server_args     = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR%
+  disable         = no
+}
diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml
new file mode 100644
index 0000000..b18dec0
--- /dev/null
+++ b/tools/ironic/templates/vm.xml
@@ -0,0 +1,43 @@
+<domain type='%(engine)s'>
+  <name>%(name)s</name>
+  <memory unit='KiB'>%(memory)s</memory>
+  <vcpu>%(cpus)s</vcpu>
+  <os>
+    <type arch='%(arch)s' machine='pc-1.0'>hvm</type>
+    <boot dev='%(bootdev)s'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>%(emulator)s</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='writeback'/>
+      <source file='%(imagefile)s'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </disk>
+    <controller type='ide' index='0'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+    </controller>
+    %(network)s
+    %(bm_network)s
+    <input type='mouse' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes'/>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <memballoon model='virtio'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </memballoon>
+  </devices>
+</domain>
+
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index ea943e1..ee3790f 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -1,7 +1,20 @@
 #!/usr/bin/python
-import urllib
+
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
 import json
 import sys
+import urllib
 
 
 def print_usage():
@@ -42,4 +55,4 @@
                                        'logUrl': log_url,
                                        'healthReport': config['healthReport']})
 
-print json.dumps(results)
+print(json.dumps(results))
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
index 5b845d8..1d994a6 100644
--- a/tools/uec/meta.py
+++ b/tools/uec/meta.py
@@ -1,10 +1,23 @@
-import sys
-from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
-from SimpleHTTPServer import SimpleHTTPRequestHandler
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
 
-def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
-         ServerClass = HTTPServer, protocol="HTTP/1.0"):
-    """simple http server that listens on a give address:port"""
+import BaseHTTPServer
+import SimpleHTTPServer
+import sys
+
+
+def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
+         ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
+    """simple http server that listens on a give address:port."""
 
     server_address = (host, port)
 
@@ -12,7 +25,7 @@
     httpd = ServerClass(server_address, HandlerClass)
 
     sa = httpd.socket.getsockname()
-    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    print("Serving HTTP on", sa[0], "port", sa[1], "...")
     httpd.serve_forever()
 
 if __name__ == '__main__':
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 440774e..2b5e418 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -73,7 +73,7 @@
 # Install basics
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr
+apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
 pip install xenapi
 
 # Install XenServer guest utilities
diff --git a/unstack.sh b/unstack.sh
index 6351fe0..a5e7b87 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -127,11 +127,6 @@
     killall stud
 fi
 
-# baremetal might have created a fake environment
-if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-    cleanup_fake_baremetal_env
-fi
-
 SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
 
 # Get the iSCSI volumes