Merge "XenAPI:  Find location for XenAPI plugins"
diff --git a/clean.sh b/clean.sh
index 3707d84..f3b2a99 100755
--- a/clean.sh
+++ b/clean.sh
@@ -40,7 +40,6 @@
 
 source $TOP_DIR/lib/tls
 
-source $TOP_DIR/lib/oslo
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
@@ -84,7 +83,6 @@
 fi
 
 # Clean projects
-cleanup_oslo
 cleanup_cinder
 cleanup_glance
 cleanup_keystone
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index d2c636f..7726e7e 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -19,6 +19,8 @@
 #
 #     SCREEN_LOGDIR=/opt/stack/screen-logs
 
+set -o pipefail
+
 CERT_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=$(cd $CERT_DIR/..; pwd)
 
diff --git a/eucarc b/eucarc
index 3502351..343f4cc 100644
--- a/eucarc
+++ b/eucarc
@@ -22,7 +22,7 @@
 export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
 
 # Create EC2 credentials for the current user
-CREDS=$(keystone ec2-credentials-create)
+CREDS=$(openstack ec2 credentials create)
 export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
 export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index dff8e7a..d756685 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,6 +44,9 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
+# Ironic does not support boot from volume.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 3768b56..f9c4752 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -36,6 +36,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Import project functions
+source $TOP_DIR/lib/neutron
+
 # If nova api is not enabled we exit with exitcode 55 so that
 # the exercise is skipped
 is_service_enabled n-api || exit 55
@@ -82,7 +85,7 @@
 
 # Volumes
 # -------
-if is_service_enabled c-vol && ! is_service_enabled n-cell; then
+if is_service_enabled c-vol && ! is_service_enabled n-cell && [ "$VIRT_DRIVER" != "ironic" ]; then
     VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
     die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 1416d4d..7e90e5a 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -180,7 +180,7 @@
 fi
 
 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
+if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
     ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
 fi
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0d556df..1dff6a4 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -41,6 +41,9 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
+# Ironic does not currently support volume attachment.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/files/apts/nova b/files/apts/nova
index ae925c3..69d0a35 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -12,7 +12,7 @@
 ebtables
 sqlite3
 sudo
-kvm # NOPRIME
+qemu-kvm # NOPRIME
 qemu # dist:wheezy,jessie NOPRIME
 libvirt-bin # NOPRIME
 libjs-jquery-tablesorter # Needed for coverage html reports
@@ -25,7 +25,6 @@
 python-mox
 python-paste
 python-migrate
-python-gflags
 python-greenlet
 python-libvirt # NOPRIME
 python-libxml2
@@ -34,7 +33,6 @@
 python-pastedeploy
 python-eventlet
 python-cheetah
-python-carrot
 python-tempita
 python-sqlalchemy
 python-suds
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index e9ccf59..462513d 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -3,7 +3,7 @@
 ebtables
 iptables
 iputils
-mysql-community-server # NOPRIME
+mariadb # NOPRIME
 python-boto
 python-eventlet
 python-greenlet
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index ee4917d..c5a58b9 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -12,8 +12,7 @@
 qemu # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
-libxml2-python
-mysql-community-server # NOPRIME
+mariadb # NOPRIME
 parted
 polkit
 python-M2Crypto
@@ -24,20 +23,19 @@
 python-SQLAlchemy
 python-Tempita
 python-boto
-python-carrot
 python-cheetah
 python-eventlet
 python-feedparser
 python-greenlet
 python-iso8601
 python-kombu
+python-libxml2
 python-lockfile
 python-lxml # needed for glance which is needed for nova --- this shouldn't be here
 python-mox
 python-mysql
 python-numpy # needed by websockify for spice console
 python-paramiko
-python-python-gflags
 python-sqlalchemy-migrate
 python-suds
 python-xattr # needed for glance which is needed for nova --- this shouldn't be here
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 06ea0ea..9fafecb 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -21,4 +21,3 @@
 qpid-cpp-server        # NOPRIME
 sqlite
 sudo
-vconfig
diff --git a/files/rpms/nova b/files/rpms/nova
index 45d6e0b..e05d0d7 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -17,11 +17,9 @@
 parted
 polkit
 python-boto
-python-carrot
 python-cheetah
 python-eventlet
 python-feedparser
-python-gflags
 python-greenlet
 python-iso8601
 python-kombu
@@ -42,4 +40,3 @@
 qpid-cpp-server # NOPRIME
 sqlite
 sudo
-vconfig
diff --git a/functions b/functions
index 17c6e77..80f98ad 100644
--- a/functions
+++ b/functions
@@ -199,7 +199,21 @@
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
         IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}"
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=ovf --disk-format=vhd < "${IMAGE}"
+        FORCE_VM_MODE=""
+        if [[ "$IMAGE_NAME" =~ 'cirros' ]]; then
+            # Cirros VHD image currently only boots in PV mode.
+            # Nova defaults to PV for all VHD images, but
+            # the glance setting is needed for booting
+            # directly from volume.
+            FORCE_VM_MODE="--property vm_mode=xen"
+        fi
+        glance \
+            --os-auth-token $token \
+            --os-image-url http://$GLANCE_HOSTPORT \
+            image-create \
+            --name "$IMAGE_NAME" --is-public=True \
+            --container-format=ovf --disk-format=vhd \
+            $FORCE_VM_MODE < "${IMAGE}"
         return
     fi
 
diff --git a/functions-common b/functions-common
index c6fd5c7..6340c5c 100644
--- a/functions-common
+++ b/functions-common
@@ -824,6 +824,10 @@
             if [[ ! $file_to_parse =~ neutron ]]; then
                 file_to_parse="${file_to_parse} neutron"
             fi
+        elif [[ $service == ir-* ]]; then
+            if [[ ! $file_to_parse =~ ironic ]]; then
+                file_to_parse="${file_to_parse} ironic"
+            fi
         fi
     done
 
@@ -1235,6 +1239,19 @@
         && $SUDO_PIP rm -rf ${pip_build_tmp}
 }
 
+# This should be used if you want to install globally; all libraries
+# should use this, especially *oslo* ones.
+function setup_install {
+    local project_dir=$1
+    setup_package_with_req_sync $project_dir
+}
+
+# this should be used for projects which run services, like all services
+function setup_develop {
+    local project_dir=$1
+    setup_package_with_req_sync $project_dir -e
+}
+
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
 #
@@ -1243,8 +1260,9 @@
 #
 # Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
 # setup_develop directory
-function setup_develop {
+function setup_package_with_req_sync {
     local project_dir=$1
+    local flags=$2
 
     # Don't update repo if local changes exist
     # Don't use buggy "git diff --quiet"
@@ -1256,7 +1274,7 @@
             $SUDO_CMD python update.py $project_dir)
     fi
 
-    setup_develop_no_requirements_update $project_dir
+    setup_package $project_dir $flags
 
     # We've just gone and possibly modified the user's source tree in an
     # automated way, which is considered bad form if it's a development
@@ -1277,12 +1295,15 @@
 # using pip before running `setup.py develop`
 # Uses globals ``STACK_USER``
 # setup_develop_no_requirements_update directory
-function setup_develop_no_requirements_update {
+function setup_package {
     local project_dir=$1
+    local flags=$2
 
-    pip_install -e $project_dir
+    pip_install $flags $project_dir
     # ensure that further actions can do things like setup.py sdist
-    safe_chown -R $STACK_USER $1/*.egg-info
+    if [[ "$flags" == "-e" ]]; then
+        safe_chown -R $STACK_USER $1/*.egg-info
+    fi
 }
 
 
diff --git a/lib/baremetal b/lib/baremetal
index eda92f9..adcbe4c 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -129,7 +129,7 @@
 
 # Below this, we set some path and filenames.
 # Defaults are probably sufficient.
-BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
+DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
 
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
@@ -165,7 +165,7 @@
 # Install diskimage-builder and shell-in-a-box
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
-    git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+    git_clone $DIB_REPO $DIB_DIR $DIB_BUILD_BRANCH
 
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
@@ -223,7 +223,7 @@
         BM_DEPLOY_KERNEL=bm-deploy.kernel
         BM_DEPLOY_RAMDISK=bm-deploy.initramfs
         if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then
-            $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \
+            $DIB_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \
                 -o $TOP_DIR/files/bm-deploy
         fi
     fi
@@ -271,7 +271,7 @@
     image_name=$(basename "$file" ".qcow2")
 
     # this call returns the file names as "$kernel,$ramdisk"
-    out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
+    out=$($DIB_DIR/bin/disk-image-get-kernel \
             -x -d $TOP_DIR/files -o bm-deploy -i $file)
     if [ $? -ne 0 ]; then
         die $LINENO "Failed to get kernel and ramdisk from $file"
diff --git a/lib/heat b/lib/heat
index 902333e..f66f0a8 100644
--- a/lib/heat
+++ b/lib/heat
@@ -37,6 +37,10 @@
 HEAT_CONF=$HEAT_CONF_DIR/heat.conf
 HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
 HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
+HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+
+# other default options
+HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
 
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,heat
@@ -247,37 +251,42 @@
     # heat_stack_user role is for users created by Heat
     openstack role create heat_stack_user
 
-    # heat_stack_owner role is given to users who create Heat stacks,
-    # it's the default role used by heat to delegate to the heat service
-    # user (for performing deferred operations via trusts), see heat.conf
-    HEAT_OWNER_ROLE=$(openstack role create \
-        heat_stack_owner \
-        | grep " id " | get_field 2)
+    if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then
+        # heat_stack_owner role is given to users who create Heat stacks,
+        # it's the default role used by heat to delegate to the heat service
+        # user (for performing deferred operations via trusts), see heat.conf
+        HEAT_OWNER_ROLE=$(openstack role create \
+            heat_stack_owner \
+            | grep " id " | get_field 2)
 
-    # Give the role to the demo and admin users so they can create stacks
-    # in either of the projects created by devstack
-    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
-    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
-    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+        # Give the role to the demo and admin users so they can create stacks
+        # in either of the projects created by devstack
+        openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+        openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+        openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+        iniset $HEAT_CONF DEFAULT deferred_auth_method trusts
+    fi
 
-    # Note we have to pass token/endpoint here because the current endpoint and
-    # version negotiation in OSC means just --os-identity-api-version=3 won't work
-    KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
-    D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 domain create heat \
-        --description "Owns users and projects created by heat" \
-        | grep ' id ' | get_field 2)
-    iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+    if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
+        # Note we have to pass token/endpoint here because the current endpoint and
+        # version negotiation in OSC means just --os-identity-api-version=3 won't work
+        KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
+        D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 domain create heat \
+            --description "Owns users and projects created by heat" \
+            | grep ' id ' | get_field 2)
+        iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
 
-    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
-        --domain $D_ID heat_domain_admin \
-        --description "Manages users and projects created by heat"
-    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 role add \
-        --user heat_domain_admin --domain ${D_ID} admin
-    iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
-    iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
+        openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
+            --domain $D_ID heat_domain_admin \
+            --description "Manages users and projects created by heat"
+        openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 role add \
+            --user heat_domain_admin --domain ${D_ID} admin
+        iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+        iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
+    fi
 }
 
 # Restore xtrace
diff --git a/lib/infra b/lib/infra
index 7f70ff2..e2f7dad 100644
--- a/lib/infra
+++ b/lib/infra
@@ -46,7 +46,7 @@
 
     # Install pbr
     git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
-    setup_develop $PBR_DIR
+    setup_install $PBR_DIR
 }
 
 # Restore xtrace
diff --git a/lib/ironic b/lib/ironic
index c6fa563..979420f 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -52,7 +52,11 @@
 IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
 IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
 IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
-IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-256}
+# NOTE(agordeev): both ubuntu and fedora deploy images won't work with 256MB of RAM.
+#                 System halts and throws kernel panic during initramfs unpacking.
+#                 Ubuntu needs at least 384MB, but fedora requires 448.
+#                 So placing 512 here to satisfy both.
+IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-512}
 IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
 IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
 IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
@@ -60,6 +64,19 @@
 IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv}
 IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys}
 
+DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
+
+# Use DIB to create deploy ramdisk and kernel.
+IRONIC_BUILD_DEPLOY_RAMDISK=`trueorfalse True $IRONIC_BUILD_DEPLOY_RAMDISK`
+# If not using DIB, these files are used as the deploy ramdisk/kernel.
+# (The value must be an absolute path)
+IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-}
+IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
+IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
+
+# TODO(agordeev): replace 'ubuntu' with the detected host distro name
+IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
+
 # Support entry points installation of console scripts
 IRONIC_BIN_DIR=$(get_python_exec_prefix)
 
@@ -154,8 +171,8 @@
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
     iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
-    iniset $IRONIC_CONF_FILE conductor api_url http://$SERVICE_HOST:6385
-    iniset $IRONIC_CONF_FILE pxe tftp_server $SERVICE_HOST
+    iniset $IRONIC_CONF_FILE conductor api_url http://$HOST_IP:6385
+    iniset $IRONIC_CONF_FILE pxe tftp_server $HOST_IP
     iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
     iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
 }
@@ -284,20 +301,12 @@
     mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
 }
 
-function ironic_ensure_libvirt_group {
-    groups $STACK_USER | grep -q $LIBVIRT_GROUP || adduser $STACK_USER $LIBVIRT_GROUP
-}
-
 function create_bridge_and_vms {
-    ironic_ensure_libvirt_group
-
     # Call libvirt setup scripts in a new shell to ensure any new group membership
     sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
-
     sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
         $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
         amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE
-
 }
 
 function enroll_vms {
@@ -331,7 +340,7 @@
 
     # create the nova flavor
     nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU
-    nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$BM_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$BM_DEPLOY_RAMDISK_ID"
+    nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$IRONIC_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$IRONIC_DEPLOY_RAMDISK_ID"
 
     # intentional sleep to make sure the tag has been set to port
     sleep 10
@@ -430,10 +439,55 @@
     configure_ironic_sshd
 }
 
+# build deploy kernel+ramdisk, then upload them to glance
+# this function sets IRONIC_DEPLOY_KERNEL_ID and IRONIC_DEPLOY_RAMDISK_ID
+function upload_baremetal_ironic_deploy {
+    token=$1
+
+    if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
+        IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
+        IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
+    else
+        IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
+        IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
+    fi
+
+    if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
+        # files don't exist, need to build them
+        if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
+            # we can build them only if we're not offline
+            if [ "$OFFLINE" != "True" ]; then
+                $DIB_DIR/bin/ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
+                    -o $TOP_DIR/files/ir-deploy
+            else
+                die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
+            fi
+        else
+            die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
+        fi
+    fi
+
+    # load them into glance
+    IRONIC_DEPLOY_KERNEL_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
+        --is-public True --disk-format=aki \
+        < $IRONIC_DEPLOY_KERNEL_PATH  | grep ' id ' | get_field 2)
+    IRONIC_DEPLOY_RAMDISK_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
+        --is-public True --disk-format=ari \
+        < $IRONIC_DEPLOY_RAMDISK_PATH  | grep ' id ' | get_field 2)
+}
+
 function prepare_baremetal_basic_ops {
 
     # install diskimage-builder
-    git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+    git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
 
     # make sure all needed service were enabled
     for srv in nova glance key neutron; do
@@ -461,7 +515,7 @@
     echo_summary "Creating and uploading baremetal images for ironic"
 
     # build and upload separate deploy kernel & ramdisk
-    upload_baremetal_deploy $TOKEN
+    upload_baremetal_ironic_deploy $TOKEN
 
     create_bridge_and_vms
     enroll_vms
diff --git a/lib/neutron b/lib/neutron
index 84e8277..294ffac 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -762,7 +762,7 @@
     done
 
     # Configuration for neutron notifations to nova.
-    iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
     iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
     iniset $NEUTRON_CONF DEFAULT nova_admin_username nova $NOVA_USER
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 4cb0da8..efdd9ef 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -2,7 +2,7 @@
 # ------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+BS_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -38,7 +38,12 @@
 }
 
 function neutron_plugin_configure_plugin_agent {
-    :
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE
+    AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py"
+
+    _neutron_ovs_base_configure_firewall_driver
 }
 
 function neutron_plugin_configure_service {
@@ -61,7 +66,7 @@
 
 function has_neutron_plugin_security_group {
     # 1 means False here
-    return 1
+    return 0
 }
 
 function neutron_plugin_check_adv_test_requirements {
@@ -69,4 +74,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$BS_XTRACE
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index a1b089e..7f7c049 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+CISCO_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Scecify the VSM parameters
@@ -324,4 +324,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$CISCO_XTRACE
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 62f9737..cce108a 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+EMBR_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch
@@ -37,4 +37,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$EMBR_XTRACE
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
index 22c8578..3aef9d0 100644
--- a/lib/neutron_plugins/ibm
+++ b/lib/neutron_plugins/ibm
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+IBM_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -130,4 +130,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$IBM_XTRACE
diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge
index 362fd5b..96b14f1 100644
--- a/lib/neutron_plugins/linuxbridge
+++ b/lib/neutron_plugins/linuxbridge
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+LBRIDGE_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent
@@ -53,4 +53,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$LBRIDGE_XTRACE
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 742e3b2..c5373d6 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -6,7 +6,7 @@
 MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+MN_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function is_neutron_ovs_base_plugin {
@@ -84,4 +84,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$MN_XTRACE
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index e985dcb..db43fcf 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -2,7 +2,7 @@
 # ------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+ML2_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Enable this to simply and quickly enable tunneling with ML2.
@@ -119,4 +119,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$ML2_XTRACE
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index 6d4bfca..d76f7d4 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NEC_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Configuration parameters
@@ -127,4 +127,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NEC_XTRACE
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 3649f39..86f09d2 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -2,7 +2,7 @@
 # ----------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NU_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function neutron_plugin_create_nova_conf {
@@ -66,4 +66,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NU_XTRACE
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
index 724df41..6610ea3 100644
--- a/lib/neutron_plugins/ofagent_agent
+++ b/lib/neutron_plugins/ofagent_agent
@@ -2,7 +2,7 @@
 # ----------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OFA_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -91,4 +91,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OFA_XTRACE
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
index 0aebff6..06f1eee 100644
--- a/lib/neutron_plugins/oneconvergence
+++ b/lib/neutron_plugins/oneconvergence
@@ -1,7 +1,7 @@
 # Neutron One Convergence plugin
 # ---------------------------
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OC_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -73,4 +73,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OC_XTRACE
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index bdbc5a9..c644fed 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OVS_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch_agent
@@ -57,4 +57,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OVS_XTRACE
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 3a2bdc3..33ca17a 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -2,7 +2,7 @@
 # -----------------------------
 
 # Save trace setting
-PLUGIN_XTRACE=$(set +o | grep xtrace)
+OVSA_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -128,4 +128,4 @@
 }
 
 # Restore xtrace
-$PLUGIN_XTRACE
+$OVSA_XTRACE
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 0a2ba58..ae7f815 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -2,7 +2,7 @@
 # -------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OVSB_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
@@ -77,4 +77,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OVSB_XTRACE
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 19f94cb..67080f4 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -3,7 +3,7 @@
 # ------------------------------------
 
 # Save trace settings
-MY_XTRACE=$(set +o | grep xtrace)
+PG_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function neutron_plugin_create_nova_conf {
@@ -52,4 +52,4 @@
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 # Restore xtrace
-$MY_XTRACE
+$PG_XTRACE
diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu
index 9ae36d3..ceb89fa 100644
--- a/lib/neutron_plugins/ryu
+++ b/lib/neutron_plugins/ryu
@@ -2,7 +2,7 @@
 # ------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+RYU_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -77,4 +77,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$RYU_XTRACE
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index ab6c324..b5253db 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+FW_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin
@@ -24,4 +24,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$FW_XTRACE
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 531f52f..78e7738 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+LB_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -48,4 +48,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$LB_XTRACE
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 0e5f75b..51123e2 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+METER_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -27,4 +27,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$METER_XTRACE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index e56d361..d920ba6 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+VPN_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -30,4 +30,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$VPN_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index fe79354..f2f8735 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -2,7 +2,7 @@
 # -------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NSX_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -146,4 +146,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NSX_XTRACE
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index f03de56..033731e 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -2,7 +2,7 @@
 # ------------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+BS3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
@@ -49,4 +49,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$BS3_XTRACE
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index ad417bb..099a66e 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -17,7 +17,7 @@
 MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+MN3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function configure_midonet {
@@ -46,4 +46,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$MN3_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index b2c1b61..bbe227e 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -2,7 +2,7 @@
 # -----------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+RYU3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -75,4 +75,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$RYU3_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index d465ac7..f829aa8 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -13,7 +13,7 @@
 TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+TREMA3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 TREMA_DIR=${TREMA_DIR:-$DEST/trema}
@@ -114,4 +114,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$TREMA3_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 3fecc62..7a76570 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -11,7 +11,7 @@
 # * NSX_GATEWAY_NETWORK_CIDR         --> CIDR to configure br-ex, e.g. 172.24.4.211/24
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NSX3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # This is the interface that connects the Devstack instance
@@ -83,4 +83,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NSX3_XTRACE
diff --git a/lib/nova b/lib/nova
index 8240813..5cc94ec 100644
--- a/lib/nova
+++ b/lib/nova
@@ -139,7 +139,7 @@
 # Test if any Nova Cell services are enabled
 # is_nova_enabled
 function is_n-cell_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0
+    [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
     return 1
 }
 
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index adffe01..1f2b239 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -18,7 +18,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        install_package kvm
+        install_package qemu-kvm
         install_package libvirt-bin
         install_package python-libvirt
         install_package python-guestfs
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index 5af7c0b..e72f7c1 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -18,6 +18,7 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+source $TOP_DIR/lib/nova_plugins/functions-libvirt
 
 # Defaults
 # --------
@@ -33,8 +34,12 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm`
+    configure_libvirt
     LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+
+    # NOTE(adam_g): The ironic compute driver currently lives in the ironic
+    # tree.  We purposely configure Nova to load it from there until it moves
+    # back into Nova proper.
     iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver
     iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
     iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager
@@ -45,13 +50,13 @@
     iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
     iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
     iniset $NOVA_CONF ironic admin_tenant_name demo
-    iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6358/v1
+    iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6385/v1
+    iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm`
 }
 
 # install_nova_hypervisor() - Install external components
 function install_nova_hypervisor {
-    # This function intentionally left blank
-    :
+    install_libvirt
 }
 
 # start_nova_hypervisor - Start any required external services
diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere
index b04aeda..9933a3c 100644
--- a/lib/nova_plugins/hypervisor-vsphere
+++ b/lib/nova_plugins/hypervisor-vsphere
@@ -39,7 +39,7 @@
     iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP"
     iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER"
     iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD"
-    iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER"
+    iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER"
     if is_service_enabled neutron; then
         iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE
     fi
diff --git a/lib/opendaylight b/lib/opendaylight
index ca81c20..1022e2c 100644
--- a/lib/opendaylight
+++ b/lib/opendaylight
@@ -134,7 +134,7 @@
     # The flags to ODL have the following meaning:
     #   -of13: runs ODL using OpenFlow 1.3 protocol support.
     #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
-    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
 
     # Sleep a bit to let OpenDaylight finish starting up
     sleep $ODL_BOOT_WAIT
diff --git a/lib/oslo b/lib/oslo
index 8ef179c..2e1f6bf 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -34,44 +34,29 @@
 
 # install_oslo() - Collect source and prepare
 function install_oslo {
-    # TODO(sdague): remove this once we get to Icehouse, this just makes
-    # for a smoother transition of existing users.
-    cleanup_oslo
-
     git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
-    setup_develop $CLIFF_DIR
+    setup_install $CLIFF_DIR
 
     git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
-    setup_develop $OSLOCFG_DIR
+    setup_install $OSLOCFG_DIR
 
     git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
-    setup_develop $OSLOMSG_DIR
+    setup_install $OSLOMSG_DIR
 
     git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
-    setup_develop $OSLORWRAP_DIR
+    setup_install $OSLORWRAP_DIR
 
     git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH
-    setup_develop $OSLOVMWARE_DIR
+    setup_install $OSLOVMWARE_DIR
 
     git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
-    setup_develop $PYCADF_DIR
+    setup_install $PYCADF_DIR
 
     git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH
-    setup_develop $STEVEDORE_DIR
+    setup_install $STEVEDORE_DIR
 
     git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH
-    setup_develop $TASKFLOW_DIR
-}
-
-# cleanup_oslo() - purge possibly old versions of oslo
-function cleanup_oslo {
-    # this means we've got an old oslo installed, lets get rid of it
-    if ! python -c 'import oslo.config' 2>/dev/null; then
-        echo "Found old oslo.config... removing to ensure consistency"
-        local PIP_CMD=$(get_pip_command)
-        pip_install oslo.config
-        sudo $PIP_CMD uninstall -y oslo.config
-    fi
+    setup_install $TASKFLOW_DIR
 }
 
 # Restore xtrace
diff --git a/lib/sahara b/lib/sahara
index 71bd5b0..1ff0cf9 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -28,7 +28,6 @@
 SAHARA_DIR=$DEST/sahara
 SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
 SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
-SAHARA_DEBUG=${SAHARA_DEBUG:-True}
 
 SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
 SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
@@ -102,8 +101,7 @@
     sudo chown $STACK_USER $SAHARA_CONF_DIR
 
     # Copy over sahara configuration file and configure common parameters.
-    # TODO(slukjanov): rename when sahara internals will be updated
-    cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE
+    cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE
 
     # Create auth cache dir
     sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
@@ -126,7 +124,8 @@
     iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
     iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
 
-    iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG
+    iniset $SAHARA_CONF_FILE DEFAULT verbose True
+    iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
     iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
 
diff --git a/lib/stackforge b/lib/stackforge
index dca08cc..e6528af 100644
--- a/lib/stackforge
+++ b/lib/stackforge
@@ -40,10 +40,10 @@
     cleanup_stackforge
 
     git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH
-    setup_develop_no_requirements_update $WSME_DIR
+    setup_package $WSME_DIR
 
     git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH
-    setup_develop_no_requirements_update $PECAN_DIR
+    setup_package $PECAN_DIR
 }
 
 # cleanup_stackforge() - purge possibly old versions of stackforge libraries
diff --git a/lib/swift b/lib/swift
index b655440..3e183ff 100644
--- a/lib/swift
+++ b/lib/swift
@@ -334,11 +334,12 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
 
-    # Configure Ceilometer
-    if is_service_enabled ceilometer; then
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
-        SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
-    fi
+    # Skipped due to bug 1294789
+    ## Configure Ceilometer
+    #if is_service_enabled ceilometer; then
+    #    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
+    #    SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
+    #fi
 
     # Restrict the length of auth tokens in the swift proxy-server logs.
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
@@ -454,6 +455,9 @@
     sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
     sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
         tee /etc/rsyslog.d/10-swift.conf
+    # restart syslog to take the changes
+    sudo killall -HUP rsyslogd
+
     if is_apache_enabled_service swift; then
         _config_swift_apache_wsgi
     fi
@@ -485,7 +489,7 @@
     truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}
 
     # Make a fresh XFS filesystem
-    mkfs.xfs -f -i size=1024  ${SWIFT_DISK_IMAGE}
+    /sbin/mkfs.xfs -f -i size=1024  ${SWIFT_DISK_IMAGE}
 
     # Mount the disk with mount options to make it as efficient as possible
     mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
@@ -627,8 +631,6 @@
 
 # start_swift() - Start running processes, including screen
 function start_swift {
-    # (re)start rsyslog
-    restart_service rsyslog
     # (re)start memcached to make sure we have a clean memcache.
     restart_service memcached
 
diff --git a/lib/tempest b/lib/tempest
index af5493e..a4558ce 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -277,7 +277,6 @@
     fi
 
     # Compute
-    iniset $TEMPEST_CONFIG compute change_password_available False
     iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
@@ -289,10 +288,14 @@
     iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
-    iniset $TEMPEST_CONFIG compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
-    iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
+    # Compute Features
+    iniset $TEMPEST_CONFIG compute-feature-enabled resize True
+    iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
+    iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
+    iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG "compute-admin" password "$password"
diff --git a/lib/tls b/lib/tls
index 072059d..88e5f60 100644
--- a/lib/tls
+++ b/lib/tls
@@ -348,7 +348,7 @@
     local key=${!key_var}
     local ca=${!ca_var}
 
-    if [[ !($cert && $key && $ca) ]]; then
+    if [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
         die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
                     "variable to enable SSL for ${service}"
     fi
diff --git a/lib/trove b/lib/trove
index 75b990f..42d2219 100644
--- a/lib/trove
+++ b/lib/trove
@@ -147,6 +147,9 @@
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove`
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_
 
     iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD
     sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
@@ -164,6 +167,9 @@
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
         setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
     fi
diff --git a/run_tests.sh b/run_tests.sh
index 685b203..b1aef4f 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -15,6 +15,23 @@
 #
 # this runs a series of unit tests for devstack to ensure it's functioning
 
+PASSES=""
+FAILURES=""
+
+# Check the return code and add the test to PASSES or FAILURES as appropriate
+# pass_fail <result> <expected> <name>
+function pass_fail {
+    local result=$1
+    local expected=$2
+    local test_name=$3
+
+    if [[ $result -ne $expected ]]; then
+        FAILURES="$FAILURES $test_name"
+    else
+        PASSES="$PASSES $test_name"
+    fi
+}
+
 if [[ -n $@ ]]; then
     FILES=$@
 else
@@ -27,6 +44,7 @@
 echo "Running bash8..."
 
 ./tools/bash8.py -v $FILES
+pass_fail $? 0 bash8
 
 
 # Test that no one is trying to land crazy refs as branches
@@ -35,8 +53,21 @@
 
 REFS=`grep BRANCH stackrc | grep -v -- '-master'`
 rc=$?
+pass_fail $rc 1 crazy-refs
 if [[ $rc -eq 0 ]]; then
     echo "Branch defaults must be master. Found:"
     echo $REFS
+fi
+
+echo "====================================================================="
+for script in $PASSES; do
+    echo PASS $script
+done
+for script in $FAILURES; do
+    echo FAILED $script
+done
+echo "====================================================================="
+
+if [[ -n "$FAILURES" ]]; then
     exit 1
 fi
diff --git a/stack.sh b/stack.sh
index a67f688..91f188f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -195,6 +195,7 @@
 # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will
 # see them by forcing PATH
 echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE
+echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE
 chmod 0440 $TEMPFILE
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
@@ -594,7 +595,9 @@
 function exit_trap {
     local r=$?
     jobs=$(jobs -p)
-    if [[ -n $jobs ]]; then
+    # Only do the kill when we're logging through a process substitution,
+    # which currently is only to verbose logfile
+    if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then
         echo "exit_trap: cleaning up child processes"
         kill 2>&1 $jobs
     fi
@@ -881,7 +884,7 @@
 # -------
 
 # A better kind of sysstat, with the top process per time slice
-DSTAT_OPTS="-tcndylp --top-cpu-adv"
+DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
 if [[ -n ${SCREEN_LOGDIR} ]]; then
     screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
 else
diff --git a/stackrc b/stackrc
index 4a997bf..4418be1 100644
--- a/stackrc
+++ b/stackrc
@@ -213,7 +213,7 @@
 # storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
-SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
 SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
 
 # python swift client library
@@ -226,8 +226,8 @@
 
 
 # diskimage-builder
-BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
+DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
+DIB_BRANCH=${DIB_BRANCH:-master}
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 9fa161e..1eb9e7a 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -71,12 +71,13 @@
 }
 
 function install_pip_tarball {
-    (cd $FILES; \
-        curl -O $PIP_TAR_URL; \
-        tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
-        cd pip-$INSTALL_PIP_VERSION; \
-        sudo -E python setup.py install 1>/dev/null; \
-    )
+    if [[ ! -r $FILES/pip-$INSTALL_PIP_VERSION.tar.gz ]]; then
+        (cd $FILES; \
+            curl -O $PIP_TAR_URL; \
+            tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null)
+    fi
+    (cd $FILES/pip-$INSTALL_PIP_VERSION; \
+        sudo -E python setup.py install 1>/dev/null)
 }
 
 # Show starting versions
diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes
index dc5a19d..adeca5c 100755
--- a/tools/ironic/scripts/cleanup-nodes
+++ b/tools/ironic/scripts/cleanup-nodes
@@ -8,10 +8,13 @@
 set -exu
 
 LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
 
 VM_COUNT=$1
 NETWORK_BRIDGE=$2
 
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
+
 for (( idx=0; idx<$VM_COUNT; idx++ )); do
     NAME="baremetal${NETWORK_BRIDGE}_${idx}"
     VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2"
diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes
index 3232b50..d81113a 100755
--- a/tools/ironic/scripts/create-nodes
+++ b/tools/ironic/scripts/create-nodes
@@ -27,6 +27,9 @@
 
 LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"}
 LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
 
 if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
     virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
index 8c3ea90..e326bf8 100755
--- a/tools/ironic/scripts/setup-network
+++ b/tools/ironic/scripts/setup-network
@@ -7,11 +7,15 @@
 
 set -exu
 
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 BRIDGE_SUFFIX=${1:-''}
 BRIDGE_NAME=brbm$BRIDGE_SUFFIX
 
+export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
+
 # Only add bridge if missing
 (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
 
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index ee3790f..8be500b 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -18,8 +18,8 @@
 
 
 def print_usage():
-    print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
-           % sys.argv[0])
+    print("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
+          % sys.argv[0])
     sys.exit()