Merge "Add n-obj to stop_nova"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8dc44ef..8b7b961 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -178,6 +178,10 @@
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
     die $LINENO "Failure deleting security group rule from $SECGROUP"
 
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+    die $LINENO "Security group rule not deleted from $SECGROUP"
+fi
+
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
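
The added wait uses the exercise scripts' usual timeout-wrapped polling idiom. A minimal standalone sketch of the same pattern, with a hypothetical resource_present command standing in for the nova secgroup-list-rules | grep test:

    # Poll until the condition clears, giving up after ASSOCIATE_TIMEOUT
    # seconds; resource_present is a hypothetical command on PATH that
    # succeeds while the resource still exists.
    ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}   # illustrative default only
    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while resource_present; do sleep 1; done"; then
        echo "resource was not removed within $ASSOCIATE_TIMEOUT seconds" >&2
        exit 1
    fi
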
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
new file mode 100644
index 0000000..57b4328
--- /dev/null
+++ b/extras.d/80-opendaylight.sh
@@ -0,0 +1,69 @@
+# opendaylight.sh - DevStack extras script
+
+if is_service_enabled odl-server odl-compute; then
+    # Initial source
+    [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
+fi
+
+if is_service_enabled odl-server; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "source" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight-compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        create_nova_conf_neutron
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing OpenDaylight"
+        ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+        ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
+        read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+        sudo ovs-vsctl set-manager tcp:$ODL_MGR_IP:$ODL_MGR_PORT
+        sudo ovs-vsctl set Open_vSwitch $ovstbl other_config={"local_ip"="$ODL_LOCAL_IP"}
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        sudo ovs-vsctl del-manager
+        BRIDGES=$(sudo ovs-vsctl list-br)
+        for bridge in $BRIDGES ; do
+            sudo ovs-vsctl del-controller $bridge
+        done
+
+        stop_opendaylight-compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
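
For context, stack.sh sources each extras.d hook once per phase, passing the phase name(s) as positional arguments, which is what the $1/$2 tests above dispatch on. A rough, simplified sketch of that driver loop (not the exact stack.sh code):

    # Simplified illustration of how extras.d hooks are driven; the real
    # loop in stack.sh adds ordering and error handling.
    for hook in $TOP_DIR/extras.d/*.sh; do
        source $hook stack install      # later phases: post-config, extra, post-extra
    done
    # and on teardown:
    for hook in $TOP_DIR/extras.d/*.sh; do
        source $hook unstack            # clean.sh passes "clean" instead
    done
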
diff --git a/files/apts/general b/files/apts/general
index 32d31f0..995c0c6 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,8 +9,6 @@
 lsof # useful when debugging
 openssh-server
 openssl
-vim-nox
-locate # useful when debugging
 python-virtualenv
 python-unittest2
 iputils-ping
diff --git a/files/apts/opendaylight b/files/apts/opendaylight
new file mode 100644
index 0000000..ec3cc9d
--- /dev/null
+++ b/files/apts/opendaylight
@@ -0,0 +1,2 @@
+openvswitch-datapath-dkms # NOPRIME
+openvswitch-switch # NOPRIME
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 9a34c76..fc1e813 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -53,41 +53,6 @@
         --role ResellerAdmin
 fi
 
-# Heat
-if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
-    keystone user-create --name=heat \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=heat@example.com
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user heat \
-        --role service
-    # heat_stack_user role is for users created by Heat
-    keystone role-create --name heat_stack_user
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=heat-cfn \
-            --type=cloudformation \
-            --description="Heat CloudFormation Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat-cfn \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
-        keystone service-create \
-            --name=heat \
-            --type=orchestration \
-            --description="Heat Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service heat \
-            --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
-    fi
-fi
-
 # Glance
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
     keystone user-create \
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
new file mode 100644
index 0000000..61f73ee
--- /dev/null
+++ b/files/rpms-suse/baremetal
@@ -0,0 +1 @@
+dnsmasq
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 704947e..ff27a3a 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,23 +1,22 @@
+bc
 bridge-utils
 ca-certificates-mozilla
 curl
 euca2ools
 git-core
 iputils
+libopenssl-devel # to rebuild pyOpenSSL if needed
+lsof # useful when debugging
+make
 openssh
 openssl
 psmisc
-python-setuptools # instead of python-distribute; dist:sle11sp2
 python-cmd2 # dist:opensuse-12.3
 python-pylint
+python-setuptools # instead of python-distribute; dist:sle11sp2
 python-unittest2
 screen
 tar
 tcpdump
 unzip
-vim-enhanced
 wget
-bc
-
-findutils-locate # useful when debugging
-lsof # useful when debugging
diff --git a/files/rpms-suse/opendaylight b/files/rpms-suse/opendaylight
new file mode 100644
index 0000000..d6c7146
--- /dev/null
+++ b/files/rpms-suse/opendaylight
@@ -0,0 +1,4 @@
+openvswitch # NOPRIME
+openvswitch-controller # NOPRIME
+openvswitch-switch # NOPRIME
+
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
 python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/glance b/files/rpms/glance
index 25c5d39..534097a 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -13,6 +13,6 @@
 python-paste-deploy #dist:f18,f19,f20,rhel7
 python-routes
 python-sqlalchemy
-python-wsgiref
+python-wsgiref      #dist:f18,f19,f20
 pyxattr
 zlib-devel          # testonly
diff --git a/files/rpms/opendaylight b/files/rpms/opendaylight
new file mode 100644
index 0000000..98aaaf4
--- /dev/null
+++ b/files/rpms/opendaylight
@@ -0,0 +1 @@
+openvswitch # NOPRIME
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/functions b/functions
index ab8319b..1d30922 100644
--- a/functions
+++ b/functions
@@ -163,38 +163,37 @@
             if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
                             `" Filename provided: ${IMAGE_NAME}"
-            else
-                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-                flat_path="${image_url:0:$path_len}"
-                descriptor_url=$flat_path$descriptor_fname
-                warn $LINENO "$descriptor_data_pair_msg"`
-                                `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
-                if [[ $flat_path != file* ]]; then
-                    if [[ ! -f $FILES/$descriptor_fname || \
-                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
-                        wget -c $descriptor_url -O $FILES/$descriptor_fname
-                        if [[ $? -ne 0 ]]; then
-                            warn $LINENO "Descriptor not found $descriptor_url"
-                            descriptor_found=false
-                        fi
-                    fi
-                    descriptor_url="$FILES/$descriptor_fname"
-                else
-                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
-                    if [[ ! -f $descriptor_url || \
-                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+            fi
+
+            descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+            flat_path="${image_url:0:$path_len}"
+            descriptor_url=$flat_path$descriptor_fname
+            warn $LINENO "$descriptor_data_pair_msg"`
+                            `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+            if [[ $flat_path != file* ]]; then
+                if [[ ! -f $FILES/$descriptor_fname || \
+                "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                    wget -c $descriptor_url -O $FILES/$descriptor_fname
+                    if [[ $? -ne 0 ]]; then
                         warn $LINENO "Descriptor not found $descriptor_url"
                         descriptor_found=false
                     fi
                 fi
-                if $descriptor_found; then
-                    vmdk_adapter_type="$(head -25 $descriptor_url |"`
-                    `" { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
-                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
-                    vmdk_adapter_type="${vmdk_adapter_type%?}"
+                descriptor_url="$FILES/$descriptor_fname"
+            else
+                descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                if [[ ! -f $descriptor_url || \
+                "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                    warn $LINENO "Descriptor not found $descriptor_url"
+                    descriptor_found=false
                 fi
             fi
+            if $descriptor_found; then
+                vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
+                vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                vmdk_adapter_type="${vmdk_adapter_type%?}"
+            fi
             vmdk_disktype="preallocated"
         else
             vmdk_disktype="preallocated"
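
The refactor above keeps the original filename arithmetic; in isolation, the descriptor name is derived from the flat image name like this:

    # Strip the trailing "-flat" (5 characters) and append ".vmdk" to get
    # the descriptor file that pairs with a monolithic-flat VMDK.
    IMAGE_NAME="ubuntu-disk1-flat"              # illustrative name
    descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
    echo $descriptor_fname                      # prints: ubuntu-disk1.vmdk
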
diff --git a/functions-common b/functions-common
index 0db3ff3..ed3d883 100644
--- a/functions-common
+++ b/functions-common
@@ -938,9 +938,24 @@
     [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    # The manual check for missing packages is because yum -y assumes
+    # missing packages are OK.  See
+    # https://bugzilla.redhat.com/show_bug.cgi?id=965567
     $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
         no_proxy=$no_proxy \
-        yum install -y "$@"
+        yum install -y "$@" 2>&1 | \
+        awk '
+            BEGIN { fail=0 }
+            /No package/ { fail=1 }
+            { print }
+            END { exit fail }' || \
+                die $LINENO "Missing packages detected"
+
+    # also ensure we catch a yum failure
+    if [[ ${PIPESTATUS[0]} != 0 ]]; then
+        die $LINENO "Yum install failure"
+    fi
 }
 
 # zypper wrapper to set arguments correctly
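
In the yum wrapper above, awk passes yum's output through while turning any "No package" line into a non-zero exit status, and PIPESTATUS[0] is then checked for yum's own exit code. Both mechanisms in a small standalone sketch:

    # awk echoes its input and fails if a "No package" line was seen.
    echo "No package foo available" | awk '
        BEGIN { fail=0 }
        /No package/ { fail=1 }
        { print }
        END { exit fail }' || echo "missing package detected"

    # PIPESTATUS[0] still exposes the first stage's exit code even when the
    # last stage succeeds.
    false | cat
    echo "first stage exited with ${PIPESTATUS[0]}"   # prints 1
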
diff --git a/lib/ceilometer b/lib/ceilometer
index 04c1a34..b0899e2 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -129,6 +129,7 @@
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
+    iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
diff --git a/lib/heat b/lib/heat
index d0c0302..2d9d863 100644
--- a/lib/heat
+++ b/lib/heat
@@ -197,8 +197,62 @@
 }
 
 # create_heat_accounts() - Set up common required heat accounts
-# Note this is in addition to what is in files/keystone_data.sh
 function create_heat_accounts {
+    # migrated from files/keystone_data.sh
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    HEAT_USER=$(openstack user create \
+        heat \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email heat@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $HEAT_USER
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        HEAT_SERVICE=$(openstack service create \
+            heat \
+            --type=orchestration \
+            --description="Heat Orchestration Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+        HEAT_CFN_SERVICE=$(openstack service create \
+            heat \
+            --type=cloudformation \
+            --description="Heat CloudFormation Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+                $HEAT_CFN_SERVICE \
+                --region RegionOne \
+                --publicurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --adminurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                --internalurl "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+    fi
+
+    # heat_stack_user role is for users created by Heat
+    openstack role create heat_stack_user
+
+    # heat_stack_owner role is given to users who create Heat stacks;
+    # it is the default role heat uses to delegate to the heat service
+    # user (for performing deferred operations via trusts); see heat.conf
+    HEAT_OWNER_ROLE=$(openstack role create \
+        heat_stack_owner \
+        | grep " id " | get_field 2)
+
+    # Give the role to the demo and admin users so they can create stacks
+    # in either of the projects created by devstack
+    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+
     # Note we have to pass token/endpoint here because the current endpoint and
     # version negotiation in OSC means just --os-identity-api-version=3 won't work
     KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
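
The grep " id " | get_field 2 idiom above pulls the ID column out of the table that openstackclient prints. A self-contained illustration of the same extraction using awk on a canned row (no cloud required):

    # openstackclient prints rows like "| id | <value> |"; split on "|" and
    # trim spaces from the third field to recover the value.
    row="| id | 4b1c6a45e8d94f0f |"             # illustrative table row
    echo "$row" | awk -F'|' '/ id / { gsub(/ /, "", $3); print $3 }'
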
diff --git a/lib/ironic b/lib/ironic
index 4e5edc9..b346de1 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -124,7 +124,7 @@
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
-    iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
diff --git a/lib/ldap b/lib/ldap
index 51d0251..efe2f09 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -154,7 +154,7 @@
 
 # clear_ldap_state() - Clear LDAP State
 function clear_ldap_state {
-    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
+    ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN" || :
 }
 
 # Restore xtrace
diff --git a/lib/marconi b/lib/marconi
index 29ae386..3c4547f 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -34,7 +34,8 @@
 MARCONICLIENT_DIR=$DEST/python-marconiclient
 MARCONI_CONF_DIR=/etc/marconi
 MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_API_LOG_DIR=/var/log/marconi
+MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
 MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
 
 # Support potential entry-points console scripts
@@ -96,6 +97,7 @@
 
     iniset $MARCONI_CONF DEFAULT verbose True
     iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+    iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
@@ -104,8 +106,12 @@
     iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
 
-    if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
-        iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
+        iniset $MARCONI_CONF drivers storage sqlalchemy
+        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
+    else
+        iniset $MARCONI_CONF drivers storage mongodb
+        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
         configure_mongodb
         cleanup_marconi
     fi
@@ -148,7 +154,7 @@
 
 # start_marconi() - Start running processes, including screen
 function start_marconi {
-    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1"
     echo "Waiting for Marconi to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
         die $LINENO "Marconi did not start"
diff --git a/lib/neutron b/lib/neutron
index bb591ab..84e8277 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 # nova vif driver that all plugins should use
 NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
+
+    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -754,6 +761,16 @@
         iniset $NEUTRON_CONF DEFAULT ${I/=/ }
     done
 
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
+    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
     # Configure plugin
     neutron_plugin_configure_service
 }
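
The notification and VIF-plugging knobs introduced above are plain shell variables with defaults, so they can be overridden from local.conf before stack.sh runs; the values below are illustrative only:

    # local.conf (localrc section) overrides, illustrative values
    VIF_PLUGGING_IS_FATAL=False
    VIF_PLUGGING_TIMEOUT=0
    Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=False
    Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=False
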
diff --git a/lib/nova b/lib/nova
index 2d8715b..55103e8 100644
--- a/lib/nova
+++ b/lib/nova
@@ -665,17 +665,6 @@
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # Enable client side traces for libvirt
-        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
-        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-        # Enable server side traces for libvirtd
-        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
-            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-        fi
-
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index cdbc4d1..fd3c4fe 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -57,6 +57,18 @@
     iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
 }
 
+# is_docker_running - Return 0 (true) if Docker is running, otherwise 1
+function is_docker_running {
+    local docker_pid
+    if [ -f "$DOCKER_PID_FILE" ]; then
+        docker_pid=$(cat "$DOCKER_PID_FILE")
+    fi
+    if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then
+        return 1
+    fi
+    return 0
+}
+
 # install_nova_hypervisor() - Install external components
 function install_nova_hypervisor {
     # So far this is Ubuntu only
@@ -69,19 +81,15 @@
         die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
     fi
 
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
+    if ! is_docker_running; then
         die $LINENO "Docker not running"
     fi
 }
 
 # start_nova_hypervisor - Start any required external services
 function start_nova_hypervisor {
-    local docker_pid
-    read docker_pid <$DOCKER_PID_FILE
-    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
-        die $LINENO "Docker not running, start the daemon"
+    if ! is_docker_running; then
+        die $LINENO "Docker not running"
     fi
 
     # Start the Docker registry container
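
With the pid check factored into is_docker_running, callers only need DOCKER_PID_FILE to be set; a short usage sketch (the pid-file path is an assumption, not defined in this hunk):

    # Returns 0 when the pid recorded in DOCKER_PID_FILE belongs to a live
    # docker process, 1 otherwise.
    DOCKER_PID_FILE=${DOCKER_PID_FILE:-/var/run/docker.pid}   # assumed location
    if is_docker_running; then
        echo "docker daemon is running"
    else
        echo "docker daemon is not running"
    fi
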
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index bbf6554..26880e5 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -103,6 +103,16 @@
     fi
     add_user_to_group $STACK_USER $LIBVIRT_GROUP
 
+    # Enable server side traces for libvirtd
+    local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+    local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+    if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+        echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+    fi
+    if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+        echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+    fi
+
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
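
The grep-before-append guard keeps the libvirtd.conf edits idempotent across repeated stack.sh runs; the same pattern in isolation:

    # Append a configuration line only if it is not already present, so
    # re-running the script does not duplicate it.
    conf=/tmp/example.conf                       # illustrative target file
    line='log_outputs="1:file:/var/log/libvirt/libvirtd.log"'
    if ! grep -q "$line" "$conf" 2>/dev/null; then
        echo "$line" | sudo tee -a "$conf"
    fi
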
diff --git a/lib/opendaylight b/lib/opendaylight
new file mode 100644
index 0000000..ca81c20
--- /dev/null
+++ b/lib/opendaylight
@@ -0,0 +1,167 @@
+# lib/opendaylight
+# Functions to control the configuration and operation of the opendaylight service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST`` must be defined
+# - ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_opendaylight_enabled
+# - is_opendaylight-compute_enabled
+# - install_opendaylight
+# - install_opendaylight-compute
+# - configure_opendaylight
+# - init_opendaylight
+# - start_opendaylight
+# - stop_opendaylight-compute
+# - stop_opendaylight
+# - cleanup_opendaylight
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# For OVS_BRIDGE and PUBLIC_BRIDGE
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Defaults
+# --------
+
+# The IP address of ODL. Set this in local.conf.
+# ODL_MGR_IP=
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# <define global variables here that belong to this project>
+ODL_DIR=$DEST/opendaylight
+
+# The OpenDaylight Package, currently using 'Hydrogen' release
+ODL_PKG=${ODL_PKG:-distributions-virtualization-0.1.1-osgipackage.zip}
+
+# The OpenDaylight URL
+ODL_URL=${ODL_URL:-https://nexus.opendaylight.org/content/repositories/opendaylight.release/org/opendaylight/integration/distributions-virtualization/0.1.1}
+
+# Default arguments for OpenDaylight. This is typically used to set
+# Java memory options.
+#   ODL_ARGS="-Xmx1024m -XX:MaxPermSize=512m"
+ODL_ARGS=${ODL_ARGS:-"-XX:MaxPermSize=384m"}
+
+# How long to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-60}
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# Test if OpenDaylight is enabled
+# is_opendaylight_enabled
+function is_opendaylight_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+# cleanup_opendaylight() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    :
+}
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    # Remove simple forwarder
+    rm -f $ODL_DIR/opendaylight/plugins/org.opendaylight.controller.samples.simpleforwarding*
+
+    # Configure OpenFlow 1.3
+    echo "ovsdb.of.version=1.3" >> $ODL_DIR/opendaylight/configuration/config.ini
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    local _pwd=$(pwd)
+
+    if is_ubuntu; then
+        install_package maven openjdk-7-jre openjdk-7-jdk
+    else
+        yum_install maven java-1.7.0-openjdk
+    fi
+
+    # Download OpenDaylight
+    mkdir -p $ODL_DIR
+    cd $ODL_DIR
+    wget -N $ODL_URL/$ODL_PKG
+    unzip -u $ODL_PKG
+}
+
+# install_opendaylight-compute - Make sure OVS is installed
+function install_opendaylight-compute {
+    local kernel_version
+    # Install deps
+    # FIXME add to ``files/apts/neutron``, but don't install if not needed!
+    if is_ubuntu; then
+        kernel_version=`cat /proc/version | cut -d " " -f3`
+        install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        install_package openvswitch
+        restart_service openvswitch-switch
+        restart_service openvswitch-controller
+    fi
+}
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    if is_ubuntu; then
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk-amd64
+    else
+        JHOME=/usr/lib/jvm/java-1.7.0-openjdk
+    fi
+
+    # The flags to ODL have the following meaning:
+    #   -of13: runs ODL using OpenFlow 1.3 protocol support.
+    #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+
+    # Sleep a bit to let OpenDaylight finish starting up
+    sleep $ODL_BOOT_WAIT
+}
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    screen_stop odl-server
+}
+
+# stop_opendaylight-compute() - Remove OVS bridges
+function stop_opendaylight-compute {
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Neutron
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
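
To exercise the new library, the odl-* services must appear in ENABLED_SERVICES; a minimal local.conf sketch, with the address and wait time as illustrative values:

    # local.conf fragment (illustrative)
    [[local|localrc]]
    enable_service odl-server odl-compute
    ODL_MGR_IP=192.168.56.1     # ODL controller address, example only
    ODL_BOOT_WAIT=70            # allow a little extra time for ODL to boot
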
diff --git a/lib/swift b/lib/swift
index 5d4d4ef..b655440 100644
--- a/lib/swift
+++ b/lib/swift
@@ -67,8 +67,8 @@
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
-# Default is ``staticweb, tempurl, formpost``
-SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb}
+# Default is ``staticweb, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}
 
 # Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at
 # the end of the pipeline.
@@ -687,6 +687,11 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
+    # Maintain the iteration as screen_stop() has some desirable side-effects
+    for type in proxy object container account; do
+        screen_stop s-${type}
+    done
+    # Blast out any stragglers
     pkill -f swift-
 }
 
diff --git a/lib/tempest b/lib/tempest
index 16f8744..c74f00d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -149,8 +149,12 @@
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # See files/keystone_data.sh where alt_demo user
-    # and tenant are set up...
+    # See files/keystone_data.sh and stack.sh where the admin, demo and alt_demo
+    # users and tenants are set up...
+    ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
+    ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+    TEMPEST_USERNAME=${TEMPEST_USERNAME:-demo}
+    TEMPEST_TENANT_NAME=${TEMPEST_TENANT_NAME:-demo}
     ALT_USERNAME=${ALT_USERNAME:-alt_demo}
     ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
 
@@ -254,11 +258,16 @@
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
+    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
     iniset $TEMPEST_CONFIG identity password "$password"
+    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
     iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG identity admin_password "$password"
+    iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
 
     # Image
     # for the gate we want to be able to override this variable so we aren't
@@ -285,7 +294,9 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
-    iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
+    iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
+    iniset $TEMPEST_CONFIG "compute-admin" password "$password"
+    iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -293,7 +304,7 @@
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
-    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
diff --git a/stack.sh b/stack.sh
index ab1e8fe..148ce04 100755
--- a/stack.sh
+++ b/stack.sh
@@ -934,8 +934,7 @@
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_CFN_PORT=$HEAT_API_CFN_PORT \
-    HEAT_API_PORT=$HEAT_API_PORT \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
@@ -1419,3 +1418,9 @@
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo_summary "stack.sh completed in $SECONDS seconds."
+
+# Restore/close logging file descriptors
+exec 1>&3
+exec 2>&3
+exec 3>&-
+exec 6>&-
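
The added exec lines undo the logging redirection stack.sh sets up earlier by restoring the saved file descriptors; the save/redirect/restore pairing in isolation:

    # Save stdout, redirect both streams to a log, then restore and close.
    exec 3>&1                       # keep the original stdout on fd 3
    exec 1>/tmp/example.log 2>&1    # send stdout and stderr to a log file
    echo "this goes to the log"
    exec 1>&3 2>&3                  # restore stdout and stderr
    exec 3>&-                       # close the saved descriptor
    echo "this goes to the terminal again"
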
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 440774e..2b5e418 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -73,7 +73,7 @@
 # Install basics
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo python-netaddr
+apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
 pip install xenapi
 
 # Install XenServer guest utilities