Merge "Fix pep8 errors"
diff --git a/lib/baremetal b/lib/baremetal
index 473de0d..1d02e1e 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -77,14 +77,6 @@
 # These should be customized to your environment and hardware
 # -----------------------------------------------------------
 
-# whether to create a fake environment, eg. for devstack-gate
-BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV`
-
-# Extra options to pass to bm_poseur
-# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1
-# change the virtualization type: --engine qemu
-BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
-
 # To provide PXE, configure nova-network's dnsmasq rather than run the one
 # dedicated to baremetal. When enable this, make sure these conditions are
 # fulfilled:
@@ -97,15 +89,10 @@
 BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
 
 # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
-if [ "$BM_USE_FAKE_ENV" ]; then
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48}
-else
-    BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
-    # if testing on a physical network,
-    # BM_DNSMASQ_RANGE must be changed to suit your network
-    BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
-fi
+BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0}
+# if testing on a physical network,
+# BM_DNSMASQ_RANGE must be changed to suit your network
+BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-}
 
 # BM_DNSMASQ_DNS provide dns server to bootstrap clients
 BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-}
@@ -143,7 +130,6 @@
 # Below this, we set some path and filenames.
 # Defaults are probably sufficient.
 BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
-BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur}
 
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
@@ -177,7 +163,6 @@
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
     git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
-    git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
 
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
@@ -196,27 +181,6 @@
     fi
 }
 
-# set up virtualized environment for devstack-gate testing
-function create_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    # TODO(deva): add support for >1 VM
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm
-    BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
-
-    # NOTE: there is currently a limitation in baremetal driver
-    # that requires second MAC even if it is not used.
-    # Passing a fake value allows this to work.
-    # TODO(deva): remove this after driver issue is fixed.
-    BM_SECOND_MAC='12:34:56:78:90:12'
-}
-
-function cleanup_fake_baremetal_env {
-    local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
-    sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
-}
-
 # prepare various directories needed by baremetal hypervisor
 function configure_baremetal_nova_dirs {
     # ensure /tftpboot is prepared
diff --git a/lib/neutron b/lib/neutron
index 7ca66a5..bb591ab 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -586,11 +586,9 @@
     # If additional config files exist, copy them over to neutron configuration
     # directory
     if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then
-        mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
         local f
         for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
             Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
-            cp $NEUTRON_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
         done
     fi
 
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
new file mode 100644
index 0000000..22c8578
--- /dev/null
+++ b/lib/neutron_plugins/ibm
@@ -0,0 +1,133 @@
+# Neutron IBM SDN-VE plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+
+function _neutron_interface_setup {
+    # Set up one interface on the integration bridge if needed.
+    # The plugin agent should be used if more than one interface is used.
+    local bridge=$1
+    local interface=$2
+    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
+}
+
+function neutron_setup_integration_bridge {
+    # Setup integration bridge if needed
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        neutron_ovs_base_cleanup
+        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
+        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
+            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
+        fi
+    fi
+
+    # Set controller to SDNVE controller (1st of list) if exists
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        # Get the first controller
+        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
+        SDNVE_IP=${controllers[0]}
+        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
+    fi
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    # if n-cpu is enabled, then setup integration bridge
+    if is_service_enabled n-cpu; then
+        neutron_setup_integration_bridge
+    fi
+}
+
+function is_neutron_ovs_base_plugin {
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        # Yes, we use OVS.
+        return 0
+    else
+        # No, we do not use OVS.
+        return 1
+    fi
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
+    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
+    Q_DB_NAME="sdnve_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
+}
+
+function neutron_plugin_configure_service {
+    # Define extra "SDNVE" configuration options when q-svc is configured
+
+    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
+    fi
+
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
+    fi
+
+    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
+    fi
+
+    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
+    fi
+
+    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
+    fi
+
+    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
+    fi
+
+
+    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
+
+}
+
+function neutron_plugin_configure_plugin_agent {
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_setup_interface_driver {
+    return 0
+}
+
+function has_neutron_plugin_security_group {
+    # Does not support Security Groups
+    return 1
+}
+
+function neutron_ovs_base_cleanup {
+    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
+        # remove all OVS ports that look like Neutron created ports
+        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+            sudo ovs-vsctl del-port ${port}
+        done
+
+        # remove integration bridge created by Neutron
+        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
+            sudo ovs-vsctl del-br ${bridge}
+        done
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
new file mode 100644
index 0000000..724df41
--- /dev/null
+++ b/lib/neutron_plugins/ofagent_agent
@@ -0,0 +1,94 @@
+# OpenFlow Agent plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+source $TOP_DIR/lib/neutron_thirdparty/ryu  # for RYU_DIR, install_ryu, etc
+
+function neutron_plugin_create_nova_conf {
+    _neutron_ovs_base_configure_nova_vif_driver
+}
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+
+    # This agent uses ryu to talk with switches
+    install_package $(get_packages "ryu")
+    install_ryu
+    configure_ryu
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    _neutron_ovs_base_configure_firewall_driver
+
+    # Check a supported openflow version
+    OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2`
+    if [ `vercmp_numbers "$OF_VERSION" "0x3"` -lt "0" ]; then
+        die $LINENO "This agent requires OpenFlow 1.3+ capable switch."
+    fi
+
+    # Enable tunnel networks if selected
+    if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+        # Verify tunnels are supported
+        # REVISIT - also check kernel module support for GRE and patch ports
+        OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"`
+        if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ]; then
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
+        fi
+        iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True
+        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+    fi
+
+    # Setup physical network bridge mappings.  Override
+    # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+    # complex physical network configurations.
+    if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+        # Configure bridge manually with physical interface as port for multi-node
+        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+    fi
+    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
+    fi
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent"
+
+    # Define extra "AGENT" configuration options when q-agt is configured by
+    # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
+    done
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT ovs_use_veth True
+}
+
+function neutron_plugin_check_adv_test_requirements {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
index a0424b1..e922daa 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -186,7 +186,7 @@
         fi
     elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
-        iniset $file $section rabbit_host $RABBIT_HOST
+        iniset $file $section rabbit_hosts $RABBIT_HOST
         iniset $file $section rabbit_password $RABBIT_PASSWORD
     fi
 }
diff --git a/stack.sh b/stack.sh
index ccd567e..ab1e8fe 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1053,9 +1053,6 @@
     echo_summary "Preparing for nova baremetal"
     prepare_baremetal_toolchain
     configure_baremetal_nova_dirs
-    if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-        create_fake_baremetal_env
-    fi
 fi
 
 
diff --git a/stackrc b/stackrc
index f235ccc..6bb6f37 100644
--- a/stackrc
+++ b/stackrc
@@ -229,12 +229,6 @@
 BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
 BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
 
-# bm_poseur
-# Used to simulate a hardware environment for baremetal
-# Only used if BM_USE_FAKE_ENV is set
-BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
-BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
-
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
 NOVNC_BRANCH=${NOVNC_BRANCH:-master}
diff --git a/unstack.sh b/unstack.sh
index 6351fe0..a5e7b87 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -127,11 +127,6 @@
     killall stud
 fi
 
-# baremetal might have created a fake environment
-if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
-    cleanup_fake_baremetal_env
-fi
-
 SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
 
 # Get the iSCSI volumes