Merge "Remove bm_poseur, unmaintained and obsolete"
diff --git a/clean.sh b/clean.sh
index e121e4f..3707d84 100755
--- a/clean.sh
+++ b/clean.sh
@@ -123,6 +123,6 @@
 FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*"
 FILES_TO_CLEAN+=".stackenv .prereqs"
 
-for file in FILES_TO_CLEAN; do
+for file in $FILES_TO_CLEAN; do
     rm -f $TOP_DIR/$file
 done
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 7912046..f679669 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -32,6 +32,7 @@
 
 # Import project functions
 source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/neutron
 
 # Import configuration
 source $TOP_DIR/openrc
diff --git a/functions b/functions
index 43639c7..a844b1c 100644
--- a/functions
+++ b/functions
@@ -6,10 +6,6 @@
 # - ``ENABLED_SERVICES``
 # - ``FILES``
 # - ``GLANCE_HOSTPORT``
-# - ``REQUIREMENTS_DIR``
-# - ``STACK_USER``
-# - ``TRACK_DEPENDS``
-# - ``UNDO_REQUIREMENTS``
 #
 
 # Include the common functions
diff --git a/functions-common b/functions-common
index 228cb2a..0db3ff3 100644
--- a/functions-common
+++ b/functions-common
@@ -26,7 +26,10 @@
 # - ``PIP_DOWNLOAD_CACHE``
 # - ``PIP_USE_MIRRORS``
 # - ``RECLONE``
+# - ``REQUIREMENTS_DIR``
+# - ``STACK_USER``
 # - ``TRACK_DEPENDS``
+# - ``UNDO_REQUIREMENTS``
 # - ``http_proxy``, ``https_proxy``, ``no_proxy``
 
 # Save trace setting
diff --git a/lib/ceilometer b/lib/ceilometer
index d20d628..2e6e7c5 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -180,9 +180,11 @@
     sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
     rm -f $CEILOMETER_AUTH_CACHE_DIR/*
 
-    if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
-        recreate_database ceilometer utf8
-        $CEILOMETER_BIN_DIR/ceilometer-dbsync
+    if is_service_enabled mysql postgresql; then
+        if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
+            recreate_database ceilometer utf8
+            $CEILOMETER_BIN_DIR/ceilometer-dbsync
+        fi
     fi
 }
 
@@ -206,9 +208,12 @@
     screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
     screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 
-    echo "Waiting for ceilometer-api to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
-        die $LINENO "ceilometer-api did not start"
+    # only die on API if it was actually intended to be turned on
+    if is_service_enabled ceilometer-api; then
+        echo "Waiting for ceilometer-api to start..."
+        if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
+            die $LINENO "ceilometer-api did not start"
+        fi
     fi
 
     screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
diff --git a/lib/marconi b/lib/marconi
index 8cfc55c..29ae386 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -95,6 +95,7 @@
     sudo chown $USER $MARCONI_API_LOG_DIR
 
     iniset $MARCONI_CONF DEFAULT verbose True
+    iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
     iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
new file mode 100644
index 0000000..22c8578
--- /dev/null
+++ b/lib/neutron_plugins/ibm
@@ -0,0 +1,133 @@
+# Neutron IBM SDN-VE plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+
+function _neutron_interface_setup {
+    # Setup one interface on the integration bridge if needed
+    # The plugin agent to be used if more than one interface is used
+    local bridge=$1
+    local interface=$2
+    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
+}
+
+function neutron_setup_integration_bridge {
+    # Setup integration bridge if needed
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        neutron_ovs_base_cleanup
+        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
+        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
+            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
+        fi
+    fi
+
+    # Set controller to SDNVE controller (1st of list) if exists
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        # Get the first controller
+        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
+        SDNVE_IP=${controllers[0]}
+        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
+    fi
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    # if n-cpu is enabled, then setup integration bridge
+    if is_service_enabled n-cpu; then
+        neutron_setup_integration_bridge
+    fi
+}
+
+function is_neutron_ovs_base_plugin {
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        # Yes, we use OVS.
+        return 0
+    else
+        # No, we do not use OVS.
+        return 1
+    fi
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
+    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
+    Q_DB_NAME="sdnve_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
+}
+
+function neutron_plugin_configure_service {
+    # Define extra "SDNVE" configuration options when q-svc is configured
+
+    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+
+    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
+    fi
+
+    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
+    fi
+
+    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
+    fi
+
+    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
+    fi
+
+    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
+    fi
+
+    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
+    fi
+
+
+    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
+
+}
+
+function neutron_plugin_configure_plugin_agent {
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_setup_interface_driver {
+    return 0
+}
+
+function has_neutron_plugin_security_group {
+    # Does not support Security Groups
+    return 1
+}
+
+function neutron_ovs_base_cleanup {
+    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
+        # remove all OVS ports that look like Neutron created ports
+        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+            sudo ovs-vsctl del-port ${port}
+        done
+
+        # remove integration bridge created by Neutron
+        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
+            sudo ovs-vsctl del-br ${bridge}
+        done
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 0930422..fe79354 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -41,8 +41,7 @@
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
     Q_PLUGIN_CONF_FILENAME=nsx.ini
     Q_DB_NAME="neutron_nsx"
-    # TODO(armando-migliaccio): rename this once the code rename is complete
-    Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2"
+    Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
 }
 
 function neutron_plugin_configure_debug_command {
diff --git a/lib/nova b/lib/nova
index 90b1ba4..583a592 100644
--- a/lib/nova
+++ b/lib/nova
@@ -245,10 +245,9 @@
         inicomment $NOVA_API_PASTE_INI filter:authtoken cafile
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password
+        inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir
     fi
 
-    inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir
-
     if is_service_enabled n-cpu; then
         # Force IP forwarding on, just on case
         sudo sysctl -w net.ipv4.ip_forward=1
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index f8dc6af..cdbc4d1 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -104,8 +104,7 @@
     fi
 
     # Make sure we copied the image in Glance
-    DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ")
-    if ! is_set DOCKER_IMAGE ; then
+    if ! (glance image-show "$DOCKER_IMAGE"); then
         docker push $DOCKER_REPOSITORY_NAME
     fi
 }
diff --git a/lib/rpc_backend b/lib/rpc_backend
index a0424b1..e922daa 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -186,7 +186,7 @@
         fi
     elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
-        iniset $file $section rabbit_host $RABBIT_HOST
+        iniset $file $section rabbit_hosts $RABBIT_HOST
         iniset $file $section rabbit_password $RABBIT_PASSWORD
     fi
 }
diff --git a/lib/swift b/lib/swift
index 59c1e54..5d4d4ef 100644
--- a/lib/swift
+++ b/lib/swift
@@ -301,7 +301,7 @@
     # rsyncd.conf just prepared for 4 nodes
     if is_ubuntu; then
         sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
-    else
+    elif [ -e /etc/xinetd.d/rsync ]; then
         sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
     fi
 
@@ -635,8 +635,10 @@
     # Start rsync
     if is_ubuntu; then
         sudo /etc/init.d/rsync restart || :
+    elif [ -e /etc/xinetd.d/rsync ]; then
+        start_service xinetd
     else
-        sudo systemctl start xinetd.service
+        start_service rsyncd
     fi
 
     if is_apache_enabled_service swift; then
diff --git a/stack.sh b/stack.sh
index 5152b2a..ab1e8fe 100755
--- a/stack.sh
+++ b/stack.sh
@@ -165,46 +165,6 @@
 # Set up logging level
 VERBOSE=$(trueorfalse True $VERBOSE)
 
-
-# Additional repos
-# ================
-
-# Some distros need to add repos beyond the defaults provided by the vendor
-# to pick up required packages.
-
-# The Debian Wheezy official repositories do not contain all required packages,
-# add gplhost repository.
-if [[ "$os_VENDOR" =~ (Debian) ]]; then
-    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    apt_get update
-    apt_get install --force-yes gplhost-archive-keyring
-fi
-
-if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
-    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
-    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
-    if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
-        echo "RDO repo not detected; installing"
-        yum_install $RHEL6_RDO_REPO_RPM || \
-            die $LINENO "Error installing RDO repo, cannot continue"
-    fi
-
-    # RHEL6 requires EPEL for many Open Stack dependencies
-    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
-    if ! sudo yum repolist enabled epel | grep -q 'epel'; then
-        echo "EPEL not detected; installing"
-        yum_install ${RHEL6_EPEL_RPM} || \
-            die $LINENO "Error installing EPEL repo, cannot continue"
-    fi
-
-    # ... and also optional to be enabled
-    sudo yum-config-manager --enable rhel-6-server-optional-rpms
-
-fi
-
-
 # root Access
 # -----------
 
@@ -239,6 +199,47 @@
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
+# Additional repos
+# ----------------
+
+# Some distros need to add repos beyond the defaults provided by the vendor
+# to pick up required packages.
+
+# The Debian Wheezy official repositories do not contain all required packages,
+# add gplhost repository.
+if [[ "$os_VENDOR" =~ (Debian) ]]; then
+    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    apt_get update
+    apt_get install --force-yes gplhost-archive-keyring
+fi
+
+if is_fedora && [[ $DISTRO =~ rhel6 ]]; then
+    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
+    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
+    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
+    if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+        echo "RDO repo not detected; installing"
+        yum_install $RHEL6_RDO_REPO_RPM || \
+            die $LINENO "Error installing RDO repo, cannot continue"
+    fi
+
+    # RHEL6 requires EPEL for many Open Stack dependencies
+    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+    if ! sudo yum repolist enabled epel | grep -q 'epel'; then
+        echo "EPEL not detected; installing"
+        yum_install ${RHEL6_EPEL_RPM} || \
+            die $LINENO "Error installing EPEL repo, cannot continue"
+    fi
+
+    # ... and also optional to be enabled
+    is_package_installed yum-utils || install_package yum-utils
+    sudo yum-config-manager --enable rhel-6-server-optional-rpms
+
+fi
+
+# Filesystem setup
+# ----------------
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
@@ -256,6 +257,15 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+safe_chown -R $STACK_USER $DATA_DIR
+
+
+# Common Configuration
+# --------------------
+
 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
 # Internet access. ``stack.sh`` must have been previously run with Internet
 # access to install prerequisites and fetch repositories.
@@ -269,15 +279,6 @@
 # Whether to enable the debug log level in OpenStack services
 ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL`
 
-# Destination path for service data
-DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
-
-
-# Common Configuration
-# ====================
-
 # Set fixed and floating range here so we can make sure not to use addresses
 # from either range when attempting to guess the IP to use for the host.
 # Note that setting FIXED_RANGE may be necessary when running DevStack