Merge "Complete the support of MatchMakerRedis driver"
diff --git a/README.md b/README.md
index 206ffe0..c5e7f55 100644
--- a/README.md
+++ b/README.md
@@ -282,7 +282,15 @@
tests can be run as follows:
$ cd /opt/stack/tempest
- $ nosetests tempest/scenario/test_network_basic_ops.py
+ $ tox -efull tempest.scenario.test_network_basic_ops
+
+By default tempest is downloaded and the config file is generated, but the
+tempest package is not installed in the system's global site-packages (the
+package install includes installing dependencies). So tempest won't run
+outside of tox. If you would like to install it add the following to your
+``localrc`` section:
+
+ INSTALL_TEMPEST=True
# DevStack on Xenserver
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index ad08221..4020580 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -36,7 +36,7 @@
is_service_enabled horizon || exit 55
# can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
+$CURL_GET http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/sahara.sh b/exercises/sahara.sh
index 867920e..2589e28 100755
--- a/exercises/sahara.sh
+++ b/exercises/sahara.sh
@@ -35,7 +35,7 @@
is_service_enabled sahara || exit 55
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
+$CURL_GET http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/zaqar.sh b/exercises/zaqar.sh
index 6996f34..c370b12 100755
--- a/exercises/zaqar.sh
+++ b/exercises/zaqar.sh
@@ -35,7 +35,7 @@
is_service_enabled zaqar-server || exit 55
-curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
+$CURL_GET http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/files/debs/ironic b/files/debs/ironic
index f6c7b74..0a906db 100644
--- a/files/debs/ironic
+++ b/files/debs/ironic
@@ -4,6 +4,7 @@
ipxe
libguestfs0
libvirt-bin
+open-iscsi
openssh-client
openvswitch-switch
openvswitch-datapath-dkms
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 534b1c1..5d5052a 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,8 +1,6 @@
qemu-utils
-# Stuff for diablo volumes
-lvm2
+lvm2 # NOPRIME
open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:precise
genisoimage
sysfsutils
sg3-utils
diff --git a/functions b/functions
index 2f976cf..79b2b37 100644
--- a/functions
+++ b/functions
@@ -325,13 +325,15 @@
fi
}
+# Macro for curl statements. curl requires the -g option for literal IPv6 addresses.
+CURL_GET="${CURL_GET:-curl -g}"
# Wait for an HTTP server to start answering requests
# wait_for_service timeout url
function wait_for_service {
local timeout=$1
local url=$2
- timeout $timeout sh -c "while ! curl -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+ timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
}
diff --git a/lib/ceilometer b/lib/ceilometer
index f509788..9db0640 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -13,21 +13,16 @@
#
# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
#
-# To ensure events are stored, add the following section to local.conf:
-#
-# [[post-config|$CEILOMETER_CONF]]
-# [notification]
-# store_events=True
-#
# Several variables set in the localrc section adjust common behaviors
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi.
# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing
# runs. Default 600.
-# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb')
+# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es')
# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided
# by tooz.
+# CEILOMETER_EVENTS: Enable event collection
# Dependencies:
@@ -80,6 +75,7 @@
# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
+CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}
CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-}
CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}
@@ -137,8 +133,10 @@
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer {
- if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
mongo ceilometer --eval "db.dropDatabase();"
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ curl -XDELETE "localhost:9200/events_*"
fi
if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
_cleanup_ceilometer_apache_wsgi
@@ -206,11 +204,21 @@
configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
+ iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS
+
if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+        # Elasticsearch is only supported for events; use SQL for alarming/metering.
+ iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF database event_connection es://localhost:9200
+ iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
+ iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
+ ${TOP_DIR}/pkg/elasticsearch.sh start
+ cleanup_ceilometer
else
iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer
iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
@@ -264,7 +272,7 @@
rm -f $CEILOMETER_AUTH_CACHE_DIR/*
if is_service_enabled mysql postgresql; then
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
+ if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
recreate_database ceilometer
$CEILOMETER_BIN_DIR/ceilometer-dbsync
fi
@@ -293,6 +301,11 @@
elif echo $CEILOMETER_COORDINATION_URL | grep -q '^redis:'; then
install_redis
fi
+
+ if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
+ ${TOP_DIR}/pkg/elasticsearch.sh download
+ ${TOP_DIR}/pkg/elasticsearch.sh install
+ fi
}
# install_ceilometerclient() - Collect source and prepare
@@ -340,7 +353,7 @@
# only die on API if it was actually intended to be turned on
if is_service_enabled ceilometer-api; then
echo "Waiting for ceilometer-api to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then
die $LINENO "ceilometer-api did not start"
fi
fi
diff --git a/lib/horizon b/lib/horizon
index a8e83f9..c6e3692 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -94,6 +94,7 @@
cp $HORIZON_SETTINGS $local_settings
_horizon_config_set $local_settings "" COMPRESS_OFFLINE True
+ _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
_horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
diff --git a/lib/ironic b/lib/ironic
index ade889e..bc30cdb 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -121,6 +121,16 @@
IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP}
IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088}
+# NOTE(lucasagomes): This flag is used to differentiate the nodes that
+# use IPA as their deploy ramdisk from nodes that use the agent_* drivers
+# (which also use IPA but depend on Swift Temp URLs to work). At present,
+# all drivers that use the iSCSI approach for their deployment support
+# using both IPA and bash ramdisks for the deployment. In the future we
+# want to remove support for the bash ramdisk in favor of IPA; once
+# we get there, this flag can be removed and all conditionals that use
+# it should just run by default.
+IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA)
+
# get_pxe_boot_file() - Get the PXE/iPXE boot file path
function get_pxe_boot_file {
local relpath=syslinux/pxelinux.0
@@ -162,6 +172,11 @@
return 1
}
+function is_deployed_with_ipa_ramdisk {
+ is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0
+ return 1
+}
+
# install_ironic() - Collect source and prepare
function install_ironic {
# make sure all needed service were enabled
@@ -329,7 +344,11 @@
iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
+ local pxe_params="nofb nomodeset vga=normal console=ttyS0"
+ if is_deployed_with_ipa_ramdisk; then
+ pxe_params+=" systemd.journald.forward_to_console=yes"
+ fi
+ iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params"
fi
if is_deployed_by_agent; then
if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
@@ -344,9 +363,6 @@
iniset $IRONIC_CONF_FILE glance swift_container glance
iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30
- if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- iniset $IRONIC_CONF_FILE agent agent_pxe_append_params "nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes"
- fi
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
@@ -717,7 +733,7 @@
if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
# we can build them only if we're not offline
if [ "$OFFLINE" != "True" ]; then
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
else
ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
@@ -727,7 +743,7 @@
die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
fi
else
- if is_deployed_by_agent; then
+ if is_deployed_with_ipa_ramdisk; then
# download the agent image tarball
wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
diff --git a/lib/keystone b/lib/keystone
index 102d188..0968445 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -564,7 +564,7 @@
# Check that the keystone service is running. Even if the tls tunnel
# should be enabled, make sure the internal port is checked using
# unencryted traffic at this point.
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/neutron b/lib/neutron
index 3804e05..a7aabc5 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -934,7 +934,7 @@
Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
if is_service_enabled q-vpn; then
- cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+ neutron_vpn_configure_agent
fi
cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 5912eab..4d6a2bf 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -12,6 +12,13 @@
function neutron_vpn_install_agent_packages {
install_package $IPSEC_PACKAGE
+ if is_ubuntu && [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
+ sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.charon /etc/apparmor.d/disable/
+ sudo ln -sf /etc/apparmor.d/usr.lib.ipsec.stroke /etc/apparmor.d/disable/
+ # NOTE: Due to https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/1387220
+ # one must use 'sudo start apparmor ACTION=reload' for Ubuntu 14.10
+ restart_service apparmor
+ fi
}
function neutron_vpn_configure_common {
@@ -19,6 +26,18 @@
_neutron_deploy_rootwrap_filters $NEUTRON_VPNAAS_DIR
}
+function neutron_vpn_configure_agent {
+ cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+ if [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
+ iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver
+ if is_fedora; then
+ iniset $Q_VPN_CONF_FILE strongswan default_config_area /usr/share/strongswan/templates/config/strongswan.d
+ fi
+ else
+ iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.ipsec.OpenSwanDriver
+ fi
+}
+
function neutron_vpn_stop {
local ipsec_data_dir=$DATA_DIR/neutron/ipsec
local pids
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 4cbedd6..b6c1c9c 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -1,147 +1,10 @@
#!/bin/bash
-#
-# Neutron VMware NSX plugin
-# -------------------------
-# Save trace setting
-NSX_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# This file is needed so Q_PLUGIN=vmware_nsx will work.
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function setup_integration_bridge {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- # Set manager to NSX controller (1st of list)
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- # Get the first controller
- controllers=(${NSX_CONTROLLERS//,/ })
- OVS_MGR_IP=${controllers[0]}
- else
- die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
- fi
- sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
-}
-
-function is_neutron_ovs_base_plugin {
- # NSX uses OVS, but not the l3-agent
- return 0
-}
-
-function neutron_plugin_create_nova_conf {
- # if n-cpu is enabled, then setup integration bridge
- if is_service_enabled n-cpu; then
- setup_integration_bridge
- fi
-}
-
-function neutron_plugin_install_agent_packages {
- # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents
- _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
- Q_PLUGIN_CONF_FILENAME=nsx.ini
- Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
-}
-
-function neutron_plugin_configure_debug_command {
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
-}
-
-function neutron_plugin_configure_dhcp_agent {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
- iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
-}
-
-function neutron_plugin_configure_l3_agent {
- # VMware NSX plugin does not run L3 agent
- die $LINENO "q-l3 should not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent {
- # VMware NSX plugin does not run L2 agent
- die $LINENO "q-agt must not be executed with VMware NSX plugin!"
-}
-
-function neutron_plugin_configure_service {
- if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
- fi
- if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
- fi
- if [[ "$FAILOVER_TIME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME
- fi
- if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS
- fi
-
- if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
- else
- die $LINENO "The VMware NSX plugin won't work without a default transport zone."
- fi
- if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
- iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network
- fi
- if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
- fi
- # NSX_CONTROLLERS must be a comma separated string
- if [[ "$NSX_CONTROLLERS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS
- else
- die $LINENO "The VMware NSX plugin needs at least an NSX controller."
- fi
- if [[ "$NSX_USER" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER
- fi
- if [[ "$NSX_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
- fi
- if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
- fi
- if [[ "$NSX_RETRIES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES
- fi
- if [[ "$NSX_REDIRECTS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS
- fi
- if [[ "$AGENT_MODE" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE
- if [[ "$AGENT_MODE" == "agentless" ]]; then
- if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID
- else
- die $LINENO "Agentless mode requires a service cluster."
- fi
- iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP
- fi
- fi
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
function has_neutron_plugin_security_group {
# 0 means True here
return 0
}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$NSX_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx_v b/lib/neutron_plugins/vmware_nsx_v
new file mode 100644
index 0000000..3d33c65
--- /dev/null
+++ b/lib/neutron_plugins/vmware_nsx_v
@@ -0,0 +1,10 @@
+#!/bin/bash
+#
+# This file is needed so Q_PLUGIN=vmware_nsx_v will work.
+
+# FIXME(salv-orlando): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+ # 0 means True here
+ return 0
+}
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 7027a29..03853a9 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,89 +1,2 @@
-#!/bin/bash
-#
-# VMware NSX
-# ----------
-
-# This third-party addition can be used to configure connectivity between a DevStack instance
-# and an NSX Gateway in dev/test environments. In order to use this correctly, the following
-# env variables need to be set (e.g. in your localrc file):
-#
-# * enable_service vmware_nsx --> to execute this third-party addition
-# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
-# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway
-# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure $PUBLIC_BRIDGE, e.g. 172.24.4.211/24
-
-# Save trace setting
-NSX3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# This is the interface that connects the Devstack instance
-# to an network that allows it to talk to the gateway for
-# testing purposes
-NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2}
-# Re-declare floating range as it's needed also in stop_vmware_nsx, which
-# is invoked by unstack.sh
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-
-function configure_vmware_nsx {
- :
-}
-
-function init_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address to set on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- # Make sure the interface is up, but not configured
- sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up
- # Save and then flush the IP addresses on the interface
- addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
- sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE
- # Use the PUBLIC Bridge to route traffic to the NSX gateway
- # NOTE(armando-migliaccio): if running in a nested environment this will work
- # only with mac learning enabled, portsecurity and security profiles disabled
- # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off
- # Try to create it anyway
- sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE
- sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE
- nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE
- for address in $addresses; do
- sudo ip addr add dev $PUBLIC_BRIDGE $address
- done
- sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR
- sudo ip link set $PUBLIC_BRIDGE up
-}
-
-function install_vmware_nsx {
- :
-}
-
-function start_vmware_nsx {
- :
-}
-
-function stop_vmware_nsx {
- if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
- NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address expected on $PUBLIC_BRIDGE was not specified. "
- echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
- fi
- sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE
- # Save and then flush remaining addresses on the interface
- addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'})
- sudo ip addr flush $PUBLIC_BRIDGE
- # Try to detach physical interface from PUBLIC_BRIDGE
- sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE
- # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE
- for address in $addresses; do
- sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address
- done
-}
-
-function check_vmware_nsx {
- neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
-}
-
-# Restore xtrace
-$NSX3_XTRACE
+# REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx'
+# continues to work.
diff --git a/lib/sahara b/lib/sahara
index a84a06f..9b2e9c4 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -65,9 +65,25 @@
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local sahara_service=$(get_or_create_service "sahara" \
- "data_processing" "Sahara Data Processing")
- get_or_create_endpoint $sahara_service \
+    # TODO: remove the "data_processing" service once bug #1356053 is fixed
+ local sahara_service_old=$(openstack service create \
+ "data_processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ local sahara_service_new=$(openstack service create \
+ "data-processing" \
+ --name "sahara" \
+ --description "Sahara Data Processing" \
+ -f value -c id
+ )
+ get_or_create_endpoint $sahara_service_old \
+ "$REGION_NAME" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ get_or_create_endpoint $sahara_service_new \
"$REGION_NAME" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
diff --git a/lib/tempest b/lib/tempest
index 6177ffe..f856ce0 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,6 +63,12 @@
BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
+# This must be False on stable branches, as master tempest
+# deps do not match stable branch deps. Set this to True to
+# have tempest installed in devstack by default.
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"False"}
+
+
BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
BOTO_CONF=/etc/boto.cfg
@@ -94,8 +100,12 @@
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
- # install testr since its used to process tempest logs
- pip_install $(get_from_global_requirements testrepository)
+ if [[ "$INSTALL_TEMPEST" == "True" ]]; then
+ setup_develop $TEMPEST_DIR
+ else
+        # install testr since it's used to process tempest logs
+ pip_install $(get_from_global_requirements testrepository)
+ fi
local image_lines
local images
diff --git a/lib/trove b/lib/trove
index d437718..080e860 100644
--- a/lib/trove
+++ b/lib/trove
@@ -180,7 +180,7 @@
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
- iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /tmp/
+ iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /var/log/trove/
iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
}
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 447596a..239d6b9 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -47,11 +47,20 @@
:
}
+function _check_elasticsearch_ready {
+ # poll elasticsearch to see if it's started
+ if ! wait_for_service 30 http://localhost:9200; then
+ die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
+ fi
+}
+
function start_elasticsearch {
if is_ubuntu; then
sudo /etc/init.d/elasticsearch start
+ _check_elasticsearch_ready
elif is_fedora; then
sudo /bin/systemctl start elasticsearch.service
+ _check_elasticsearch_ready
else
echo "Unsupported architecture...can not start elasticsearch."
fi
diff --git a/stack.sh b/stack.sh
index 44a0743..2ac7dfa 100755
--- a/stack.sh
+++ b/stack.sh
@@ -250,8 +250,10 @@
enabled=0
gpgcheck=0
EOF
- # bare yum call due to --enablerepo
- sudo yum --enablerepo=epel-bootstrap -y install epel-release || \
+ # Enable a bootstrap repo. It is removed after finishing
+ # the epel-release installation.
+ sudo yum-config-manager --enable epel-bootstrap
+ yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue"
# epel rpm has installed it's version
sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
@@ -1298,6 +1300,13 @@
service_check
+# Bash completion
+# ===============
+
+# Prepare bash completion for OSC
+openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+
+
# Fin
# ===