Merge "remove additional f18 references"
diff --git a/exercises/savanna.sh b/exercises/sahara.sh
similarity index 88%
rename from exercises/savanna.sh
rename to exercises/sahara.sh
index fc3f976..867920e 100755
--- a/exercises/savanna.sh
+++ b/exercises/sahara.sh
@@ -1,8 +1,8 @@
#!/usr/bin/env bash
-# **savanna.sh**
+# **sahara.sh**
-# Sanity check that Savanna started if enabled
+# Sanity check that Sahara started if enabled
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
@@ -33,9 +33,9 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
-is_service_enabled savanna || exit 55
+is_service_enabled sahara || exit 55
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!"
+curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
new file mode 100644
index 0000000..80e07ff
--- /dev/null
+++ b/extras.d/70-sahara.sh
@@ -0,0 +1,37 @@
+# sahara.sh - DevStack extras script to install Sahara
+
+if is_service_enabled sahara; then
+ if [[ "$1" == "source" ]]; then
+ # Initial source
+ source $TOP_DIR/lib/sahara
+ source $TOP_DIR/lib/sahara-dashboard
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Installing sahara"
+ install_sahara
+ cleanup_sahara
+ if is_service_enabled horizon; then
+ install_sahara_dashboard
+ fi
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring sahara"
+ configure_sahara
+ create_sahara_accounts
+ if is_service_enabled horizon; then
+ configure_sahara_dashboard
+ fi
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+ echo_summary "Initializing sahara"
+ start_sahara
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ stop_sahara
+ if is_service_enabled horizon; then
+ cleanup_sahara_dashboard
+ fi
+ fi
+
+ if [[ "$1" == "clean" ]]; then
+ cleanup_sahara
+ fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
deleted file mode 100644
index edc1376..0000000
--- a/extras.d/70-savanna.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-# savanna.sh - DevStack extras script to install Savanna
-
-if is_service_enabled savanna; then
- if [[ "$1" == "source" ]]; then
- # Initial source
- source $TOP_DIR/lib/savanna
- source $TOP_DIR/lib/savanna-dashboard
- elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing Savanna"
- install_savanna
- cleanup_savanna
- if is_service_enabled horizon; then
- install_savanna_dashboard
- fi
- elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- echo_summary "Configuring Savanna"
- configure_savanna
- create_savanna_accounts
- if is_service_enabled horizon; then
- configure_savanna_dashboard
- fi
- elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- echo_summary "Initializing Savanna"
- start_savanna
- fi
-
- if [[ "$1" == "unstack" ]]; then
- stop_savanna
- if is_service_enabled horizon; then
- cleanup_savanna_dashboard
- fi
- fi
-
- if [[ "$1" == "clean" ]]; then
- cleanup_savanna
- fi
-fi
diff --git a/extras.d/80-opendaylight.sh b/extras.d/80-opendaylight.sh
index cc5c8de..57b4328 100644
--- a/extras.d/80-opendaylight.sh
+++ b/extras.d/80-opendaylight.sh
@@ -1,7 +1,9 @@
# opendaylight.sh - DevStack extras script
-# Need this first to get the is_***_enabled for ODL
-source $TOP_DIR/lib/opendaylight
+if is_service_enabled odl-server odl-compute; then
+ # Initial source
+ [[ "$1" == "source" ]] && source $TOP_DIR/lib/opendaylight
+fi
if is_service_enabled odl-server; then
if [[ "$1" == "source" ]]; then
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index 71007ba..f1b692a 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,5 +1,5 @@
-python-pymongo
-mongodb-server
+python-pymongo #NOPRIME
+mongodb-server #NOPRIME
libnspr4-dev
pkg-config
libxml2-dev
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
python-sphinx
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index dd68ac0..d9844e9 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -8,5 +8,6 @@
python-eventlet
python-greenlet
python-iso8601
+python-pyOpenSSL
python-wsgiref
python-xattr
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c91bac3..9cf580d 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,4 +1,4 @@
selinux-policy-targeted
-mongodb-server
-pymongo
+mongodb-server #NOPRIME
+pymongo # NOPRIME
mongodb # NOPRIME
diff --git a/files/rpms/glance b/files/rpms/glance
index c886ece..2007e2e 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -13,6 +13,6 @@
python-paste-deploy #dist:f19,f20,rhel7
python-routes
python-sqlalchemy
-python-wsgiref
+python-wsgiref #dist:f18,f19,f20
pyxattr
zlib-devel # testonly
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
python-sphinx
diff --git a/functions-common b/functions-common
index 0db3ff3..90cd3df 100644
--- a/functions-common
+++ b/functions-common
@@ -938,9 +938,24 @@
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
+
+ # The manual check for missing packages is because yum -y assumes
+ # missing packages are OK. See
+ # https://bugzilla.redhat.com/show_bug.cgi?id=965567
$sudo http_proxy=$http_proxy https_proxy=$https_proxy \
no_proxy=$no_proxy \
- yum install -y "$@"
+ yum install -y "$@" 2>&1 | \
+ awk '
+ BEGIN { fail=0 }
+ /No package/ { fail=1 }
+ { print }
+ END { exit fail }' || \
+ die $LINENO "Missing packages detected"
+
+ # also ensure we catch a yum failure
+ if [[ ${PIPESTATUS[0]} != 0 ]]; then
+ die $LINENO "Yum install failure"
+ fi
}
# zypper wrapper to set arguments correctly
@@ -1233,7 +1248,7 @@
# ``errexit`` requires us to trap the exit code when the repo is changed
local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
- if [[ $update_requirements = "changed" ]]; then
+ if [[ $update_requirements != "changed" ]]; then
(cd $REQUIREMENTS_DIR; \
$SUDO_CMD python update.py $project_dir)
fi
@@ -1249,7 +1264,7 @@
# a variable that tells us whether or not we should UNDO the requirements
# changes (this will be set to False in the OpenStack ci gate)
if [ $UNDO_REQUIREMENTS = "True" ]; then
- if [[ $update_requirements = "changed" ]]; then
+ if [[ $update_requirements != "changed" ]]; then
(cd $project_dir && git reset --hard)
fi
fi
diff --git a/lib/ceilometer b/lib/ceilometer
index b0899e2..6aaddce 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -106,7 +106,9 @@
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer {
- mongo ceilometer --eval "db.dropDatabase();"
+ if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+ mongo ceilometer --eval "db.dropDatabase();"
+ fi
}
# configure_ceilometerclient() - Set config files, create data dirs, etc
@@ -164,14 +166,27 @@
}
function configure_mongodb {
+ # server package is the same on all
+ local packages=mongodb-server
+
if is_fedora; then
- # install mongodb client
- install_package mongodb
+ # mongodb client + python bindings
+ packages="${packages} mongodb pymongo"
+ else
+ packages="${packages} python-pymongo"
+ fi
+
+ install_package ${packages}
+
+ if is_fedora; then
# ensure smallfiles selected to minimize freespace requirements
sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
restart_service mongod
fi
+
+ # give mongodb time to start-up
+ sleep 5
}
# init_ceilometer() - Initialize etc.
diff --git a/lib/marconi b/lib/marconi
index 1e0cc7d..3c4547f 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -34,7 +34,8 @@
MARCONICLIENT_DIR=$DEST/python-marconiclient
MARCONI_CONF_DIR=/etc/marconi
MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_API_LOG_DIR=/var/log/marconi
+MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
# Support potential entry-points console scripts
@@ -96,6 +97,7 @@
iniset $MARCONI_CONF DEFAULT verbose True
iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+ iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
iniset $MARCONI_CONF keystone_authtoken auth_protocol http
@@ -152,7 +154,7 @@
# start_marconi() - Start running processes, including screen
function start_marconi {
- screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+ screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1"
echo "Waiting for Marconi to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
die $LINENO "Marconi did not start"
diff --git a/lib/neutron b/lib/neutron
index bb591ab..84e8277 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
# nova vif driver that all plugins should use
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
# The next two variables are configured by plugin
# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
if is_service_enabled q-meta; then
iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
fi
+
+ iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+ iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
}
# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -754,6 +761,16 @@
iniset $NEUTRON_CONF DEFAULT ${I/=/ }
done
+    # Configuration for neutron notifications to nova.
+ iniset $NEUTRON_CONF DEFAULT notify_nova_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+ iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
+ iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+ ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+ iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+ iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
# Configure plugin
neutron_plugin_configure_service
}
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
new file mode 100644
index 0000000..0aebff6
--- /dev/null
+++ b/lib/neutron_plugins/oneconvergence
@@ -0,0 +1,76 @@
+# Neutron One Convergence plugin
+# ---------------------------
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+Q_L3_ENABLED=true
+Q_L3_ROUTER_PER_TENANT=true
+Q_USE_NAMESPACE=true
+
+function neutron_plugin_install_agent_packages {
+ _neutron_ovs_base_install_agent_packages
+}
+# Configure common parameters
+function neutron_plugin_configure_common {
+
+ Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
+ Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
+ Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
+ Q_DB_NAME='oc_nvsd_neutron'
+}
+
+# Configure plugin specific information
+function neutron_plugin_configure_service {
+ iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
+ iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
+ iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
+ iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
+}
+
+function neutron_plugin_configure_debug_command {
+ _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_setup_interface_driver {
+ local conf_file=$1
+ iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group {
+    # 0 means True here (this plugin provides security group support)
+ return 0
+}
+
+function setup_integration_bridge {
+ _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_dhcp_agent {
+ setup_integration_bridge
+ iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+ _neutron_ovs_base_configure_l3_agent
+ iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+
+ AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
+
+ _neutron_ovs_base_configure_firewall_driver
+}
+
+function neutron_plugin_create_nova_conf {
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
+ setup_integration_bridge
+ fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 424a900..b2c1b61 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -18,14 +18,8 @@
# Ryu Applications
RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-# configure_ryu can be called multiple times as neutron_pluing/ryu may call
-# this function for neutron-ryu-agent
-_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
function configure_ryu {
- if [[ "$_RYU_CONFIGURED" == "False" ]]; then
- setup_develop $RYU_DIR
- _RYU_CONFIGURED=True
- fi
+ :
}
function init_ryu {
@@ -63,6 +57,7 @@
function install_ryu {
if [[ "$_RYU_INSTALLED" == "False" ]]; then
git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+ export PYTHONPATH=$RYU_DIR:$PYTHONPATH
_RYU_INSTALLED=True
fi
}
diff --git a/lib/nova b/lib/nova
index 583a592..360427d 100644
--- a/lib/nova
+++ b/lib/nova
@@ -308,7 +308,7 @@
# Rebuild the config file from scratch
create_nova_conf
- if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+ if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
# Configure hypervisor plugin
configure_nova_hypervisor
fi
@@ -665,17 +665,6 @@
fi
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
- # Enable client side traces for libvirt
- local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
- local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
- # Enable server side traces for libvirtd
- if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
- echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
- fi
- if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
- echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
- fi
-
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
@@ -726,19 +715,27 @@
start_nova_rest
}
-# stop_nova() - Stop running processes (non-screen)
-function stop_nova {
- # Kill the nova screen windows
- # Some services are listed here twice since more than one instance
- # of a service may be running in certain configs.
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
- screen_stop $serv
- done
+function stop_nova_compute {
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
stop_nova_hypervisor
fi
}
+function stop_nova_rest {
+ # Kill the nova screen windows
+ # Some services are listed here twice since more than one instance
+ # of a service may be running in certain configs.
+ for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+ screen_stop $serv
+ done
+}
+
+# stop_nova() - Stop running processes (non-screen)
+function stop_nova {
+ stop_nova_rest
+ stop_nova_compute
+}
+
# Restore xtrace
$XTRACE
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index bbf6554..5a51f33 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -25,6 +25,8 @@
# File injection is disabled by default in Nova. This will turn it back on.
ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+# if we should turn on massive libvirt debugging
+DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT)
# Entry Points
@@ -103,6 +105,18 @@
fi
add_user_to_group $STACK_USER $LIBVIRT_GROUP
+ # Enable server side traces for libvirtd
+ if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then
+ local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+ local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+ if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+ echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+ fi
+ if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+ echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+ fi
+ fi
+
# libvirt detects various settings on startup, as we potentially changed
# the system configuration (modules, filesystems), we need to restart
# libvirt to detect those changes.
diff --git a/lib/sahara b/lib/sahara
new file mode 100644
index 0000000..4cb04ec
--- /dev/null
+++ b/lib/sahara
@@ -0,0 +1,177 @@
+# lib/sahara
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_sahara
+# configure_sahara
+# start_sahara
+# stop_sahara
+# cleanup_sahara
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
+SAHARA_BRANCH=${SAHARA_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DIR=$DEST/sahara
+SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
+SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
+SAHARA_DEBUG=${SAHARA_DEBUG:-True}
+
+SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
+SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
+SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
+
+# Support entry points installation of console scripts
+if [[ -d $SAHARA_DIR/bin ]]; then
+ SAHARA_BIN_DIR=$SAHARA_DIR/bin
+else
+ SAHARA_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Tell Tempest this project is present
+TEMPEST_SERVICES+=,sahara
+
+# For backward compatibility with current tests in Tempest
+TEMPEST_SERVICES+=,savanna
+
+
+# Functions
+# ---------
+
+# create_sahara_accounts() - Set up common required sahara accounts
+#
+# Tenant User Roles
+# ------------------------------
+# service sahara admin
+function create_sahara_accounts {
+
+ SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+ SAHARA_USER=$(openstack user create \
+ sahara \
+ --password "$SERVICE_PASSWORD" \
+ --project $SERVICE_TENANT \
+ --email sahara@example.com \
+ | grep " id " | get_field 2)
+ openstack role add \
+ $ADMIN_ROLE \
+ --project $SERVICE_TENANT \
+ --user $SAHARA_USER
+
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ SAHARA_SERVICE=$(openstack service create \
+ sahara \
+ --type=data_processing \
+ --description="Sahara Data Processing" \
+ | grep " id " | get_field 2)
+ openstack endpoint create \
+ $SAHARA_SERVICE \
+ --region RegionOne \
+ --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ fi
+}
+
+# cleanup_sahara() - Remove residual data files, anything left over from
+# previous runs that would need to clean up.
+function cleanup_sahara {
+
+ # Cleanup auth cache dir
+ sudo rm -rf $SAHARA_AUTH_CACHE_DIR
+}
+
+# configure_sahara() - Set config files, create data dirs, etc
+function configure_sahara {
+
+ if [[ ! -d $SAHARA_CONF_DIR ]]; then
+ sudo mkdir -p $SAHARA_CONF_DIR
+ fi
+ sudo chown $STACK_USER $SAHARA_CONF_DIR
+
+ # Copy over sahara configuration file and configure common parameters.
+ # TODO(slukjanov): rename when sahara internals will be updated
+ cp $SAHARA_DIR/etc/savanna/savanna.conf.sample $SAHARA_CONF_FILE
+
+ # Create auth cache dir
+ sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
+ rm -rf $SAHARA_AUTH_CACHE_DIR/*
+
+ # Set obsolete keystone auth configs for backward compatibility
+ iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+ iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+ iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+ iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+ iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara
+ iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+ # Set actual keystone auth configs
+ iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara
+ iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
+ iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
+ iniset $SAHARA_CONF_FILE DEFAULT debug $SAHARA_DEBUG
+
+ iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
+
+ if is_service_enabled neutron; then
+ iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
+ iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
+ fi
+
+ if is_service_enabled heat; then
+ iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
+ else
+ iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
+ fi
+
+ iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
+ recreate_database sahara utf8
+ $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
+}
+
+# install_sahara() - Collect source and prepare
+function install_sahara {
+ git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
+ setup_develop $SAHARA_DIR
+}
+
+# start_sahara() - Start running processes, including screen
+function start_sahara {
+ screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
+}
+
+# stop_sahara() - Stop running processes
+function stop_sahara {
+ # Kill the Sahara screen windows
+ screen -S $SCREEN_NAME -p sahara -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard
new file mode 100644
index 0000000..a81df0f
--- /dev/null
+++ b/lib/sahara-dashboard
@@ -0,0 +1,72 @@
+# lib/sahara-dashboard
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_HOST``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_sahara_dashboard
+# - configure_sahara_dashboard
+# - cleanup_sahara_dashboard
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/horizon
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git}
+SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master}
+
+SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
+SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard
+SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
+
+# Functions
+# ---------
+
+function configure_sahara_dashboard {
+
+ echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+ echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+ echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+
+ if is_service_enabled neutron; then
+ echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+ fi
+}
+
+# install_sahara_dashboard() - Collect source and prepare
+function install_sahara_dashboard {
+ install_python_saharaclient
+ git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH
+ setup_develop $SAHARA_DASHBOARD_DIR
+}
+
+function install_python_saharaclient {
+ git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
+ setup_develop $SAHARA_PYTHONCLIENT_DIR
+}
+
+# Cleanup file settings.py from Sahara
+function cleanup_sahara_dashboard {
+ sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/savanna b/lib/savanna
deleted file mode 100644
index 2cb092c..0000000
--- a/lib/savanna
+++ /dev/null
@@ -1,173 +0,0 @@
-# lib/savanna
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_savanna
-# configure_savanna
-# start_savanna
-# stop_savanna
-# cleanup_savanna
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git}
-SAVANNA_BRANCH=${SAVANNA_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DIR=$DEST/savanna
-SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
-SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf
-SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
-
-SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
-SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
-SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
-
-# Support entry points installation of console scripts
-if [[ -d $SAVANNA_DIR/bin ]]; then
- SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
-else
- SAVANNA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,savanna
-
-
-# Functions
-# ---------
-
-# create_savanna_accounts() - Set up common required savanna accounts
-#
-# Tenant User Roles
-# ------------------------------
-# service savanna admin
-function create_savanna_accounts {
-
- SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
- ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
- SAVANNA_USER=$(openstack user create \
- savanna \
- --password "$SERVICE_PASSWORD" \
- --project $SERVICE_TENANT \
- --email savanna@example.com \
- | grep " id " | get_field 2)
- openstack role add \
- $ADMIN_ROLE \
- --project $SERVICE_TENANT \
- --user $SAVANNA_USER
-
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- SAVANNA_SERVICE=$(openstack service create \
- savanna \
- --type=data_processing \
- --description="Savanna Data Processing" \
- | grep " id " | get_field 2)
- openstack endpoint create \
- $SAVANNA_SERVICE \
- --region RegionOne \
- --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
- fi
-}
-
-# cleanup_savanna() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_savanna {
-
- # Cleanup auth cache dir
- sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
-}
-
-# configure_savanna() - Set config files, create data dirs, etc
-function configure_savanna {
-
- if [[ ! -d $SAVANNA_CONF_DIR ]]; then
- sudo mkdir -p $SAVANNA_CONF_DIR
- fi
- sudo chown $STACK_USER $SAVANNA_CONF_DIR
-
- # Copy over savanna configuration file and configure common parameters.
- cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
-
- # Create auth cache dir
- sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
- sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
- rm -rf $SAVANNA_AUTH_CACHE_DIR/*
-
- # Set obsolete keystone auth configs for backward compatibility
- iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
- iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
- iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
- iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
- iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
- iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
-
- # Set actual keystone auth configs
- iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
- iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
- iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
- iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
- iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
- iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-
- iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
-
- iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
-
- if is_service_enabled neutron; then
- iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true
- iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
- fi
-
- if is_service_enabled heat; then
- iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
- else
- iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
- fi
-
- iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
- recreate_database savanna utf8
- $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head
-}
-
-# install_savanna() - Collect source and prepare
-function install_savanna {
- git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
- setup_develop $SAVANNA_DIR
-}
-
-# start_savanna() - Start running processes, including screen
-function start_savanna {
- screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE"
-}
-
-# stop_savanna() - Stop running processes
-function stop_savanna {
- # Kill the Savanna screen windows
- screen -S $SCREEN_NAME -p savanna -X kill
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
deleted file mode 100644
index 6fe15a3..0000000
--- a/lib/savanna-dashboard
+++ /dev/null
@@ -1,72 +0,0 @@
-# lib/savanna-dashboard
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_HOST``
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_savanna_dashboard
-# - configure_savanna_dashboard
-# - cleanup_savanna_dashboard
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/horizon
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git}
-SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master}
-
-SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git}
-SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
-SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
-
-# Functions
-# ---------
-
-function configure_savanna_dashboard {
-
- echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
- echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
- echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-
- if is_service_enabled neutron; then
- echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
- fi
-}
-
-# install_savanna_dashboard() - Collect source and prepare
-function install_savanna_dashboard {
- install_python_savannaclient
- git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
- setup_develop $SAVANNA_DASHBOARD_DIR
-}
-
-function install_python_savannaclient {
- git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
- setup_develop $SAVANNA_PYTHONCLIENT_DIR
-}
-
-# Cleanup file settings.py from Savanna
-function cleanup_savanna_dashboard {
- sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
-}
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
-
diff --git a/lib/swift b/lib/swift
index b8bc1b6..b655440 100644
--- a/lib/swift
+++ b/lib/swift
@@ -67,8 +67,8 @@
SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
-# Default is ``staticweb, tempurl, formpost``
-SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb}
+# Default is ``staticweb, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}
# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to extras middlewares that need to be at
# the end of the pipeline.