Merge "Update Neutron section in README"
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index eeb1f21..d4968a6 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -63,11 +63,6 @@
* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
* Fumihiko Kakuma <kakuma@valinux.co.jp>
-Sahara
-~~~~~~
-
-* Sergey Lukjanov <slukjanov@mirantis.com>
-
Swift
~~~~~
diff --git a/README.md b/README.md
index cd83dd8..7833b03 100644
--- a/README.md
+++ b/README.md
@@ -117,19 +117,13 @@
# RPC Backend
-Multiple RPC backends are available. Currently, this
-includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
-choice may be selected via the `localrc` section.
+Support for a RabbitMQ RPC backend is included. Additional RPC backends may
+be available via external plugins. Enabling or disabling RabbitMQ is handled
+via the usual service functions and `ENABLED_SERVICES`.
-Note that selecting more than one RPC backend will result in a failure.
+Example of disabling RabbitMQ in `local.conf`:
-Example (ZeroMQ):
-
- ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
-
-Example (Qpid):
-
- ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
+ disable_service rabbit
# Apache Frontend
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index b09d386..f61002b 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -122,7 +122,7 @@
::
- enable_service qpid
+ enable_service q-svc
How do I run a specific OpenStack milestone?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f15c306..2dd0241 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -170,7 +170,6 @@
* `lib/nova <lib/nova.html>`__
* `lib/oslo <lib/oslo.html>`__
* `lib/rpc\_backend <lib/rpc_backend.html>`__
-* `lib/sahara <lib/sahara.html>`__
* `lib/swift <lib/swift.html>`__
* `lib/tempest <lib/tempest.html>`__
* `lib/tls <lib/tls.html>`__
@@ -181,7 +180,6 @@
* `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
* `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
-* `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
* `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
* `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
* `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
@@ -238,7 +236,6 @@
* `exercises/floating\_ips.sh <exercises/floating_ips.sh.html>`__
* `exercises/horizon.sh <exercises/horizon.sh.html>`__
* `exercises/neutron-adv-test.sh <exercises/neutron-adv-test.sh.html>`__
-* `exercises/sahara.sh <exercises/sahara.sh.html>`__
* `exercises/sec\_groups.sh <exercises/sec_groups.sh.html>`__
* `exercises/swift.sh <exercises/swift.sh.html>`__
* `exercises/volumes.sh <exercises/volumes.sh.html>`__
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 2dd70d8..c5c4e1e 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -22,6 +22,8 @@
+--------------------+-------------------------------------------+--------------------+
|magnum |git://git.openstack.org/openstack/magnum | |
+--------------------+-------------------------------------------+--------------------+
+|sahara |git://git.openstack.org/openstack/sahara | |
++--------------------+-------------------------------------------+--------------------+
|trove |git://git.openstack.org/openstack/trove | |
+--------------------+-------------------------------------------+--------------------+
|zaqar |git://git.openstack.org/openstack/zarar | |
diff --git a/exercises/sahara.sh b/exercises/sahara.sh
deleted file mode 100755
index 8cad945..0000000
--- a/exercises/sahara.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env bash
-
-# **sahara.sh**
-
-# Sanity check that Sahara started if enabled
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-is_service_enabled sahara || exit 55
-
-if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
- SAHARA_SERVICE_PROTOCOL="https"
-fi
-
-SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-$CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
deleted file mode 100644
index f177766..0000000
--- a/extras.d/70-sahara.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# sahara.sh - DevStack extras script to install Sahara
-
-if is_service_enabled sahara; then
- if [[ "$1" == "source" ]]; then
- # Initial source
- source $TOP_DIR/lib/sahara
- elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing sahara"
- install_sahara
- install_python_saharaclient
- cleanup_sahara
- elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- echo_summary "Configuring sahara"
- configure_sahara
- create_sahara_accounts
- elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- echo_summary "Initializing sahara"
- sahara_register_images
- start_sahara
- fi
-
- if [[ "$1" == "unstack" ]]; then
- stop_sahara
- fi
-
- if [[ "$1" == "clean" ]]; then
- cleanup_sahara
- fi
-fi
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
index 301a3bd..4908152 100644
--- a/files/apache-nova-api.template
+++ b/files/apache-nova-api.template
@@ -14,3 +14,12 @@
%SSLCERTFILE%
%SSLKEYFILE%
</VirtualHost>
+
+Alias /compute %PUBLICWSGI%
+<Location /compute>
+ SetHandler wsgi-script
+ Options +ExecCGI
+ WSGIProcessGroup nova-api
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/neutron b/files/debs/neutron
index 2d69a71..b5a457e 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -9,11 +9,9 @@
postgresql-server-dev-all
python-mysqldb
python-mysql.connector
-python-qpid # NOPRIME
dnsmasq-base
dnsmasq-utils # for dhcp_release only available in dist:precise
rabbitmq-server # NOPRIME
-qpidd # NOPRIME
sqlite3
vlan
radvd # NOPRIME
diff --git a/files/debs/nova b/files/debs/nova
index 9d9acde..346b8b3 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -24,10 +24,8 @@
curl
genisoimage # required for config_drive
rabbitmq-server # NOPRIME
-qpidd # NOPRIME
socat # used by ajaxterm
python-libvirt # NOPRIME
python-libxml2
python-numpy # used by websockify for spice console
python-m2crypto
-python-qpid # NOPRIME
diff --git a/files/debs/qpid b/files/debs/qpid
deleted file mode 100644
index e3bbf09..0000000
--- a/files/debs/qpid
+++ /dev/null
@@ -1 +0,0 @@
-sasl2-bin # NOPRIME
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index e75db89..1339799 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -11,6 +11,3 @@
sudo
vlan
radvd # NOPRIME
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-qpidd # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 6f8aef1..039456f 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -22,7 +22,3 @@
sqlite3
sudo
vlan
-
-# FIXME: qpid is not part of openSUSE, those names are tentative
-python-qpid # NOPRIME
-qpidd # NOPRIME
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 8292e7b..29851be 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,7 +11,6 @@
openvswitch # NOPRIME
postgresql-devel
rabbitmq-server # NOPRIME
-qpid-cpp-server # NOPRIME
sqlite
sudo
radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index d32c332..6eeb623 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -23,6 +23,5 @@
parted
polkit
rabbitmq-server # NOPRIME
-qpid-cpp-server # NOPRIME
sqlite
sudo
diff --git a/files/rpms/qpid b/files/rpms/qpid
deleted file mode 100644
index 41dd2f6..0000000
--- a/files/rpms/qpid
+++ /dev/null
@@ -1,3 +0,0 @@
-qpid-proton-c-devel # NOPRIME
-cyrus-sasl-lib # NOPRIME
-cyrus-sasl-plain # NOPRIME
diff --git a/functions-common b/functions-common
index 6ab567a..d6be1ec 100644
--- a/functions-common
+++ b/functions-common
@@ -806,7 +806,7 @@
# Gets service id
local service_id=$(
# Gets service id
- openstack service show $1 -f value -c id 2>/dev/null ||
+ openstack service show $2 -f value -c id 2>/dev/null ||
# Creates new service if not exists
openstack service create \
$2 \
@@ -1342,7 +1342,7 @@
if is_service_enabled $service; then
# Clean up the screen window
- screen -S $SCREEN_NAME -p $service -X kill
+ screen -S $SCREEN_NAME -p $service -X kill || true
fi
}
@@ -1683,7 +1683,7 @@
# ``ENABLED_SERVICES`` list, if they are not already present.
#
# For example:
-# enable_service qpid
+# enable_service q-svc
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
diff --git a/inc/python b/inc/python
index 07a811e..54e19a7 100644
--- a/inc/python
+++ b/inc/python
@@ -66,7 +66,8 @@
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``
+# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``USE_CONSTRAINTS``
# pip_install package [package ...]
function pip_install {
local xtrace=$(set +o | grep xtrace)
@@ -103,6 +104,13 @@
fi
fi
+ cmd_pip="$cmd_pip install"
+
+ # Handle a constraints file, if needed.
+ if [[ "$USE_CONSTRAINTS" == "True" ]]; then
+ cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
+ fi
+
local pip_version=$(python -c "import pip; \
print(pip.__version__.strip('.')[0])")
if (( pip_version<6 )); then
@@ -116,7 +124,7 @@
https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
- $cmd_pip install $upgrade \
+ $cmd_pip $upgrade \
$@
# Also install test requirements
@@ -128,7 +136,7 @@
https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
- $cmd_pip install $upgrade \
+ $cmd_pip $upgrade \
-r $test_req
fi
}
@@ -195,7 +203,7 @@
function is_in_projects_txt {
local project_dir=$1
local project_name=$(basename $project_dir)
- return grep "/$project_name\$" $REQUIREMENTS_DIR/projects.txt >/dev/null
+ grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
}
# ``pip install -e`` the package, which processes the dependencies
@@ -215,7 +223,7 @@
# ``errexit`` requires us to trap the exit code when the repo is changed
local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
- if [[ $update_requirements != "changed" ]]; then
+ if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then
if is_in_projects_txt $project_dir; then
(cd $REQUIREMENTS_DIR; \
./.venv/bin/python update.py $project_dir)
@@ -227,6 +235,14 @@
fi
fi
+ if [ -n "$REQUIREMENTS_DIR" ]; then
+ # Constrain this package to this project directory from here on out.
+ local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+ $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+ $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
+ "$flags file://$project_dir#egg=$name"
+ fi
+
setup_package $project_dir $flags
# We've just gone and possibly modified the user's source tree in an
diff --git a/lib/ceilometer b/lib/ceilometer
index ed9b933..163ed0b 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -360,16 +360,6 @@
fi
}
-# install_ceilometermiddleware() - Collect source and prepare
-function install_ceilometermiddleware {
- if use_library_from_git "ceilometermiddleware"; then
- git_clone_by_name "ceilometermiddleware"
- setup_dev_lib "ceilometermiddleware"
- else
- pip_install_gr ceilometermiddleware
- fi
-}
-
# start_ceilometer() - Start running processes, including screen
function start_ceilometer {
run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
diff --git a/lib/ceph b/lib/ceph
index cbdc3b8..16dcda2 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -264,10 +264,6 @@
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
- # NOTE(eharney): When Glance has fully migrated to Glance store,
- # default_store can be removed from [DEFAULT]. (See lib/glance.)
- iniset $GLANCE_API_CONF DEFAULT default_store rbd
- iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
iniset $GLANCE_API_CONF glance_store default_store rbd
iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
diff --git a/lib/glance b/lib/glance
index f2c5e99..4dbce9f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -113,9 +113,7 @@
iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
- if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
- fi
+ iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
@@ -126,9 +124,7 @@
iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
- if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
- fi
+ iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
iniset_rpc_backend glance $GLANCE_API_CONF
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 9e72aa0..ca0b70c 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,4 +1,10 @@
#!/bin/bash
-# REVISIT(devvesa): This file is intentionally left empty
-# in order to keep Q_PLUGIN=midonet work.
+# REVISIT(devvesa): This file is needed so Q_PLUGIN=midonet will work.
+
+# FIXME(yamamoto): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
+function has_neutron_plugin_security_group {
+ # 0 means True here
+ return 0
+}
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index f465cc9..34190f9 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -42,7 +42,7 @@
function neutron_lbaas_stop {
pids=$(ps aux | awk '/haproxy/ { print $2 }')
- [ ! -z "$pids" ] && sudo kill $pids
+ [ ! -z "$pids" ] && sudo kill $pids || true
}
# Restore xtrace
diff --git a/lib/nova b/lib/nova
index 88b336a..41248b1 100644
--- a/lib/nova
+++ b/lib/nova
@@ -405,19 +405,25 @@
local nova_service=$(get_or_create_service "nova" \
"compute" "Nova Compute Service")
+ local nova_api_url
+ if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
+ nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
+ else
+ nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
+ fi
get_or_create_endpoint $nova_service \
"$REGION_NAME" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
+ "$nova_api_url/v2/\$(tenant_id)s" \
+ "$nova_api_url/v2/\$(tenant_id)s" \
+ "$nova_api_url/v2/\$(tenant_id)s"
local nova_v21_service=$(get_or_create_service "novav21" \
"computev21" "Nova Compute Service V2.1")
get_or_create_endpoint $nova_v21_service \
"$REGION_NAME" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s"
+ "$nova_api_url/v2.1/\$(tenant_id)s" \
+ "$nova_api_url/v2.1/\$(tenant_id)s" \
+ "$nova_api_url/v2.1/\$(tenant_id)s"
fi
fi
diff --git a/lib/oslo b/lib/oslo
index be935bb..123572c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -36,6 +36,7 @@
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.reports"]=$DEST/oslo.reports
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
GITDIR["oslo.serialization"]=$DEST/oslo.serialization
GITDIR["oslo.service"]=$DEST/oslo.service
@@ -64,6 +65,7 @@
# install_oslo() - Collect source and prepare
function install_oslo {
+ _do_install_oslo_lib "automaton"
_do_install_oslo_lib "cliff"
_do_install_oslo_lib "debtcollector"
_do_install_oslo_lib "futurist"
@@ -77,6 +79,7 @@
_do_install_oslo_lib "oslo.messaging"
_do_install_oslo_lib "oslo.middleware"
_do_install_oslo_lib "oslo.policy"
+ _do_install_oslo_lib "oslo.reports"
_do_install_oslo_lib "oslo.rootwrap"
_do_install_oslo_lib "oslo.serialization"
_do_install_oslo_lib "oslo.service"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 33ab03d..03eacd8 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -1,72 +1,32 @@
#!/bin/bash
#
# lib/rpc_backend
-# Interface for interactig with different RPC backends
+# Interface for installing RabbitMQ on the system
# Dependencies:
#
# - ``functions`` file
# - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used
-# - ``RPC_MESSAGING_PROTOCOL`` option for configuring the messaging protocol
# ``stack.sh`` calls the entry points in this order:
#
# - check_rpc_backend
# - install_rpc_backend
# - restart_rpc_backend
-# - iniset_rpc_backend
+# - iniset_rpc_backend (stable interface)
+#
+# Note: if implementing an out of tree plugin for an RPC backend, you
+# should install all services through normal plugin methods, then
+# redefine ``iniset_rpc_backend`` in your code. That's the one portion
+# of this file which is a standard interface.
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
-RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9}
-
-# TODO(sdague): RPC backend selection is super wonky because we treat
-# messaging server as a service, which it really isn't for multi host
-QPID_HOST=${QPID_HOST:-}
-
-
# Functions
# ---------
-# Make sure we only have one rpc backend enabled.
-# Also check the specified rpc backend is available on your platform.
-function check_rpc_backend {
- local c svc
-
- local rpc_needed=1
- # We rely on the fact that filenames in lib/* match the service names
- # that can be passed as arguments to is_service_enabled.
- # We check for a call to iniset_rpc_backend in these files, meaning
- # the service needs a backend.
- rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}')
- for c in ${rpc_candidates}; do
- if is_service_enabled $c; then
- rpc_needed=0
- break
- fi
- done
- local rpc_backend_cnt=0
- for svc in qpid zeromq rabbit; do
- is_service_enabled $svc &&
- (( rpc_backend_cnt++ )) || true
- done
- if [ "$rpc_backend_cnt" -gt 1 ]; then
- echo "ERROR: only one rpc backend may be enabled,"
- echo " set only one of 'rabbit', 'qpid', 'zeromq'"
- echo " via ENABLED_SERVICES."
- elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then
- echo "ERROR: at least one rpc backend must be enabled,"
- echo " set one of 'rabbit', 'qpid', 'zeromq'"
- echo " via ENABLED_SERVICES."
- fi
-
- if is_service_enabled qpid && ! qpid_is_supported; then
- die $LINENO "Qpid support is not available for this version of your distribution."
- fi
-}
-
# clean up after rpc backend - eradicate all traces so changing backends
# produces a clean switch
function cleanup_rpc_backend {
@@ -79,110 +39,14 @@
# And the Erlang runtime too
apt_get purge -y erlang*
fi
- elif is_service_enabled qpid; then
- if is_fedora; then
- uninstall_package qpid-cpp-server
- elif is_ubuntu; then
- uninstall_package qpidd
- else
- exit_distro_not_supported "qpid installation"
- fi
- elif is_service_enabled zeromq; then
- if is_fedora; then
- uninstall_package zeromq python-zmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- uninstall_package redis python-redis
- fi
- elif is_ubuntu; then
- uninstall_package libzmq1 python-zmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- uninstall_package redis-server python-redis
- fi
- elif is_suse; then
- uninstall_package libzmq1 python-pyzmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- uninstall_package redis python-redis
- fi
- else
- exit_distro_not_supported "zeromq installation"
- fi
- fi
-
- # Remove the AMQP 1.0 messaging libraries
- if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
- if is_fedora; then
- uninstall_package qpid-proton-c-devel
- uninstall_package python-qpid-proton
- fi
- # TODO(kgiusti) ubuntu cleanup
fi
}
# install rpc backend
function install_rpc_backend {
- # Regardless of the broker used, if AMQP 1.0 is configured load
- # the necessary messaging client libraries for oslo.messaging
- if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
- if is_fedora; then
- install_package qpid-proton-c-devel
- install_package python-qpid-proton
- elif is_ubuntu; then
- # TODO(kgiusti) The QPID AMQP 1.0 protocol libraries
- # are not yet in the ubuntu repos. Enable these installs
- # once they are present:
- #install_package libqpid-proton2-dev
- #install_package python-qpid-proton
- # Also add 'uninstall' directives in cleanup_rpc_backend()!
- exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
- else
- exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
- fi
- # Install pyngus client API
- # TODO(kgiusti) can remove once python qpid bindings are
- # available on all supported platforms _and_ pyngus is added
- # to the requirements.txt file in oslo.messaging
- pip_install_gr pyngus
- fi
-
if is_service_enabled rabbit; then
# Install rabbitmq-server
install_package rabbitmq-server
- elif is_service_enabled qpid; then
- if is_fedora; then
- install_package qpid-cpp-server
- elif is_ubuntu; then
- install_package qpidd
- else
- exit_distro_not_supported "qpid installation"
- fi
- _configure_qpid
- elif is_service_enabled zeromq; then
- if is_fedora; then
- install_package zeromq python-zmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- install_package redis python-redis
- fi
- elif is_ubuntu; then
- install_package libzmq1 python-zmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- install_package redis-server python-redis
- fi
- elif is_suse; then
- install_package libzmq1 python-pyzmq
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- install_package redis python-redis
- fi
- else
- exit_distro_not_supported "zeromq installation"
- fi
- # Necessary directory for socket location.
- sudo mkdir -p /var/run/openstack
- sudo chown $STACK_USER /var/run/openstack
- fi
-
- # If using the QPID broker, install the QPID python client API
- if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
- install_package python-qpid
fi
}
@@ -232,17 +96,12 @@
sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*"
fi
fi
- elif is_service_enabled qpid; then
- echo_summary "Starting qpid"
- restart_service qpidd
fi
}
# builds transport url string
function get_transport_url {
- if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
- echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/"
- elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+ if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/"
fi
}
@@ -252,29 +111,7 @@
local package=$1
local file=$2
local section=${3:-DEFAULT}
- if is_service_enabled zeromq; then
- iniset $file $section rpc_backend "zmq"
- iniset $file $section rpc_zmq_host `hostname`
- if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
- iniset $file $section rpc_zmq_matchmaker "redis"
- MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
- iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
- else
- die $LINENO "Other matchmaker drivers not supported"
- fi
- elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
- # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
- if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
- iniset $file $section rpc_backend "amqp"
- else
- iniset $file $section rpc_backend "qpid"
- fi
- iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST}
- if [ -n "$QPID_USERNAME" ]; then
- iniset $file $section qpid_username $QPID_USERNAME
- iniset $file $section qpid_password $QPID_PASSWORD
- fi
- elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+ if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
iniset $file $section rpc_backend "rabbit"
iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST
iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
@@ -288,17 +125,6 @@
fi
}
-# Check if qpid can be used on the current distro.
-# qpid_is_supported
-function qpid_is_supported {
- if [[ -z "$DISTRO" ]]; then
- GetDistro
- fi
-
- # Qpid is not in openSUSE
- ( ! is_suse )
-}
-
function rabbit_setuser {
local user="$1" pass="$2" found="" out=""
out=$(sudo rabbitmqctl list_users) ||
@@ -314,85 +140,6 @@
sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*"
}
-# Set up the various configuration files used by the qpidd broker
-function _configure_qpid {
-
- # the location of the configuration files have changed since qpidd 0.14
- local qpid_conf_file
- if [ -e /etc/qpid/qpidd.conf ]; then
- qpid_conf_file=/etc/qpid/qpidd.conf
- elif [ -e /etc/qpidd.conf ]; then
- qpid_conf_file=/etc/qpidd.conf
- else
- exit_distro_not_supported "qpidd.conf file not found!"
- fi
-
- # force the ACL file to a known location
- local qpid_acl_file=/etc/qpid/qpidd.acl
- if [ ! -e $qpid_acl_file ]; then
- sudo mkdir -p -m 755 `dirname $qpid_acl_file`
- sudo touch $qpid_acl_file
- sudo chmod o+r $qpid_acl_file
- fi
- sudo sed -i.bak '/^acl-file=/d' $qpid_conf_file
- echo "acl-file=$qpid_acl_file" | sudo tee --append $qpid_conf_file
-
- sudo sed -i '/^auth=/d' $qpid_conf_file
- if [ -z "$QPID_USERNAME" ]; then
- # no QPID user configured, so disable authentication
- # and access control
- echo "auth=no" | sudo tee --append $qpid_conf_file
- cat <<EOF | sudo tee $qpid_acl_file
-acl allow all all
-EOF
- else
- # Configure qpidd to use PLAIN authentication, and add
- # QPID_USERNAME to the ACL:
- echo "auth=yes" | sudo tee --append $qpid_conf_file
- if [ -z "$QPID_PASSWORD" ]; then
- read_password QPID_PASSWORD "ENTER A PASSWORD FOR QPID USER $QPID_USERNAME"
- fi
- # Create ACL to allow $QPID_USERNAME full access
- cat <<EOF | sudo tee $qpid_acl_file
-group admin ${QPID_USERNAME}@QPID
-acl allow admin all
-acl deny all all
-EOF
- # Add user to SASL database
- if is_ubuntu; then
- install_package sasl2-bin
- elif is_fedora; then
- install_package cyrus-sasl-lib
- install_package cyrus-sasl-plain
- fi
- local sasl_conf_file=/etc/sasl2/qpidd.conf
- sudo sed -i.bak '/PLAIN/!s/mech_list: /mech_list: PLAIN /' $sasl_conf_file
- local sasl_db=`sudo grep sasldb_path $sasl_conf_file | cut -f 2 -d ":" | tr -d [:blank:]`
- if [ ! -e $sasl_db ]; then
- sudo mkdir -p -m 755 `dirname $sasl_db`
- fi
- echo $QPID_PASSWORD | sudo saslpasswd2 -c -p -f $sasl_db -u QPID $QPID_USERNAME
- sudo chmod o+r $sasl_db
- fi
-
- # If AMQP 1.0 is specified, ensure that the version of the
- # broker can support AMQP 1.0 and configure the queue and
- # topic address patterns used by oslo.messaging.
- if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
- QPIDD=$(type -p qpidd)
- if ! $QPIDD --help | grep -q "queue-patterns"; then
- exit_distro_not_supported "qpidd with AMQP 1.0 support"
- fi
- if ! grep -q "queue-patterns=exclusive" $qpid_conf_file; then
- cat <<EOF | sudo tee --append $qpid_conf_file
-queue-patterns=exclusive
-queue-patterns=unicast
-topic-patterns=broadcast
-EOF
- fi
- fi
-}
-
# Restore xtrace
$XTRACE
diff --git a/lib/sahara b/lib/sahara
deleted file mode 100644
index 51e431a..0000000
--- a/lib/sahara
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/bin/bash
-#
-# lib/sahara
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_sahara
-# install_python_saharaclient
-# configure_sahara
-# sahara_register_images
-# start_sahara
-# stop_sahara
-# cleanup_sahara
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-
-# Set up default directories
-GITDIR["python-saharaclient"]=$DEST/python-saharaclient
-SAHARA_DIR=$DEST/sahara
-
-SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
-SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
-
-if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
- SAHARA_SERVICE_PROTOCOL="https"
-fi
-SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
-SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
-SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386}
-SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
-
-SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake}
-
-# Support entry points installation of console scripts
-if [[ -d $SAHARA_DIR/bin ]]; then
- SAHARA_BIN_DIR=$SAHARA_DIR/bin
-else
- SAHARA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,sahara
-
-# Functions
-# ---------
-
-# create_sahara_accounts() - Set up common required sahara accounts
-#
-# Tenant User Roles
-# ------------------------------
-# service sahara admin
-function create_sahara_accounts {
-
- create_service_user "sahara"
-
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
- # TODO: remove "data_processing" service when #1356053 will be fixed
- local sahara_service_old=$(openstack service create \
- "data_processing" \
- --name "sahara" \
- --description "Sahara Data Processing" \
- -f value -c id
- )
- local sahara_service_new=$(openstack service create \
- "data-processing" \
- --name "sahara" \
- --description "Sahara Data Processing" \
- -f value -c id
- )
- get_or_create_endpoint $sahara_service_old \
- "$REGION_NAME" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
- get_or_create_endpoint $sahara_service_new \
- "$REGION_NAME" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
- "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
- fi
-}
-
-# cleanup_sahara() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_sahara {
-
- # Cleanup auth cache dir
- sudo rm -rf $SAHARA_AUTH_CACHE_DIR
-}
-
-# configure_sahara() - Set config files, create data dirs, etc
-function configure_sahara {
- sudo install -d -o $STACK_USER $SAHARA_CONF_DIR
-
- if [[ -f $SAHARA_DIR/etc/sahara/policy.json ]]; then
- cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR
- fi
-
- # Create auth cache dir
- sudo install -d -o $STACK_USER -m 700 $SAHARA_AUTH_CACHE_DIR
- rm -rf $SAHARA_AUTH_CACHE_DIR/*
-
- configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR
-
- iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT
-
- # Set configuration to send notifications
-
- if is_service_enabled ceilometer; then
- iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true"
- iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging"
- fi
-
- iniset $SAHARA_CONF_FILE DEFAULT verbose True
- iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
- iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS
-
- iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
-
- if is_service_enabled neutron; then
- iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
-
- if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE
- fi
- else
- iniset $SAHARA_CONF_FILE DEFAULT use_neutron false
- fi
-
- if is_service_enabled heat; then
- iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
-
- if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE
- fi
- else
- iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
- fi
-
- if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE
- fi
-
- if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE
- fi
-
- if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE
- fi
-
- if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
- iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE
- fi
-
- # Register SSL certificates if provided
- if is_ssl_enabled_service sahara; then
- ensure_certificates SAHARA
-
- iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT"
- iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY"
- fi
-
- iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
- # Format logging
- if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
- setup_colorized_logging $SAHARA_CONF_FILE DEFAULT
- fi
-
- if is_service_enabled tls-proxy; then
- # Set the service port for a proxy to take the original
- iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT
- fi
-
- recreate_database sahara
- $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
-}
-
-# install_sahara() - Collect source and prepare
-function install_sahara {
- git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
- setup_develop $SAHARA_DIR
-}
-
-# install_python_saharaclient() - Collect source and prepare
-function install_python_saharaclient {
- if use_library_from_git "python-saharaclient"; then
- git_clone_by_name "python-saharaclient"
- setup_dev_lib "python-saharaclient"
- fi
-}
-
-# sahara_register_images() - Registers images in sahara image registry
-function sahara_register_images {
- if is_service_enabled heat && [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
- # Register heat image for Fake plugin
- local fake_plugin_properties="--property _sahara_tag_0.1=True"
- fake_plugin_properties+=" --property _sahara_tag_fake=True"
- fake_plugin_properties+=" --property _sahara_username=fedora"
- openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image set $(basename "$HEAT_CFN_IMAGE_URL" ".qcow2") $fake_plugin_properties
- fi
-}
-
-# start_sahara() - Start running processes, including screen
-function start_sahara {
- local service_port=$SAHARA_SERVICE_PORT
- local service_protocol=$SAHARA_SERVICE_PROTOCOL
- if is_service_enabled tls-proxy; then
- service_port=$SAHARA_SERVICE_PORT_INT
- service_protocol="http"
- fi
-
- run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
- run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
- run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE"
-
- echo "Waiting for Sahara to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then
- die $LINENO "Sahara did not start"
- fi
-
- # Start proxies if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT &
- fi
-}
-
-# stop_sahara() - Stop running processes
-function stop_sahara {
- # Kill the Sahara screen windows
- stop_process sahara
- stop_process sahara-api
- stop_process sahara-eng
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/swift b/lib/swift
index 0cd51aa..3207fac 100644
--- a/lib/swift
+++ b/lib/swift
@@ -697,6 +697,21 @@
fi
}
+# install_ceilometermiddleware() - Collect source and prepare
+# note that this doesn't really have anything to do with ceilometer;
+# though ceilometermiddleware has ceilometer in its name as an
+# artifact of history, it is not a ceilometer specific tool. It
+# simply generates pycadf-based notifications about requests and
+# responses on the swift proxy
+function install_ceilometermiddleware {
+ if use_library_from_git "ceilometermiddleware"; then
+ git_clone_by_name "ceilometermiddleware"
+ setup_dev_lib "ceilometermiddleware"
+ else
+ pip_install_gr ceilometermiddleware
+ fi
+}
+
# start_swift() - Start running processes, including screen
function start_swift {
# (re)start memcached to make sure we have a clean memcache.
@@ -772,7 +787,7 @@
stop_process s-${type}
done
# Blast out any stragglers
- pkill -f swift-
+ pkill -f swift- || true
}
function swift_configure_tempurls {
diff --git a/lib/zaqar b/lib/zaqar
index 8d51910..891b0ea 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -128,10 +128,9 @@
configure_redis
fi
- if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- iniset $ZAQAR_CONF DEFAULT notification_driver messaging
- iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
- fi
+ iniset $ZAQAR_CONF DEFAULT notification_driver messaging
+ iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
+
iniset_rpc_backend zaqar $ZAQAR_CONF
cleanup_zaqar
diff --git a/stack.sh b/stack.sh
index 591c0dc..17cbe75 100755
--- a/stack.sh
+++ b/stack.sh
@@ -500,12 +500,8 @@
source $TOP_DIR/lib/database
source $TOP_DIR/lib/rpc_backend
-# Make sure we only have one rpc backend enabled,
-# and the specified rpc backend is available on your platform.
-check_rpc_backend
-
# Service to enable with SSL if ``USE_SSL`` is True
-SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara"
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
die $LINENO "tls-proxy and SSL are mutually exclusive"
@@ -683,6 +679,9 @@
echo_summary "Installing package prerequisites"
source $TOP_DIR/tools/install_prereqs.sh
+# Normalise USE_CONSTRAINTS
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
+
# Configure an appropriate Python environment
if [[ "$OFFLINE" != "True" ]]; then
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
@@ -1023,15 +1022,6 @@
export OS_REGION_NAME=$REGION_NAME
fi
-
-# ZeroMQ
-# ------
-if is_service_enabled zeromq; then
- echo_summary "Starting zeromq receiver"
- run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
-fi
-
-
# Horizon
# -------
diff --git a/stackrc b/stackrc
index f2aafe9..342f9bf 100644
--- a/stackrc
+++ b/stackrc
@@ -149,6 +149,12 @@
# Zero disables timeouts
GIT_TIMEOUT=${GIT_TIMEOUT:-0}
+# Constraints mode
+# - False (default) : update git projects dependencies from global-requirements.
+#
+# - True : use upper-constraints.txt to constrain versions of packages installed
+# and do not edit projects at all.
+USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
# Repositories
# ------------
@@ -225,10 +231,6 @@
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-master}
-# data processing service
-SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
-SAHARA_BRANCH=${SAHARA_BRANCH:-master}
-
# object storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-master}
@@ -290,10 +292,6 @@
GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master}
-# python saharaclient
-GITREPO["python-saharaclient"]=${SAHARACLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
-GITBRANCH["python-saharaclient"]=${SAHARACLIENT_BRANCH:-master}
-
# python swift client library
GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
@@ -367,6 +365,10 @@
GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+# oslo.reports
+GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
+GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
+
# oslo.rootwrap
GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 1f7169c..8dc3ba3 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -35,12 +35,12 @@
ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
-ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth"
+ALL_LIBS+=" oslo.serialization django_openstack_auth"
ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
-ALL_LIBS+=" oslo.cache"
+ALL_LIBS+=" oslo.cache oslo.reports"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 0862135..3a364fe 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -43,6 +43,9 @@
'project_name': args.os_project_name,
},
}
+ if args.os_identity_api_version == '3':
+ self._cloud_data['auth']['user_domain_id'] = 'default'
+ self._cloud_data['auth']['project_domain_id'] = 'default'
if args.os_cacert:
self._cloud_data['cacert'] = args.os_cacert