Merge "Move policy.json creation to _configure_neutron_common"
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 05a8d95..90b7d44 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -28,8 +28,10 @@
local.conf
==========
-The new configuration file is ``local.conf`` and resides in the root
-DevStack directory like the old ``localrc`` file. It is a modified INI
+The new configuration file is ``local.conf`` and should reside in the
+root DevStack directory. An example ``local.conf`` file is provided in
+the ``devstack/samples`` directory. Copy this file into the root
+DevStack directory and adapt it to your needs. It is a modified INI
format file that introduces a meta-section header to carry additional
information regarding the configuration files to be changed.
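A brief illustration, not part of the patch: the meta-section header mentioned above takes the form ``[[ <phase> | <config-file-name> ]]``, so a minimal ``local.conf`` along the lines of the shipped sample could read

    [[local|localrc]]
    ADMIN_PASSWORD=secret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    verbose = True

where the values are placeholders to adapt to your deployment.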
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 557b522..1530a84 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -175,12 +175,12 @@
SERVICE_TOKEN=xyzpdqlazydog
DATABASE_TYPE=mysql
SERVICE_HOST=192.168.42.11
- MYSQL_HOST=192.168.42.11
- RABBIT_HOST=192.168.42.11
- GLANCE_HOSTPORT=192.168.42.11:9292
+ MYSQL_HOST=$SERVICE_HOST
+ RABBIT_HOST=$SERVICE_HOST
+ GLANCE_HOSTPORT=$SERVICE_HOST:9292
ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol
NOVA_VNC_ENABLED=True
- NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html"
+ NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index 6883898..bfd7567 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,5 +1,5 @@
<VirtualHost *:80>
- WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
+ WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP}
WSGIApplicationGroup %{GLOBAL}
@@ -8,7 +8,10 @@
WSGIProcessGroup horizon
DocumentRoot %HORIZON_DIR%/.blackhole/
- Alias /media %HORIZON_DIR%/openstack_dashboard/static
+ Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static
+ Alias %WEBROOT%/static %HORIZON_DIR%/static
+
+ RedirectMatch "^/$" "%WEBROOT%/"
<Directory />
Options FollowSymLinks
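Illustration only: with the default HORIZON_APACHE_ROOT of /dashboard introduced in stackrc further down, and %HORIZON_DIR% substituted as before, the rendered vhost ends up with roughly

    WSGIScriptAlias /dashboard /opt/stack/horizon/openstack_dashboard/wsgi/django.wsgi
    Alias /dashboard/media /opt/stack/horizon/openstack_dashboard/static
    Alias /dashboard/static /opt/stack/horizon/static
    RedirectMatch "^/$" "/dashboard/"

so hits on the server root are redirected to the dashboard prefix. The /opt/stack/horizon path is just the usual checkout location, not something this change mandates.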
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 5d5052a..ffc947a 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -5,3 +5,4 @@
sysfsutils
sg3-utils
python-guestfs # NOPRIME
+cryptsetup
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index 7040b84..b3a468d 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -4,3 +4,4 @@
open-iscsi
sysfsutils
sg3_utils
+cryptsetup
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index c1a8e8f..81278b3 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -4,4 +4,4 @@
genisoimage
sysfsutils
sg3_utils
-
+cryptsetup
diff --git a/functions b/functions
index 5bbe18f..4001e9d 100644
--- a/functions
+++ b/functions
@@ -10,6 +10,10 @@
# - ``GLANCE_HOSTPORT``
#
+# ensure we don't re-source this in the same environment
+[[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0
+declare -r _DEVSTACK_FUNCTIONS=1
+
# Include the common functions
FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
source ${FUNC_DIR}/functions-common
diff --git a/functions-common b/functions-common
index 60cf04c..f6a5253 100644
--- a/functions-common
+++ b/functions-common
@@ -28,7 +28,6 @@
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
# - ``TRACK_DEPENDS``
-# - ``UNDO_REQUIREMENTS``
# - ``http_proxy``, ``https_proxy``, ``no_proxy``
#
@@ -36,6 +35,10 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+# ensure we don't re-source this in the same environment
+[[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0
+declare -r _DEVSTACK_FUNCTIONS_COMMON=1
+
# Global Config Variables
declare -A GITREPO
declare -A GITBRANCH
@@ -852,13 +855,18 @@
# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
function _get_or_create_endpoint_with_interface {
local endpoint_id
+ # TODO(dgonzalez): The check of the region name, as done in the grep
+    # statement below, exists only because keystone does not currently
+    # allow filtering by region name when listing endpoints. If keystone
+ # gets support for this, the check for the region name can be removed.
+ # Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772
endpoint_id=$(openstack endpoint list \
--os-url $KEYSTONE_SERVICE_URI_V3 \
--os-identity-api-version=3 \
--service $1 \
--interface $2 \
--region $4 \
- -c ID -f value)
+ -c ID -c Region -f value | grep $4 | cut -f 1 -d " ")
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
endpoint_id=$(openstack endpoint create \
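Hypothetical illustration of the workaround: with more than one region registered, ``openstack endpoint list -c ID -c Region -f value`` emits one space-separated "ID Region" pair per matching endpoint, e.g.

    4f0e0fd21a4a4e5db4b455a9e54f3c95 RegionOne
    9c0ffee21a4a4e5db4b455a9e54f3c00 RegionTwo

and the ``grep $4 | cut -f 1 -d " "`` pipeline keeps only the ID whose Region column equals the requested region.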
diff --git a/inc/python b/inc/python
index 54e19a7..5c9dc5c 100644
--- a/inc/python
+++ b/inc/python
@@ -67,7 +67,6 @@
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
-# ``USE_CONSTRAINTS``
# pip_install package [package ...]
function pip_install {
local xtrace=$(set +o | grep xtrace)
@@ -105,11 +104,8 @@
fi
cmd_pip="$cmd_pip install"
-
- # Handle a constraints file, if needed.
- if [[ "$USE_CONSTRAINTS" == "True" ]]; then
- cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
- fi
+ # Always apply constraints
+ cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
local pip_version=$(python -c "import pip; \
print(pip.__version__.strip('.')[0])")
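In effect every pip invocation is now constrained, roughly like the sketch below; /opt/stack/requirements is only the usual default for $REQUIREMENTS_DIR, and python-glanceclient is an arbitrary example package:

    pip install -c /opt/stack/requirements/upper-constraints.txt python-glanceclient

Any package that appears in upper-constraints.txt is therefore installed at exactly the pinned version.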
@@ -187,13 +183,13 @@
# use this, especially *oslo* ones
function setup_install {
local project_dir=$1
- setup_package_with_req_sync $project_dir
+ setup_package_with_constraints_edit $project_dir
}
# this should be used for projects which run services, like all services
function setup_develop {
local project_dir=$1
- setup_package_with_req_sync $project_dir -e
+ setup_package_with_constraints_edit $project_dir -e
}
# determine if a project as specified by directory is in
@@ -209,32 +205,16 @@
# ``pip install -e`` the package, which processes the dependencies
# using pip before running `setup.py develop`
#
-# Updates the dependencies in project_dir from the
-# openstack/requirements global list before installing anything.
+# Updates the constraints from REQUIREMENTS_DIR to reflect the
+# future installed state of this package. This ensures that when we
+# install this package we get the from-source version.
#
-# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
+# Uses globals ``REQUIREMENTS_DIR``
# setup_develop directory
-function setup_package_with_req_sync {
+function setup_package_with_constraints_edit {
local project_dir=$1
local flags=$2
- # Don't update repo if local changes exist
- # Don't use buggy "git diff --quiet"
- # ``errexit`` requires us to trap the exit code when the repo is changed
- local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
-
- if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then
- if is_in_projects_txt $project_dir; then
- (cd $REQUIREMENTS_DIR; \
- ./.venv/bin/python update.py $project_dir)
- else
- # soft update projects not found in requirements project.txt
- echo "$project_dir not a constrained repository, soft enforcing requirements"
- (cd $REQUIREMENTS_DIR; \
- ./.venv/bin/python update.py -s $project_dir)
- fi
- fi
-
if [ -n "$REQUIREMENTS_DIR" ]; then
# Constrain this package to this project directory from here on out.
local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
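The constraints edit described in the comment above can be pictured like this, with a made-up package and path; the actual rewrite is performed by tooling in the requirements repository rather than by anything shown in this hunk:

    # before: the PyPI release satisfies the pin
    oslo.config===2.4.0
    # after: the local source tree satisfies it instead
    -e file:///opt/stack/oslo.config#egg=oslo.config

which is how installing the package ends up using the from-source version.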
@@ -245,19 +225,6 @@
setup_package $project_dir $flags
- # We've just gone and possibly modified the user's source tree in an
- # automated way, which is considered bad form if it's a development
- # tree because we've screwed up their next git checkin. So undo it.
- #
- # However... there are some circumstances, like running in the gate
- # where we really really want the overridden version to stick. So provide
- # a variable that tells us whether or not we should UNDO the requirements
- # changes (this will be set to False in the OpenStack ci gate)
- if [ $UNDO_REQUIREMENTS = "True" ]; then
- if [[ $update_requirements != "changed" ]]; then
- (cd $project_dir && git reset --hard)
- fi
- fi
}
# ``pip install -e`` the package, which processes the dependencies
diff --git a/lib/apache b/lib/apache
index c7d69f2..a8e9bc5 100644
--- a/lib/apache
+++ b/lib/apache
@@ -11,7 +11,6 @@
# lib/apache exports the following functions:
#
# - install_apache_wsgi
-# - config_apache_wsgi
# - apache_site_config_for
# - enable_apache_site
# - disable_apache_site
diff --git a/lib/ceilometer b/lib/ceilometer
index ce93ebd..3df75b7 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -211,7 +211,6 @@
cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR
diff --git a/lib/databases/mysql b/lib/databases/mysql
index fb55b60..7ae9a93 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -85,12 +85,12 @@
sudo mysqladmin -u root password $DATABASE_PASSWORD || true
fi
- # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases:
+    # Update the DB to give user '$DATABASE_USER'@'%' full control of all databases:
sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
# Now update ``my.cnf`` for some local needs and restart the mysql service
- # Change ‘bind-address’ from localhost (127.0.0.1) to any (::) and
+ # Change bind-address from localhost (127.0.0.1) to any (::) and
# set default db type to InnoDB
sudo bash -c "source $TOP_DIR/functions && \
iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \
diff --git a/lib/glance b/lib/glance
index f200dca..b1b0f32 100644
--- a/lib/glance
+++ b/lib/glance
@@ -154,7 +154,10 @@
iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift
iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
- iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v2.0/
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id default
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id default
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
# commenting is not strictly necessary but it's confusing to have bad values in conf
inicomment $GLANCE_API_CONF glance_store swift_store_user
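For reference, a sketch of the [ref1] section those iniset calls produce in the swift store config; the bracketed values stand in for the corresponding DevStack variables:

    [ref1]
    user = <SERVICE_TENANT_NAME>:glance-swift
    key = <SERVICE_PASSWORD>
    auth_address = <KEYSTONE_SERVICE_URI>/v3
    user_domain_id = default
    project_domain_id = default
    auth_version = 3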
diff --git a/lib/horizon b/lib/horizon
index b0f306b..9fe0aa8 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -93,6 +93,9 @@
local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
cp $HORIZON_SETTINGS $local_settings
+ _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\"
+ _horizon_config_set $local_settings "" CUSTOM_THEME_PATH \"themes/webroot\"
+
_horizon_config_set $local_settings "" COMPRESS_OFFLINE True
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\"
@@ -122,6 +125,7 @@
s,%HORIZON_DIR%,$HORIZON_DIR,g;
s,%APACHE_NAME%,$APACHE_NAME,g;
s,%DEST%,$DEST,g;
+ s,%WEBROOT%,$HORIZON_APACHE_ROOT,g;
\" $FILES/apache-horizon.template >$horizon_conf"
if is_ubuntu; then
diff --git a/lib/infra b/lib/infra
index 3d68e45..89397de 100644
--- a/lib/infra
+++ b/lib/infra
@@ -22,7 +22,6 @@
# Defaults
# --------
GITDIR["pbr"]=$DEST/pbr
-REQUIREMENTS_DIR=$DEST/requirements
# Entry Points
# ------------
@@ -30,8 +29,6 @@
# install_infra() - Collect source and prepare
function install_infra {
local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
- # bring down global requirements
- git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
[ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
# We don't care about testing git pbr in the requirements venv.
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
diff --git a/lib/ironic b/lib/ironic
index 1323446..b3ad586 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -618,6 +618,7 @@
local node_id=$(ironic node-create $standalone_node_uuid\
--chassis_uuid $chassis_id \
--driver $IRONIC_DEPLOY_DRIVER \
+ --name node-$total_nodes \
-p cpus=$ironic_node_cpu\
-p memory_mb=$ironic_node_ram\
-p local_gb=$ironic_node_disk\
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 498cf46..da8c064 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -730,7 +730,9 @@
function start_neutron_other_agents {
run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
- if is_service_enabled q-vpn; then
+ if is_service_enabled neutron-vpnaas; then
+ : # Started by plugin
+ elif is_service_enabled q-vpn; then
run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
else
run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
@@ -793,7 +795,8 @@
}
# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
-# on startup, or back to the public interface on cleanup
+# on startup, or back to the public interface on cleanup. If no IP is
+# configured on the interface, just add it as a port to the OVS bridge.
function _move_neutron_addresses_route {
local from_intf=$1
local to_intf=$2
@@ -806,7 +809,8 @@
# on configure we will also add $from_intf as a port on $to_intf,
# assuming it is an OVS bridge.
- local IP_BRD=$(ip -f $af a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }')
+ local IP_ADD=""
+ local IP_DEL=""
local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
local ADD_OVS_PORT=""
@@ -826,7 +830,12 @@
ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
fi
- sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
+        if [[ "$IP_BRD" != "" ]]; then
+            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+            IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
+        fi
+
+        $IP_DEL; $IP_ADD; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
fi
}
@@ -834,9 +843,7 @@
# runs that a clean run would need to clean up
function cleanup_neutron {
- if [[ $(ip -f inet a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
- fi
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
_move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
@@ -966,9 +973,9 @@
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND"
+ iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+ iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
fi
_neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE
@@ -983,9 +990,9 @@
iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+ iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+ iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
fi
if ! is_service_enabled q-l3; then
@@ -1018,18 +1025,16 @@
iniset $Q_L3_CONF_FILE DEFAULT verbose True
iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+ iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+ iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
fi
_neutron_setup_interface_driver $Q_L3_CONF_FILE
neutron_plugin_configure_l3_agent
- if [[ $(ip -f inet a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
- fi
+ _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
_move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6"
@@ -1042,9 +1047,9 @@
iniset $Q_META_CONF_FILE DEFAULT verbose True
iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
- iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+ iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+ iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
fi
# Configures keystone for metadata_agent
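The effect of the root_helper moves above is that each agent config now carries an [AGENT] section, which is where the neutron agents look for the rootwrap settings. Sketched with the usual DevStack defaults for Q_RR_COMMAND and Q_RR_DAEMON_COMMAND (illustrative, not literal output):

    [AGENT]
    root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    root_helper_daemon = sudo /usr/local/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf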
diff --git a/lib/swift b/lib/swift
index 96d730e..fc736a6 100644
--- a/lib/swift
+++ b/lib/swift
@@ -97,7 +97,7 @@
# the beginning of the pipeline, before authentication middlewares.
SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain}
-# The ring uses a configurable number of bits from a path’s MD5 hash as
+# The ring uses a configurable number of bits from a path's MD5 hash as
# a partition index that designates a device. The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
# power indicates the partition count. Partitioning the full MD5 hash
@@ -610,7 +610,7 @@
KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
- local another_role=$(openstack role list | awk "/ anotherrole / { print \$2 }")
+ local another_role=$(get_or_create_role "anotherrole")
# NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
# temp urls, which break when uploaded by a non-admin role
diff --git a/lib/tempest b/lib/tempest
index 3a9ba81..e7f825f 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -82,6 +82,21 @@
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
+# Do we want to make a configuration where Tempest has admin credentials
+# on the cloud? We don't always want to, so that we can ensure Tempest
+# would also work on a public cloud.
+TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN)
+
+# Credential provider configuration option variables
+TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
+TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS)
+
+# The number of workers tempest is expected to be run with. This is used for
+# generating an accounts.yaml for running with test accounts. This is also the
+# same variable that devstack-gate uses to specify the number of workers that
+# it will run tempest with.
+TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
+
# Functions
# ---------
@@ -174,11 +189,6 @@
password=${ADMIN_PASSWORD:-secrete}
- # Do we want to make a configuration where Tempest has admin on
- # the cloud. We don't always want to so that we can ensure Tempest
- # would work on a public cloud.
- TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN)
-
# See ``lib/keystone`` where these users and tenants are set up
ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
@@ -313,7 +323,7 @@
fi
if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
# Only Identity v3 is available; then skip Identity API v2 tests
- iniset $TEMPEST_CONFIG identity-feature-enabled v2_api False
+ iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
# In addition, use v3 auth tokens for running all Tempest tests
iniset $TEMPEST_CONFIG identity auth_version v3
else
@@ -335,11 +345,6 @@
# Image Features
iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
- # Auth
- TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
- iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
- iniset $TEMPEST_CONFIG auth tempest_roles "Member"
-
# Compute
iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
@@ -386,7 +391,7 @@
# and is now the default behavior.
iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True}
- # Network
+ # Network
iniset $TEMPEST_CONFIG network api_version 2.0
iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
@@ -468,7 +473,7 @@
if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
# Enabled extensions are either the ones explicitly specified or those available on the API endpoint
volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")}
- # Remove disabled extensions
+ # Remove disabled extensions
volume_api_extensions=$(remove_disabled_extensions $volume_api_extensions $DISABLE_VOLUME_API_EXTENSIONS)
fi
iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions $volume_api_extensions
@@ -545,6 +550,19 @@
sudo chown $STACK_USER $BOTO_CONF
fi
+ # Auth
+ iniset $TEMPEST_CONFIG auth tempest_roles "Member"
+ if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
+ if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
+ tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+        else
+ tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+ fi
+ iniset $TEMPEST_CONFIG auth allow_tenant_isolation False
+ iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
+ else
+ iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+ fi
# Restore IFS
IFS=$ifs
}
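For context, the accounts file generated above is a YAML list of pre-provisioned credentials, one set per test worker; an entry might look like the following made-up example:

    - username: 'tempest_user_1'
      tenant_name: 'tempest_tenant_1'
      password: 'not-a-real-password'

Tempest then draws locked accounts from this pool instead of creating isolated tenants on the fly, which is why allow_tenant_isolation is set to False in that branch.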
diff --git a/stack.sh b/stack.sh
index cc8bc8c..639f72b 100755
--- a/stack.sh
+++ b/stack.sh
@@ -683,14 +683,16 @@
# OpenStack uses a fair number of other projects.
+# Bring down global requirements before any use of pip_install. This is
+# necessary to ensure that the constraints file is in place before we
+# attempt to apply any constraints to pip installs.
+git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+
# Install package requirements
# Source it so the entire environment is available
echo_summary "Installing package prerequisites"
source $TOP_DIR/tools/install_prereqs.sh
-# Normalise USE_CONSTRAINTS
-USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
-
# Configure an appropriate Python environment
if [[ "$OFFLINE" != "True" ]]; then
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
@@ -1421,7 +1423,7 @@
# If you installed Horizon on this server you should be able
# to access the site using your browser.
if is_service_enabled horizon; then
- echo "Horizon is now available at http://$SERVICE_HOST/"
+ echo "Horizon is now available at http://$SERVICE_HOST$HORIZON_APACHE_ROOT"
fi
# If Keystone is present you can point ``nova`` cli to this server
diff --git a/stackrc b/stackrc
index 8beef96..156cb1f 100644
--- a/stackrc
+++ b/stackrc
@@ -87,6 +87,9 @@
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
+# Set the root URL for Horizon
+HORIZON_APACHE_ROOT="/dashboard"
+
# Whether to use 'dev mode' for screen windows. Dev mode works by
# stuffing text into the screen windows so that a developer can use
# ctrl-c, up-arrow, enter to restart the service. Starting services
@@ -149,13 +152,6 @@
# Zero disables timeouts
GIT_TIMEOUT=${GIT_TIMEOUT:-0}
-# Constraints mode
-# - False (default) : update git projects dependencies from global-requirements.
-#
-# - True : use upper-constraints.txt to constrain versions of packages intalled
-# and do not edit projects at all.
-USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS)
-
# Repositories
# ------------
@@ -163,6 +159,9 @@
# Another option is https://git.openstack.org
GIT_BASE=${GIT_BASE:-git://git.openstack.org}
+# The location of REQUIREMENTS once cloned
+REQUIREMENTS_DIR=$DEST/requirements
+
# Which libraries should we install from git instead of using released
# versions on pypi?
#
@@ -627,9 +626,6 @@
# Set default screen name
SCREEN_NAME=${SCREEN_NAME:-stack}
-# Undo requirements changes by global requirements
-UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True}
-
# Allow the use of an alternate protocol (such as https) for service endpoints
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 0f7c962..7b42c8c 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -20,7 +20,7 @@
cd $TOP_DIR
# Import common functions
-source $TOP_DIR/functions
+source $TOP_DIR/stackrc
FILES=$TOP_DIR/files