Merge "Enable oslo.limit to be installed from git repo"
diff --git a/.zuul.yaml b/.zuul.yaml
index d1e356f..b5ab127 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -67,6 +67,16 @@
- controller
- nodeset:
+ name: devstack-single-node-centos-9-stream
+ nodes:
+ - name: controller
+ label: centos-9-stream
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: devstack-single-node-opensuse-15
nodes:
- name: controller
@@ -87,6 +97,16 @@
- controller
- nodeset:
+ name: devstack-single-node-debian-bullseye
+ nodes:
+ - name: controller
+ label: debian-bullseye
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-two-node
nodes:
- name: controller
@@ -524,7 +544,6 @@
c-bak: true
c-sch: true
c-vol: true
- cinder: true
# Services we don't need.
# This section is not really needed, it's for readability.
horizon: false
@@ -590,6 +609,7 @@
# Keep enabling the services here to run with system scope
CINDER_ENFORCE_SCOPE: true
GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
- job:
name: devstack-multinode
@@ -614,15 +634,55 @@
configure_swap_size: 4096
- job:
- name: devstack-async
+ name: devstack-platform-centos-9-stream
parent: tempest-full-py3
- description: Async mode enabled
+ description: CentOS 9 Stream platform test
+ nodeset: devstack-single-node-centos-9-stream
voting: false
+ timeout: 9000
vars:
+ configure_swap_size: 4096
+
+- job:
+ name: devstack-platform-debian-bullseye
+ parent: tempest-full-py3
+ description: Debian Bullseye platform test
+ nodeset: devstack-single-node-debian-bullseye
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 4096
+ # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS
+ # for the time being.
devstack_localrc:
- DEVSTACK_PARALLEL: True
- zuul_copy_output:
- /opt/stack/async: logs
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
- job:
name: devstack-no-tls-proxy
@@ -727,7 +787,8 @@
- devstack-enforce-scope
- devstack-platform-fedora-latest
- devstack-platform-centos-8-stream
- - devstack-async
+ - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bullseye
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 6745614..dd8f21f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -642,6 +642,12 @@
VOLUME_NAME_PREFIX="volume-"
VOLUME_BACKING_FILE_SIZE=24G
+When running highly concurrent tests, the default per-project quotas
+for volumes, backups, or snapshots may be too small. These can be
+adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``,
+or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for
+each is 10.)
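+
+For example, to raise all three quotas, add something like the following
+(the values are only illustrative) to ``local.conf``:
+
+::
+
+ CINDER_QUOTA_VOLUMES=50
+ CINDER_QUOTA_BACKUPS=50
+ CINDER_QUOTA_SNAPSHOTS=50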
+
Keystone
~~~~~~~~
@@ -666,7 +672,6 @@
disable_service horizon
KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
- KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
REGION_NAME=RegionTwo
KEYSTONE_REGION_NAME=RegionOne
@@ -679,17 +684,6 @@
KEYSTONE_REGION_NAME defaults to the same value as REGION_NAME, so we omit
it in the configuration of RegionOne.
-Disabling Identity API v2
-+++++++++++++++++++++++++
-
-The Identity API v2 is deprecated as of Mitaka and it is recommended to only
-use the v3 API. It is possible to setup keystone without v2 API, by doing:
-
-::
-
- ENABLE_IDENTITY_V2=False
-
-
Glance
++++++
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 7c8d2b8..3edd708 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -24,8 +24,6 @@
======================================== ===
Plugin Name URL
======================================== ===
-inspur/venus `https://opendev.org/inspur/venus <https://opendev.org/inspur/venus>`__
-inspur/venus-dashboard `https://opendev.org/inspur/venus-dashboard <https://opendev.org/inspur/venus-dashboard>`__
openstack/aodh `https://opendev.org/openstack/aodh <https://opendev.org/openstack/aodh>`__
openstack/barbican `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
openstack/blazar `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
@@ -101,6 +99,8 @@
openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin <https://opendev.org/openstack/telemetry-tempest-plugin>`__
openstack/trove `https://opendev.org/openstack/trove <https://opendev.org/openstack/trove>`__
openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard <https://opendev.org/openstack/trove-dashboard>`__
+openstack/venus `https://opendev.org/openstack/venus <https://opendev.org/openstack/venus>`__
+openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard <https://opendev.org/openstack/venus-dashboard>`__
openstack/vitrage `https://opendev.org/openstack/vitrage <https://opendev.org/openstack/vitrage>`__
openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard <https://opendev.org/openstack/vitrage-dashboard>`__
openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin <https://opendev.org/openstack/vitrage-tempest-plugin>`__
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1284360..1a353e5 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,5 +1,4 @@
Listen %PUBLICPORT%
-Listen %ADMINPORT%
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
<Directory %KEYSTONE_BIN%>
@@ -20,20 +19,6 @@
%SSLKEYFILE%
</VirtualHost>
-<VirtualHost *:%ADMINPORT%>
- WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%M"
- ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
%SSLLISTEN%<VirtualHost *:443>
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
@@ -49,13 +34,3 @@
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
-
-Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_admin>
- SetHandler wsgi-script
- Options +ExecCGI
-
- WSGIProcessGroup keystone-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
-</Location>
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index 2f1f139..d3b9be8 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,4 +1,4 @@
-dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
+dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config
changetype: modify
replace: olcSuffix
olcSuffix: ${BASE_DN}
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 64befc5..33a55f8 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
ceph # NOPRIME
-redhat-lsb-core
+redhat-lsb-core # not:rhel9
xfsprogs
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 68e5472..7ce5a72 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,9 +1,10 @@
cryptsetup
dosfstools
-genisoimage
+genisoimage # not:rhel9
iscsi-initiator-utils
libosinfo
lvm2
sg3_utils
# Stuff for diablo volumes
sysfsutils
+xorriso # not:rhel8
diff --git a/files/rpms/nova b/files/rpms/nova
index 8ea8ccc..9522e57 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,7 +3,7 @@
dnsmasq # for q-dhcp
dnsmasq-utils # for dhcp_release
ebtables
-genisoimage # required for config_drive
+genisoimage # not:rhel9 required for config_drive
iptables
iputils
kernel-modules
@@ -13,3 +13,4 @@
rabbitmq-server # NOPRIME
sqlite
sudo
+xorriso # not:rhel8
diff --git a/files/rpms/swift b/files/rpms/swift
index 18c957c..b6009a3 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -4,4 +4,4 @@
rsync-daemon
sqlite
xfsprogs
-xinetd # not:f34
+xinetd # not:f34,rhel9
diff --git a/functions-common b/functions-common
index 11679e4..80f4355 100644
--- a/functions-common
+++ b/functions-common
@@ -85,7 +85,7 @@
if [ -f "$SSL_BUNDLE_FILE" ]; then
CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
fi
- # demo -> devstack
+ # devstack: user with the member role on demo project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack \
@@ -96,18 +96,7 @@
--os-password $ADMIN_PASSWORD \
--os-project-name demo
- # alt_demo -> devstack-alt
- $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack-alt \
- --os-region-name $REGION_NAME \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_SERVICE_URI \
- --os-username alt_demo \
- --os-password $ADMIN_PASSWORD \
- --os-project-name alt_demo
-
- # admin -> devstack-admin
+ # devstack-admin: user with the admin role on the admin project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-admin \
@@ -118,7 +107,51 @@
--os-password $ADMIN_PASSWORD \
--os-project-name admin
- # admin with a system-scoped token -> devstack-system
+ # devstack-alt: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-member: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-reader: user with the reader role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-reader: user with the reader role on demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+
+ # devstack-system-admin: user with the admin role on the system
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-system-admin \
@@ -129,6 +162,28 @@
--os-password $ADMIN_PASSWORD \
--os-system-scope all
+ # devstack-system-member: user with the member role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
+ # devstack-system-reader: user with the reader role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
cat >> $CLOUDS_YAML <<EOF
functional:
image_name: $DEFAULT_IMAGE_NAME
@@ -346,12 +401,19 @@
# - os_VENDOR
# - os_PACKAGE
function GetOSVersion {
- # We only support distros that provide a sane lsb_release
- _ensure_lsb_release
+ # CentOS Stream 9 does not provide lsb_release
+ source /etc/os-release
+ if [[ "${ID}${VERSION}" == "centos9" ]]; then
+ os_RELEASE=${VERSION_ID}
+ os_CODENAME="n/a"
+ os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+ else
+ _ensure_lsb_release
- os_RELEASE=$(lsb_release -r -s)
- os_CODENAME=$(lsb_release -c -s)
- os_VENDOR=$(lsb_release -i -s)
+ os_RELEASE=$(lsb_release -r -s)
+ os_CODENAME=$(lsb_release -c -s)
+ os_VENDOR=$(lsb_release -i -s)
+ fi
if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
os_PACKAGE="deb"
@@ -547,7 +609,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to 'required-projects' in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+ die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
fi
git_timed clone $git_clone_flags $git_remote $git_dest
fi
@@ -559,7 +621,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to the \$PROJECTS variable in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+ die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
fi
# '--branch' can also take tags
git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
@@ -936,6 +998,37 @@
echo $user_role_id
}
+# Gets or adds user role to system
+# Usage: get_or_add_user_system_role <role> <user> <system> [<user_domain>]
+function get_or_add_user_system_role {
+ local user_role_id
+ local domain_args
+
+ domain_args=$(_get_domain_args $4)
+
+ # Gets user role id
+ user_role_id=$(openstack role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ if [[ -z "$user_role_id" ]]; then
+ # Adds role to user and get it
+ openstack role add $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args
+ user_role_id=$(openstack role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ fi
+ echo $user_role_id
+}
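+
+# Example (illustrative): get_or_add_user_system_role member <user-id> "all"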
+
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
diff --git a/lib/cinder b/lib/cinder
index f3e2430..76314c1 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -286,6 +286,11 @@
iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES
fi
+ # set default quotas
+ iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10}
+ iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10}
+ iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10}
+
# Avoid RPC timeouts in slow CI and test environments by doubling the
# default response timeout set by RPC clients. See bug #1873234 for more
# details and example failures.
@@ -348,7 +353,9 @@
# Format logging
setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI
- write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ if is_service_enabled c-api; then
+ write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ fi
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
configure_cinder_driver
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
index d7c977e..c7ec306 100644
--- a/lib/cinder_backups/swift
+++ b/lib/cinder_backups/swift
@@ -24,6 +24,9 @@
# to use it.
iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+ if is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE
+ fi
}
# init_cinder_backup_swift: nothing to do
diff --git a/lib/databases/mysql b/lib/databases/mysql
index d4969d7..d0fa119 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -25,6 +25,8 @@
# provide a mysql.service symlink for backwards-compatibility, but
# let's not rely on that.
MYSQL_SERVICE_NAME=mariadb
+ elif [[ "$DISTRO" == "bullseye" ]]; then
+ MYSQL_SERVICE_NAME=mariadb
fi
fi
@@ -105,7 +107,7 @@
# In MariaDB, e.g. on Ubuntu, the socket plugin is used for root
# authentication, so it only works via sudo. To restore the old "mysql like"
# behaviour, we need to change the auth plugin for the root user.
- if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
fi
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 618834b..1f347f5 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -95,7 +95,6 @@
function install_database_postgresql {
echo_summary "Installing postgresql"
- deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle"
local pgpass=$HOME/.pgpass
if [[ ! -e $pgpass ]]; then
cat <<EOF > $pgpass
diff --git a/lib/glance b/lib/glance
index f18bea9..4c2755f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -288,24 +288,17 @@
function configure_glance_quotas {
- # NOTE(danms): We need to have some of the OS_ things unset in
- # order to use system scope, which is required for creating these
- # limits. This is a hack, but I dunno how else to get osc to use
- # system scope.
+ # Registered limit resources in keystone are system-specific resources.
+ # Make sure we use a system-scoped token to interact with this API.
- bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
- --region $REGION_NAME image_size_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
- --region $REGION_NAME image_stage_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit 100 --region $REGION_NAME \
- image_count_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit 100 --region $REGION_NAME \
- image_count_uploading"
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_uploading
# Tell glance to use these limits
iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True
diff --git a/lib/keystone b/lib/keystone
index 66e867c..b953972 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -9,7 +9,6 @@
# - ``tls`` file
# - ``DEST``, ``STACK_USER``
# - ``FILES``
-# - ``IDENTITY_API_VERSION``
# - ``BASE_SQL_CONN``
# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
# - ``S3_SERVICE_PORT`` (template backend only)
@@ -50,9 +49,7 @@
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
-KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini
KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
-KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin
# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values:
# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi
@@ -81,21 +78,12 @@
KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
-# Set Keystone interface configuration
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
-
# Public facing bits
KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-# Bind hosts
-KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
-
# Set the project for service accounts in Keystone
SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default}
SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service}
@@ -106,7 +94,6 @@
# if we are running with SSL use https protocols
if is_service_enabled tls-proxy; then
- KEYSTONE_AUTH_PROTOCOL="https"
KEYSTONE_SERVICE_PROTOCOL="https"
fi
@@ -134,6 +121,9 @@
# Cache settings
KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+# Whether to create a keystone admin endpoint for legacy applications
+KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT)
+
# Functions
# ---------
@@ -154,11 +144,8 @@
sudo rm -f $(apache_site_config_for keystone)
else
stop_process "keystone"
- # TODO: remove admin at pike-2
remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
sudo rm -f $(apache_site_config_for keystone-wsgi-public)
- sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
fi
}
@@ -171,12 +158,10 @@
local keystone_certfile=""
local keystone_keyfile=""
local keystone_service_port=$KEYSTONE_SERVICE_PORT
- local keystone_auth_port=$KEYSTONE_AUTH_PORT
local venv_path=""
if is_service_enabled tls-proxy; then
keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
- keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
fi
if [[ ${USE_VENV} = True ]]; then
venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
@@ -185,7 +170,6 @@
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$keystone_service_port|g;
- s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%SSLLISTEN%|$keystone_ssl_listen|g;
s|%SSLENGINE%|$keystone_ssl|g;
@@ -223,22 +207,17 @@
iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_port=$KEYSTONE_AUTH_PORT
if is_service_enabled tls-proxy; then
# Set the service ports for a proxy to take the originals
service_port=$KEYSTONE_SERVICE_PORT_INT
- auth_port=$KEYSTONE_AUTH_PORT_INT
fi
- # Override the endpoints advertised by keystone (the public_endpoint and
- # admin_endpoint) so that clients use the correct endpoint. By default, the
- # keystone server uses the public_port and admin_port which isn't going to
- # work when you want to use a different port (in the case of proxy), or you
- # don't want the port (in the case of putting keystone on a path in
- # apache).
+ # Override the endpoints advertised by keystone so that clients use the correct
+ # endpoint. By default, the keystone server uses the public_port which isn't
+ # going to work when you want to use a different port (in the case of proxy),
+ # or you don't want the port (in the case of putting keystone on a path in apache).
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
- iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
@@ -261,7 +240,6 @@
_config_keystone_apache_wsgi
else # uwsgi
write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity"
- write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin"
fi
iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
@@ -303,20 +281,28 @@
# admins admin admin admin
# nonadmins demo, alt_demo member, anotherrole demo, alt_demo
+# System User Roles
+# ------------------------------------------------------------------
+# all admin admin
+# all system_reader reader
+# all system_member member
+
# Migrated from keystone_data.sh
function create_keystone_accounts {
# The keystone bootstrapping process (performed via keystone-manage
- # bootstrap) creates an admin user, admin role, member role, and admin
+ # bootstrap) creates an admin user and an admin
# project. As a sanity check we exercise the CLI to retrieve the IDs for
# these values.
local admin_project
admin_project=$(openstack project show "admin" -f value -c id)
local admin_user
admin_user=$(openstack user show "admin" -f value -c id)
+ # These roles are also created during bootstrap but we don't need their IDs
local admin_role="admin"
local member_role="member"
+ local reader_role="reader"
async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
@@ -352,21 +338,53 @@
async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+
async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
- # alt_demo
+ # Create a user to act as a reader on project demo
+ local demo_reader
+ demo_reader=$(get_or_create_user "demo_reader" \
+ "$ADMIN_PASSWORD" "default" "demo_reader@example.com")
+
+ async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project
+
+ # Create a different project called alt_demo
local alt_demo_project
alt_demo_project=$(get_or_create_project "alt_demo" default)
+ # Create a user to act as member, admin and anotherrole on project alt_demo
local alt_demo_user
alt_demo_user=$(get_or_create_user "alt_demo" \
"$ADMIN_PASSWORD" "default" "alt_demo@example.com")
- async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
- async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+ async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project
async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+ # Create another user to act as a member on project alt_demo
+ local alt_demo_member
+ alt_demo_member=$(get_or_create_user "alt_demo_member" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com")
+ async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project
+
+ # Create another user to act as a reader on project alt_demo
+ local alt_demo_reader
+ alt_demo_reader=$(get_or_create_user "alt_demo_reader" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com")
+ async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project
+
+ # Create two users, give one the member role on the system and the other the
+ # reader role on the system. These two users model system-member and
+ # system-reader personas. The admin user already has the admin role on the
+ # system and we can re-use this user as a system-admin.
+ system_member_user=$(get_or_create_user "system_member" \
+ "$ADMIN_PASSWORD" "default" "system_member@example.com")
+ async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all"
+
+ system_reader_user=$(get_or_create_user "system_reader" \
+ "$ADMIN_PASSWORD" "default" "system_reader@example.com")
+ async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all"
+
# groups
local admin_group
admin_group=$(get_or_create_group "admins" \
@@ -381,8 +399,9 @@
async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
- async_wait ks-demo-{member,admin,another,invis}
- async_wait ks-alt-{member,admin,another}
+ async_wait ks-demo-{member,admin,another,invis,reader}
+ async_wait ks-alt-{admin,another,member-user,reader-user}
+ async_wait ks-system-{member,reader}
async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
if is_service_enabled ldap; then
@@ -518,7 +537,7 @@
function start_keystone {
# Get right service port for testing
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
+ local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL
if is_service_enabled tls-proxy; then
service_port=$KEYSTONE_SERVICE_PORT_INT
auth_protocol="http"
@@ -537,7 +556,7 @@
# unencrypted traffic at this point.
# If running in Apache, use the path rather than port.
- local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
+ local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/
if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
die $LINENO "keystone did not start"
@@ -546,7 +565,6 @@
# Start proxies if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
- start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
fi
# (re)start memcached to make sure we have a clean memcache.
@@ -567,11 +585,8 @@
# This function uses the following GLOBAL variables:
# - ``KEYSTONE_BIN_DIR``
# - ``ADMIN_PASSWORD``
-# - ``IDENTITY_API_VERSION``
# - ``REGION_NAME``
-# - ``KEYSTONE_SERVICE_PROTOCOL``
-# - ``KEYSTONE_SERVICE_HOST``
-# - ``KEYSTONE_SERVICE_PORT``
+# - ``KEYSTONE_SERVICE_URI``
function bootstrap_keystone {
$KEYSTONE_BIN_DIR/keystone-manage bootstrap \
--bootstrap-username admin \
@@ -580,8 +595,16 @@
--bootstrap-role-name admin \
--bootstrap-service-name keystone \
--bootstrap-region-id "$REGION_NAME" \
- --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \
--bootstrap-public-url "$KEYSTONE_SERVICE_URI"
+ if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then
+ openstack endpoint create --region "$REGION_NAME" \
+ --os-username admin \
+ --os-user-domain-id default \
+ --os-password "$ADMIN_PASSWORD" \
+ --os-project-name admin \
+ --os-project-domain-id default \
+ keystone admin "$KEYSTONE_SERVICE_URI"
+ fi
}
# create_ldap_domain() - Create domain file and initialize domain with a user
diff --git a/lib/ldap b/lib/ldap
index 5a53d0e..ea5faa1 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -33,14 +33,17 @@
if is_ubuntu; then
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=mdb
LDAP_ROOTPW_COMMAND=replace
elif is_fedora; then
LDAP_OLCDB_NUMBER=2
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
elif is_suse; then
# SUSE has slappasswd in /usr/sbin/
PATH=$PATH:/usr/sbin/
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
LDAP_SERVICE_NAME=ldap
fi
@@ -56,6 +59,7 @@
local slappass=$2
sed -e "
s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE|
s|\${SLAPPASS}|$slappass|
s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
s|\${BASE_DC}|$LDAP_BASE_DC|
@@ -157,7 +161,7 @@
slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION
slapd slapd/domain string Users
slapd shared/organization string $LDAP_DOMAIN
- slapd slapd/backend string HDB
+ slapd slapd/backend string ${LDAP_OLCDB_TYPE^^}
slapd slapd/purge_database boolean true
slapd slapd/move_old_database boolean true
slapd slapd/allow_ldap_v2 boolean false
diff --git a/lib/neutron b/lib/neutron
index 885df97..15d548e 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -37,6 +37,11 @@
NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
NEUTRON_DIR=$DEST/neutron
+# If NEUTRON_ENFORCE_SCOPE == True, set "enforce_scope" and
+# "enforce_new_defaults" to True in Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
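+# It can be enabled from local.conf (illustrative):
+#   NEUTRON_ENFORCE_SCOPE=True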
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
# Distributed Virtual Router (DVR) configuration
# Can be:
@@ -232,6 +237,7 @@
if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
neutron_ml2_extension_driver_add port_security
fi
+ configure_rbac_policies
fi
# Neutron OVS or LB agent
@@ -612,6 +618,19 @@
fi
}
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
+}
+
+
function configure_neutron_nova {
if is_neutron_legacy_enabled; then
# Call back to old function
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 3196849..b906a1b 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -90,6 +90,11 @@
NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+# If NEUTRON_ENFORCE_SCOPE == True, set "enforce_scope" and
+# "enforce_new_defaults" to True in Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
# Agent binaries. Note, binary paths for other agents are set in per-service
# scripts in lib/neutron_plugins/services/
AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -275,6 +280,12 @@
# L3 Service functions
source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
+
# Use security group or not
if has_neutron_plugin_security_group; then
Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
@@ -369,6 +380,21 @@
configure_ovn_plugin
fi
+ # Configure Neutron's advanced services
+ if is_service_enabled q-placement neutron-placement; then
+ configure_placement_extension
+ fi
+ if is_service_enabled q-trunk neutron-trunk; then
+ configure_trunk_extension
+ fi
+ if is_service_enabled q-qos neutron-qos; then
+ configure_qos
+ if is_service_enabled q-l3 neutron-l3; then
+ configure_l3_agent_extension_fip_qos
+ configure_l3_agent_extension_gateway_ip_qos
+ fi
+ fi
+
iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
# devstack is not a tool for running uber scale OpenStack
# clouds, therefore running without a dedicated RPC worker
@@ -468,6 +494,19 @@
if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
fi
+ configure_rbac_policies
+}
+
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
}
# Start running OVN processes
@@ -543,11 +582,7 @@
function start_mutnauq_other_agents {
run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
- if is_service_enabled neutron-vpnaas; then
- : # Started by plugin
- else
- run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
- fi
+ run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
@@ -1022,6 +1057,15 @@
test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
}
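+
+# Append an L2 agent extension to the comma-separated L2_AGENT_EXTENSIONS
+# list, avoiding duplicate entries.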
+function plugin_agent_add_l2_agent_extension {
+ local l2_agent_extension=$1
+ if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+ L2_AGENT_EXTENSIONS=$l2_agent_extension
+ elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+ L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
+ fi
+}
+
# Restore xtrace
$_XTRACE_NEUTRON
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index e1f868f..f00feac 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -156,5 +156,9 @@
return 0
}
+function configure_qos_ml2 {
+ neutron_ml2_extension_driver_add "qos"
+}
+
# Restore xtrace
$_XTRACE_NEUTRON_ML2
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 1f737fb..3fc3828 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -119,7 +119,13 @@
OVS_DATADIR=$DATA_DIR/ovs
OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
-OVN_DATADIR=$DATA_DIR/ovn
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ OVN_DATADIR=$DATA_DIR/ovn
+else
+ # When using OVN from packages, the data dir for OVN DBs is
+ # /var/lib/ovn
+ OVN_DATADIR=/var/lib/ovn
+fi
OVN_SHAREDIR=$OVS_PREFIX/share/ovn
OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts
OVN_RUNDIR=$OVS_PREFIX/var/run/ovn
@@ -561,14 +567,19 @@
# create new ones on each devstack run.
_disable_libvirt_apparmor
+ local mkdir_cmd="mkdir -p ${OVN_DATADIR}"
- mkdir -p $OVN_DATADIR
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
+ mkdir_cmd="sudo ${mkdir_cmd}"
+ fi
+
+ $mkdir_cmd
mkdir -p $OVS_DATADIR
rm -f $OVS_DATADIR/*.db
rm -f $OVS_DATADIR/.*.db.~lock~
- rm -f $OVN_DATADIR/*.db
- rm -f $OVN_DATADIR/.*.db.~lock~
+ sudo rm -f $OVN_DATADIR/*.db
+ sudo rm -f $OVN_DATADIR/.*.db.~lock~
}
function _start_ovs {
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 294171f..08951d1 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -211,5 +211,5 @@
# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module
function load_conntrack_gre_module {
- sudo modprobe nf_conntrack_proto_gre
+ load_module nf_conntrack_proto_gre False
}
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 98b96ac..ccb5398 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -100,6 +100,11 @@
SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26}
SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
+NEUTRON_ADMIN_CLOUD_NAME="devstack-admin"
+if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin"
+fi
+
default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
@@ -151,6 +156,10 @@
project_id=$(openstack project list | grep " demo " | get_field 1)
die_if_not_set $LINENO project_id "Failure retrieving project_id for demo"
+ local admin_project_id
+ admin_project_id=$(openstack project list | grep " admin " | get_field 1)
+ die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin"
+
# Allow drivers that need to create an initial network to do so here
if type -p neutron_plugin_create_initial_network_profile > /dev/null; then
neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
@@ -159,10 +168,10 @@
if is_networking_extension_supported "auto-allocated-topology"; then
if [[ "$USE_SUBNETPOOL" == "True" ]]; then
if [[ "$IP_VERSION" =~ 4.* ]]; then
- SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id)
+ SUBNETPOOL_V4_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id)
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
- SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id)
+ SUBNETPOOL_V6_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --project "$admin_project_id" --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id)
fi
fi
fi
@@ -170,14 +179,14 @@
if is_provider_network; then
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
if [[ "$IP_VERSION" =~ 4.* ]]; then
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
+ SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
fi
@@ -187,7 +196,7 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
fi
- IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
+ IPV6_SUBNET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
fi
@@ -197,7 +206,7 @@
sudo ip link set $PUBLIC_INTERFACE up
fi
else
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -215,11 +224,11 @@
# Create a router, and add the private subnet as one of its interfaces
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router create $Q_ROUTER_NAME --project $admin_project_id | grep ' id ' | get_field 2)
die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
fi
@@ -229,9 +238,9 @@
fi
# Create an external network, and a subnet. Configure the external network as router gw
if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} --project $admin_project_id | grep ' id ' | get_field 2)
else
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --project $admin_project_id | grep ' id ' | get_field 2)
fi
die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
@@ -258,11 +267,12 @@
if [[ -n "$NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $NETWORK_GATEWAY "
fi
+
subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
local subnet_id
- subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
echo $subnet_id
}
@@ -285,14 +295,17 @@
subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} "
subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
local ipv6_subnet_id
- ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ ipv6_subnet_id=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
echo $ipv6_subnet_id
}
# Create public IPv4 subnet
function _neutron_create_public_subnet_v4 {
- local subnet_params="--ip-version 4 "
+ local admin_project_id
+ admin_project_id=$(openstack project list | grep " admin " | get_field 1)
+ die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin"
+ local subnet_params="--ip-version 4 --project $admin_project_id"
subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
@@ -300,26 +313,29 @@
subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp "
subnet_params+="$PUBLIC_SUBNET_NAME"
local id_and_ext_gw_ip
- id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
echo $id_and_ext_gw_ip
}
# Create public IPv6 subnet
function _neutron_create_public_subnet_v6 {
- local subnet_params="--ip-version 6 "
+ local admin_project_id
+ admin_project_id=$(openstack project list | grep " admin " | get_field 1)
+ die_if_not_set $LINENO admin_project_id "Failure retrieving project_id for admin"
+ local subnet_params="--ip-version 6 --project $admin_project_id "
subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp "
subnet_params+="$IPV6_PUBLIC_SUBNET_NAME"
local ipv6_id_and_ext_gw_ip
- ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ ipv6_id_and_ext_gw_ip=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
echo $ipv6_id_and_ext_gw_ip
}
# Configure neutron router for IPv4 public access
function _neutron_configure_router_v4 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
# Create a public subnet on the external network
local id_and_ext_gw_ip
id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -327,7 +343,7 @@
ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
# Configure the external network as the default router gateway
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
# This logic is specific to using OVN or the l3-agent for layer 3
if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
@@ -354,7 +370,7 @@
sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
sudo ip link set $ext_gw_interface up
fi
- ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
+ ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
fi
_neutron_set_router_id
@@ -363,7 +379,7 @@
# Configure neutron router for IPv6 public access
function _neutron_configure_router_v6 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
# Create a public subnet on the external network
local ipv6_id_and_ext_gw_ip
ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -375,7 +391,7 @@
# If the external network has not already been set as the default router
# gateway when configuring an IPv4 public subnet, do so now
if [[ "$IP_VERSION" == "6" ]]; then
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
fi
# This logic is specific to using OVN or the l3-agent for layer 3
@@ -396,7 +412,7 @@
sudo sysctl -w net.ipv6.conf.all.forwarding=1
# Configure and enable public bridge
# Override global IPV6_ROUTER_GW_IP with the true value from neutron
- IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
+ IPV6_ROUTER_GW_IP=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
if is_neutron_ovs_base_plugin; then
@@ -424,6 +440,15 @@
function is_networking_extension_supported {
local extension=$1
# TODO(sc68cal) cache this instead of calling every time
- EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
+ EXT_LIST=$(openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" extension list --network -c Alias -f value)
[[ $EXT_LIST =~ $extension ]] && return 0
}
+
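+# Append an L3 agent extension to the comma-separated L3_AGENT_EXTENSIONS
+# list, avoiding duplicate entries.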
+function plugin_agent_add_l3_agent_extension {
+ local l3_agent_extension=$1
+ if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then
+ L3_AGENT_EXTENSIONS=$l3_agent_extension
+ elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then
+ L3_AGENT_EXTENSIONS+=",$l3_agent_extension"
+ fi
+}
diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement
new file mode 100644
index 0000000..3ec185b
--- /dev/null
+++ b/lib/neutron_plugins/services/placement
@@ -0,0 +1,21 @@
+#!/bin/bash
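+#
+# Enable Neutron's "placement" service plugin and configure the [placement]
+# section of neutron.conf with the service credentials it needs.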
+
+function configure_placement_service_plugin {
+ neutron_service_plugin_class_add "placement"
+}
+
+function configure_placement_neutron {
+ iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE"
+ iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME"
+ iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD"
+ iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME"
+ iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement region_name "$REGION_NAME"
+}
+
+function configure_placement_extension {
+ configure_placement_service_plugin
+ configure_placement_neutron
+}
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
new file mode 100644
index 0000000..af9eb3d
--- /dev/null
+++ b/lib/neutron_plugins/services/qos
@@ -0,0 +1,30 @@
+#!/bin/bash
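+#
+# Enable Neutron's QoS service plugin, the core plugin's "qos" extension
+# driver and the related L2/L3 agent extensions.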
+
+function configure_qos_service_plugin {
+ neutron_service_plugin_class_add "qos"
+}
+
+
+function configure_qos_core_plugin {
+ configure_qos_$NEUTRON_CORE_PLUGIN
+}
+
+
+function configure_qos_l2_agent {
+ plugin_agent_add_l2_agent_extension "qos"
+}
+
+
+function configure_qos {
+ configure_qos_service_plugin
+ configure_qos_core_plugin
+ configure_qos_l2_agent
+}
+
+function configure_l3_agent_extension_fip_qos {
+ plugin_agent_add_l3_agent_extension "fip_qos"
+}
+
+function configure_l3_agent_extension_gateway_ip_qos {
+ plugin_agent_add_l3_agent_extension "gateway_ip_qos"
+}
diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk
new file mode 100644
index 0000000..8e0f694
--- /dev/null
+++ b/lib/neutron_plugins/services/trunk
@@ -0,0 +1,5 @@
+#!/bin/bash
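+#
+# Enable Neutron's "trunk" service plugin.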
+
+function configure_trunk_extension {
+ neutron_service_plugin_class_add "trunk"
+}
diff --git a/lib/nova b/lib/nova
index f4f4797..1420183 100644
--- a/lib/nova
+++ b/lib/nova
@@ -260,7 +260,8 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
- LIBVIRT_CPU_MODE=none
+ LIBVIRT_CPU_MODE=custom
+ LIBVIRT_CPU_MODEL=Nehalem
if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
@@ -298,6 +299,9 @@
fi
fi
+ # Ensure each compute host uses a unique iSCSI initiator
+ echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
+
if [[ ${ISCSID_DEBUG} == "True" ]]; then
# Install an override that starts iscsid with debugging
# enabled.
@@ -311,6 +315,10 @@
sudo systemctl daemon-reload
fi
+ # Set the CHAP algorithms. The default chap algorithm is MD5, which will
+ # not work under FIPS.
+ iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
+
# ensure that iscsid is started, even when disabled by default
restart_service iscsid
fi
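The intended result of the two additions above, shown only for illustration (the initiator name is generated by iscsi-iname, so the value will differ on every host, and the exact file layout may vary):

    # /etc/iscsi/initiatorname.iscsi
    InitiatorName=iqn.2005-03.org.open-iscsi:0123456789ab
    # /etc/iscsi/iscsid.conf
    node.session.auth.chap_algs = SHA3-256,SHA256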
@@ -475,7 +483,8 @@
fi
# nova defaults to genisoimage but only mkisofs is available for 15.0+
- if is_suse; then
+ # rhel provides mkisofs symlink to genisoimage or xorriso appropriately
+ if is_suse || is_fedora; then
iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
fi
@@ -484,8 +493,13 @@
iniset $NOVA_CONF upgrade_levels compute "auto"
- write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
- write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ if is_service_enabled n-api; then
+ write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+ fi
+
+ if is_service_enabled n-api-meta; then
+ write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ fi
if is_service_enabled ceilometer; then
iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
@@ -827,7 +841,7 @@
NOVNC_WEB_DIR=/usr/share/novnc
install_package novnc
else
- NOVNC_WEB_DIR=$DEST/noVNC
+ NOVNC_WEB_DIR=$DEST/novnc
git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
fi
fi
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 321775d..c1cd132 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,6 +40,9 @@
configure_libvirt
iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
+ if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then
+ iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL"
+ fi
# Do not enable USB tablet input devices to avoid QEMU CPU overhead.
iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
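With the QEMU fallback defaults set earlier (LIBVIRT_TYPE=qemu, LIBVIRT_CPU_MODE=custom, LIBVIRT_CPU_MODEL=Nehalem), the resulting nova.conf fragment would look roughly like this; all values can still be overridden in local.conf:

    [libvirt]
    virt_type = qemu
    cpu_mode = custom
    cpu_model = Nehalem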
diff --git a/lib/swift b/lib/swift
index 9885241..9c13701 100644
--- a/lib/swift
+++ b/lib/swift
@@ -430,7 +430,7 @@
swift_pipeline+=" authtoken"
if is_service_enabled s3api;then
swift_pipeline+=" s3token"
- iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3}
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
fi
swift_pipeline+=" keystoneauth"
@@ -521,7 +521,7 @@
local auth_vers
auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
- if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then
+ if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then
iniset ${testfile} func_test auth_port 443
else
iniset ${testfile} func_test auth_port 80
@@ -866,12 +866,15 @@
function swift_configure_tempurls {
# note we are using swift credentials!
- OS_USERNAME=swift \
- OS_PASSWORD=$SERVICE_PASSWORD \
- OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \
- OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- openstack object store account \
+ openstack --os-cloud "" \
+ --os-region-name $REGION_NAME \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username=swift \
+ --os-password=$SERVICE_PASSWORD \
+ --os-user-domain-name=$SERVICE_DOMAIN_NAME \
+ --os-project-name=$SERVICE_PROJECT_NAME \
+ --os-project-domain-name=$SERVICE_DOMAIN_NAME \
+ object store account \
set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
}
diff --git a/lib/tempest b/lib/tempest
index 8fd54c5..bdbd3ca 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -90,6 +90,10 @@
# it will run tempest with
TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
+NEUTRON_ADMIN_CLOUD_NAME="devstack-admin"
+if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ NEUTRON_ADMIN_CLOUD_NAME="devstack-system-admin"
+fi
# Functions
# ---------
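The cloud names referenced above are expected to match entries that write_clouds_yaml generates in clouds.yaml. Purely as an illustration of the shape of such an entry (the real file is generated, and the values below are placeholders), a system-scoped admin cloud might look like:

    devstack-system-admin:
      auth:
        auth_url: http://192.0.2.10/identity
        username: admin
        password: <ADMIN_PASSWORD>
        user_domain_id: default
        system_scope: all
      identity_api_version: "3"
      region_name: RegionOne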
@@ -115,7 +119,7 @@
local tmp_c
tmp_c=$1
if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
- (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+ (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > $tmp_c
else
echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -287,8 +291,8 @@
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME)
# make sure shared network presence does not confuse the tempest tests
- openstack --os-cloud devstack-admin network create --share shared
- openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" network create --share shared --project "$admin_project_id"
+ openstack --os-cloud "$NEUTRON_ADMIN_CLOUD_NAME" --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet --project "$admin_project_id"
fi
iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -443,6 +447,8 @@
iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
+ iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE"
+
# Scenario
SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
diff --git a/openrc b/openrc
index beeaebe..6d488bb 100644
--- a/openrc
+++ b/openrc
@@ -74,7 +74,7 @@
fi
# Identity API version
-export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
+export OS_IDENTITY_API_VERSION=3
# Ask keystoneauth1 to use keystone
export OS_AUTH_TYPE=password
diff --git a/stack.sh b/stack.sh
index 48f61fb..a10e6ef 100755
--- a/stack.sh
+++ b/stack.sh
@@ -227,7 +227,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8"
+SUPPORTED_DISTROS="bullseye|focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -300,13 +300,17 @@
}
function _install_rdo {
- if [[ "$TARGET_BRANCH" == "master" ]]; then
- # rdo-release.el8.rpm points to latest RDO release, use that for master
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
- else
- # For stable branches use corresponding release rpm
- rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
- sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ if [[ $DISTRO == "rhel8" ]]; then
+ if [[ "$TARGET_BRANCH" == "master" ]]; then
+ # rdo-release.el8.rpm points to latest RDO release, use that for master
+ sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ else
+ # For stable branches use corresponding release rpm
+ rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
+ sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ fi
+ elif [[ $DISTRO == "rhel9" ]]; then
+ sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo
fi
sudo dnf -y update
}
@@ -385,6 +389,10 @@
# RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
# Patch: https://github.com/rpm-software-management/dnf/pull/1448
echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
+elif [[ $DISTRO == "rhel9" ]]; then
+ sudo dnf config-manager --set-enabled crb
+ # rabbitmq and other packages are provided by RDO repositories.
+ _install_rdo
fi
# Ensure python is installed
@@ -876,7 +884,7 @@
install_keystonemiddleware
if is_service_enabled keystone; then
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
stack_install_service keystone
configure_keystone
fi
@@ -1063,37 +1071,18 @@
# Keystone
# --------
-# Rather than just export these, we write them out to a
-# intermediate userrc file that can also be used to debug if
-# something goes wrong between here and running
-# tools/create_userrc.sh (this script relies on services other
-# than keystone being available, so we can't call it right now)
-cat > $TOP_DIR/userrc_early <<EOF
-# Use this for debugging issues before files in accrc are created
-
-# Set up password auth credentials now that Keystone is bootstrapped
-export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
-export OS_USERNAME=admin
-export OS_USER_DOMAIN_ID=default
-export OS_PASSWORD=$ADMIN_PASSWORD
-export OS_PROJECT_NAME=admin
-export OS_PROJECT_DOMAIN_ID=default
-export OS_REGION_NAME=$KEYSTONE_REGION_NAME
-
-EOF
-
if is_service_enabled tls-proxy; then
- echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
fi
-source $TOP_DIR/userrc_early
+# Write a clouds.yaml file and use the devstack-admin cloud
+write_clouds_yaml
+export OS_CLOUD=${OS_CLOUD:-devstack-admin}
if is_service_enabled keystone; then
echo_summary "Starting Keystone"
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
init_keystone
start_keystone
bootstrap_keystone
@@ -1118,9 +1107,6 @@
fi
-# Write a clouds.yaml file
-write_clouds_yaml
-
# Horizon
# -------
@@ -1380,7 +1366,7 @@
# which is helpful in image bundle steps.
if is_service_enabled nova && is_service_enabled keystone; then
- USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+ USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD"
if [ -f $SSL_BUNDLE_FILE ]; then
USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
diff --git a/stackrc b/stackrc
index e0d71df..681e9de 100755
--- a/stackrc
+++ b/stackrc
@@ -175,21 +175,9 @@
export PS4='+ $(short_source): '
fi
-# Configure Identity API version: 2.0, 3
-IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
-
-# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack
-# deployment will be deploying the Identity v2 pipelines. If this option is set
-# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to
-# skip Identity v2 specific tests; and iii) configure Horizon to use Identity
-# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION
-# will to be set to ``3`` in order to make DevStack register the Identity
-# endpoint as v3. This flag is experimental and will be used as basis to
-# identify the projects which still have issues to operate with Identity v3.
-ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
-if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
- IDENTITY_API_VERSION=3
-fi
+# Configure Identity API version
+# TODO(frickler): Drop this when plugins no longer need it
+IDENTITY_API_VERSION=3
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
@@ -606,8 +594,8 @@
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -627,7 +615,8 @@
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
- LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
+ LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom}
+ LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 8a2c337..fe5dafa 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -26,39 +26,6 @@
FILES=$TOP_DIR/files
fi
-# Keystone Port Reservation
-# -------------------------
-# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
-# being used as ephemeral ports by the system. The default(s) are 35357 and
-# 35358 which are in the Linux defined ephemeral port range (in disagreement
-# with the IANA ephemeral port range). This is a workaround for bug #1253482
-# where Keystone will try and bind to the port and the port will already be
-# in use as an ephemeral port by another process. This places an explicit
-# exception into the Kernel for the Keystone AUTH ports.
-function fixup_keystone {
- keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-
- # Only do the reserved ports when available, on some system (like containers)
- # where it's not exposed we are almost pretty sure these ports would be
- # exclusive for our DevStack.
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
- # Get any currently reserved ports, strip off leading whitespace
- reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
-
- if [[ -z "${reserved_ports}" ]]; then
- # If there are no currently reserved ports, reserve the keystone ports
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
- else
- # If there are currently reserved ports, keep those and also reserve the
- # Keystone specific ports. Duplicate reservations are merged into a single
- # reservation (or range) automatically by the kernel.
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
- fi
- else
- echo_summary "WARNING: unable to reserve keystone ports"
- fi
-}
-
# Python Packages
# ---------------
@@ -106,6 +73,16 @@
# overwriting works. So this hacks around those packages that
# have been dragged in by some other system dependency
sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
+
+ # After updating setuptools based on the requirements, the files from the
+ # python3-setuptools RPM are deleted, which breaks tools such as semanage
+ # (used in diskimage-builder) that use the -s flag of the python
+ # interpreter, enforcing the use of the packages from /usr/lib.
+ # Importing setuptools/pkg_resources in such an environment fails, so
+ # force re-installation of the package to fix those applications.
+ if is_package_installed python3-setuptools; then
+ sudo dnf reinstall -y python3-setuptools
+ fi
}
function fixup_suse {
@@ -167,10 +144,11 @@
# overwriting works. So this hacks around those packages that
# have been dragged in by some other system dependency
sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
}
function fixup_all {
- fixup_keystone
fixup_ubuntu
fixup_fedora
fixup_suse
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index a80c178..259375a 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -118,7 +118,7 @@
configure_pypi_alternative_url
fi
-if is_fedora && [[ ${DISTRO} == f* ]]; then
+if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then
# get-pip.py will not install over the python3-pip package in
# Fedora 34 any more.
# https://bugzilla.redhat.com/show_bug.cgi?id=1988935
@@ -128,7 +128,7 @@
# if python3-pip is later installed.
# For general sanity, we just use the packaged pip. It should be
# recent enough anyway. This is included via rpms/general
- continue
+ : # Simply fall through
else
install_get_pip
fi
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index e91464f..0212d00 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -27,7 +27,7 @@
}
CN=$1
-if [ -z "$CN" ]]; then
+if [ -z "$CN" ]; then
usage
fi
ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME}
@@ -52,5 +52,5 @@
make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME
# Create a cert bundle
-cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
-
+cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
+ $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 7be995e..74dcdb2 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -65,7 +65,7 @@
def _read_clouds(self):
try:
with open(self._clouds_path) as clouds_file:
- self._clouds = yaml.load(clouds_file)
+ self._clouds = yaml.safe_load(clouds_file)
except IOError:
# The user doesn't have a clouds.yaml file.
print("The user clouds.yaml file didn't exist.")