Merge "[ci] Remove the implied-branches pragma"
diff --git a/.zuul.yaml b/.zuul.yaml
index d114053..6ad7148 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -9,6 +9,16 @@
- controller
- nodeset:
+ name: openstack-single-node-jammy
+ nodes:
+ - name: controller
+ label: ubuntu-jammy
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-focal
nodes:
- name: controller
@@ -49,10 +59,10 @@
- controller
- nodeset:
- name: devstack-single-node-centos-8-stream
+ name: devstack-single-node-centos-9-stream
nodes:
- name: controller
- label: centos-8-stream
+ label: centos-9-stream
groups:
- name: tempest
nodes:
@@ -72,7 +82,17 @@
name: devstack-single-node-fedora-latest
nodes:
- name: controller
- label: fedora-34
+ label: fedora-35
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-debian-bullseye
+ nodes:
+ - name: controller
+ label: debian-bullseye
groups:
- name: tempest
nodes:
@@ -109,6 +129,36 @@
- compute1
- nodeset:
+ name: openstack-two-node-centos-9-stream
+ nodes:
+ - name: controller
+ label: centos-9-stream
+ - name: compute1
+ label: centos-9-stream
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
name: openstack-two-node-focal
nodes:
- name: controller
@@ -319,8 +369,10 @@
'{{ devstack_log_dir }}/devstacklog.txt.summary': logs
'{{ devstack_log_dir }}/tcpdump.pcap': logs
'{{ devstack_log_dir }}/worlddump-latest.txt': logs
+ '{{ devstack_log_dir }}/qemu.coredump': logs
'{{ devstack_full_log}}': logs
'{{ stage_dir }}/verify_tempest_conf.log': logs
+ '{{ stage_dir }}/performance.json': logs
'{{ stage_dir }}/apache': logs
'{{ stage_dir }}/apache_config': logs
'{{ stage_dir }}/etc': logs
@@ -339,6 +391,7 @@
'{{ stage_dir }}/rpm-qa.txt': logs
'{{ stage_dir }}/core': logs
'{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/services.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
/etc/ceph: logs
@@ -404,7 +457,7 @@
PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
devstack_services:
# Shared services
- dstat: true
+ dstat: false
etcd3: true
memory_tracker: true
mysql: true
@@ -413,7 +466,7 @@
subnode:
devstack_services:
# Shared services
- dstat: true
+ dstat: false
memory_tracker: true
devstack_localrc:
# Multinode specific settings
@@ -479,7 +532,7 @@
# Core services enabled for this branch.
# This list replaces the test-matrix.
# Shared services
- dstat: true
+ dstat: false
etcd3: true
memory_tracker: true
mysql: true
@@ -529,7 +582,7 @@
# Core services enabled for this branch.
# This list replaces the test-matrix.
# Shared services
- dstat: true
+ dstat: false
memory_tracker: true
tls-proxy: true
# Nova services
@@ -565,11 +618,13 @@
name: devstack-ipv6
parent: devstack
description: |
- Devstack single node job for integration gate with IPv6.
+    Devstack single node job for the integration gate with IPv6;
+    all services and tunnels use IPv6 addresses.
vars:
devstack_localrc:
SERVICE_IP_VERSION: 6
SERVICE_HOST: ""
+ TUNNEL_IP_VERSION: 6
- job:
name: devstack-enforce-scope
@@ -578,9 +633,7 @@
This job runs the devstack with scope checks enabled.
vars:
devstack_localrc:
- # Keep enabeling the services here to run with system scope
- CINDER_ENFORCE_SCOPE: true
- GLANCE_ENFORCE_SCOPE: true
+ ENFORCE_SCOPE: true
- job:
name: devstack-multinode
@@ -595,16 +648,92 @@
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
- name: devstack-platform-centos-8-stream
+ name: devstack-platform-centos-9-stream
parent: tempest-full-py3
- description: CentOS 8 Stream platform test
- nodeset: devstack-single-node-centos-8-stream
+ description: CentOS 9 Stream platform test
+ nodeset: devstack-single-node-centos-9-stream
+ timeout: 9000
+ # TODO(kopecmartin) n-v until the following is resolved:
+ # https://bugs.launchpad.net/neutron/+bug/1979047
voting: false
+ vars:
+ configure_swap_size: 4096
+
+- job:
+ name: devstack-platform-debian-bullseye
+ parent: tempest-full-py3
+ description: Debian Bullseye platform test
+ nodeset: devstack-single-node-debian-bullseye
timeout: 9000
vars:
configure_swap_size: 4096
- job:
+ name: devstack-platform-ubuntu-jammy
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test
+ nodeset: openstack-single-node-jammy
+ timeout: 9000
+ vars:
+ configure_swap_size: 4096
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovn-source
+ parent: devstack-platform-ubuntu-jammy
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
+ voting: false
+ vars:
+ devstack_localrc:
+ OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovs
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+ nodeset: openstack-single-node-jammy
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 8192
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Horizon doesn't like py310
+ horizon: false
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+
+- job:
name: devstack-no-tls-proxy
parent: tempest-full-py3
description: |
@@ -621,6 +750,12 @@
description: Fedora latest platform test
nodeset: devstack-single-node-fedora-latest
voting: false
+ vars:
+ configure_swap_size: 4096
+ # Python 3.10 dependency issues; see
+ # https://bugs.launchpad.net/horizon/+bug/1960204
+ devstack_services:
+ horizon: false
- job:
name: devstack-platform-fedora-latest-virt-preview
@@ -629,6 +764,7 @@
nodeset: devstack-single-node-fedora-latest
voting: false
vars:
+ configure_swap_size: 4096
devstack_localrc:
ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
@@ -706,7 +842,11 @@
- devstack-ipv6
- devstack-enforce-scope
- devstack-platform-fedora-latest
- - devstack-platform-centos-8-stream
+ - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bullseye
+ - devstack-platform-ubuntu-jammy
+ - devstack-platform-ubuntu-jammy-ovn-source
+ - devstack-platform-ubuntu-jammy-ovs
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
@@ -750,6 +890,11 @@
jobs:
- devstack
- devstack-ipv6
+ # TODO(kopecmartin) n-v until the following is resolved:
+ # https://bugs.launchpad.net/neutron/+bug/1979047
+ # - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bullseye
+ - devstack-platform-ubuntu-jammy
- devstack-enforce-scope
- devstack-multinode
- devstack-unit-tests
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 6745614..757b400 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -279,7 +279,7 @@
::
- LOGDAYS=1
+ LOGDAYS=2
Some coloring is used during the DevStack runs to make it easier to
see what is going on. This can be disabled with::
@@ -521,8 +521,8 @@
can be configured with any valid IPv6 prefix. The default values make
use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
-Service Version
-~~~~~~~~~~~~~~~
+Service IP Version
+~~~~~~~~~~~~~~~~~~
DevStack can enable service operation over either IPv4 or IPv6 by
setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or
@@ -542,6 +542,27 @@
HOST_IPV6=${some_local_ipv6_address}
+Tunnel IP Version
+~~~~~~~~~~~~~~~~~
+
+DevStack can enable tunnel operation over either IPv4 or IPv6 by
+setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or
+``TUNNEL_IP_VERSION=6`` respectively.
+
+When set to ``4``, Neutron will use an IPv4 address for tunnel endpoints,
+for example ``HOST_IP``.
+
+When set to ``6``, Neutron will use an IPv6 address for tunnel endpoints,
+for example ``HOST_IPV6``.
+
+The default value for this setting is ``4``. Dual-mode support, for
+example ``4+6``, is not supported, as this value must match the address
+family of the local tunnel endpoint IP(v6) address.
+
+The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the
+setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP``
+when set to ``4``, and ``HOST_IPV6`` when set to ``6``.
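+
+For example, a minimal sketch of enabling IPv6 tunnel endpoints in
+``local.conf`` (assuming the host has a usable IPv6 address)::
+
+    TUNNEL_IP_VERSION=6
+    HOST_IPV6=${some_local_ipv6_address}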
+
Multi-node setup
~~~~~~~~~~~~~~~~
@@ -642,6 +663,12 @@
VOLUME_NAME_PREFIX="volume-"
VOLUME_BACKING_FILE_SIZE=24G
+When running highly concurrent tests, the default per-project quotas
+for volumes, backups, or snapshots may be too small. These can be
+adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``,
+or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for
+each is 10.)
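+
+For example, a sketch that raises all three quotas for a concurrency-heavy
+test run (the values here are illustrative)::
+
+    CINDER_QUOTA_VOLUMES=50
+    CINDER_QUOTA_BACKUPS=50
+    CINDER_QUOTA_SNAPSHOTS=50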
+
Keystone
~~~~~~~~
@@ -666,7 +693,6 @@
disable_service horizon
KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
- KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
REGION_NAME=RegionTwo
KEYSTONE_REGION_NAME=RegionOne
@@ -679,17 +705,6 @@
KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit
it in the configuration of RegionOne.
-Disabling Identity API v2
-+++++++++++++++++++++++++
-
-The Identity API v2 is deprecated as of Mitaka and it is recommended to only
-use the v3 API. It is possible to setup keystone without v2 API, by doing:
-
-::
-
- ENABLE_IDENTITY_V2=False
-
-
Glance
++++++
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index c0b3f58..658422b 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -75,13 +75,21 @@
useradd -s /bin/bash -d /opt/stack -m stack
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+
+with ``750``, which can cause issues during deployment.
+
+::
+
+ chmod +x /opt/stack
+
This user will be making many changes to your system during installation
and operation so it needs to have sudo privileges to root without a
password:
::
- echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
From here on use the ``stack`` user. **Logout** and **login** as the
``stack`` user.
@@ -169,7 +177,7 @@
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
GLANCE_HOSTPORT=$SERVICE_HOST:9292
- ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client
+ ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent
NOVA_VNC_ENABLED=True
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
VNCSERVER_LISTEN=$HOST_IP
@@ -395,7 +403,7 @@
3. Verify that login via ssh works without a password::
- ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION
+ ssh -i /root/.ssh/id_rsa stack@DESTINATION
In essence, this means that every compute node's root user's public RSA key
must exist in every other compute node's stack user's authorized_keys file and
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index a0e97ed..0529e30 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -49,13 +49,21 @@
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+
+with ``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+ $ sudo chmod +x /opt/stack
+
Since this user will be making many changes to your system, it will need
to have sudo privileges:
.. code-block:: console
$ apt-get install sudo -y || yum install -y sudo
- $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+ $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
.. note:: On some systems you may need to use ``sudo visudo``.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 08ce4cb..0434d68 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -57,6 +57,14 @@
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
+Ensure the home directory for the ``stack`` user has executable permission
+for all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+
+with ``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+ $ sudo chmod +x /opt/stack
+
Since this user will be making many changes to your system, it should
have sudo privileges:
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 3edd708..2e8e8f5 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -75,6 +75,8 @@
openstack/networking-sfc `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
openstack/neutron `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
+openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
+openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
@@ -92,6 +94,7 @@
openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
openstack/senlin `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
openstack/shade `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
+openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver <https://opendev.org/openstack/skyline-apiserver>`__
openstack/solum `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
openstack/storlets `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
openstack/tacker `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
@@ -112,7 +115,6 @@
openstack/zun `https://opendev.org/openstack/zun <https://opendev.org/openstack/zun>`__
openstack/zun-ui `https://opendev.org/openstack/zun-ui <https://opendev.org/openstack/zun-ui>`__
performa/os-faults `https://opendev.org/performa/os-faults <https://opendev.org/performa/os-faults>`__
-skyline/skyline-apiserver `https://opendev.org/skyline/skyline-apiserver <https://opendev.org/skyline/skyline-apiserver>`__
starlingx/config `https://opendev.org/starlingx/config <https://opendev.org/starlingx/config>`__
starlingx/fault `https://opendev.org/starlingx/fault <https://opendev.org/starlingx/fault>`__
starlingx/ha `https://opendev.org/starlingx/ha <https://opendev.org/starlingx/ha>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 7d70d74..62dd15b 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -238,7 +238,7 @@
locations in the top-level of the plugin repository:
- ``./devstack/files/debs/$plugin_name`` - Packages to install when running
- on Ubuntu, Debian or Linux Mint.
+ on Ubuntu or Debian.
- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
on Red Hat, Fedora, or CentOS.
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1284360..1a353e5 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,5 +1,4 @@
Listen %PUBLICPORT%
-Listen %ADMINPORT%
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
<Directory %KEYSTONE_BIN%>
@@ -20,20 +19,6 @@
%SSLKEYFILE%
</VirtualHost>
-<VirtualHost *:%ADMINPORT%>
- WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%M"
- ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
%SSLLISTEN%<VirtualHost *:443>
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
@@ -49,13 +34,3 @@
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
-
-Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_admin>
- SetHandler wsgi-script
- Options +ExecCGI
-
- WSGIProcessGroup keystone-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
-</Location>
diff --git a/files/debs/nova b/files/debs/nova
index 0194f00..5c00ad7 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -1,7 +1,5 @@
conntrack
curl
-dnsmasq-base
-dnsmasq-utils # for dhcp_release
ebtables
genisoimage # required for config_drive
iptables
diff --git a/files/debs/swift b/files/debs/swift
index 4b8ac3d..67c6c8d 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -2,5 +2,6 @@
liberasurecode-dev
make
memcached
+rsync
sqlite3
xfsprogs
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index 2f1f139..d3b9be8 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,4 +1,4 @@
-dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
+dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config
changetype: modify
replace: olcSuffix
olcSuffix: ${BASE_DN}
diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template
new file mode 100644
index 0000000..dc519d7
--- /dev/null
+++ b/files/lvm-backing-file.template
@@ -0,0 +1,16 @@
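+# %BACKING_FILE% and %DIRECTIO% are placeholders that DevStack substitutes
+# when installing this unit (an assumption following the %VAR% convention
+# of the other templates in files/).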
+[Unit]
+Description=Activate LVM backing file %BACKING_FILE%
+DefaultDependencies=no
+After=systemd-udev-settle.service
+Before=lvm2-activation-early.service
+Wants=systemd-udev-settle.service
+
+[Service]
+ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE%
+ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)'
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=local-fs.target
+Also=systemd-udev-settle.service
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 1cc2f62..082b9ac 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,8 +1,6 @@
cdrkit-cdrtools-compat # dist:sle12
conntrack-tools
curl
-dnsmasq
-dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
ebtables
iptables
iputils
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 64befc5..33a55f8 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
ceph # NOPRIME
-redhat-lsb-core
+redhat-lsb-core # not:rhel9
xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 33da0a5..7697513 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -16,6 +16,7 @@
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
+mod_ssl # required for tls-proxy on centos 9 stream computes
net-tools
openssh-server
openssl
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 68e5472..7ce5a72 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,9 +1,10 @@
cryptsetup
dosfstools
-genisoimage
+genisoimage # not:rhel9
iscsi-initiator-utils
libosinfo
lvm2
sg3_utils
# Stuff for diablo volumes
sysfsutils
+xorriso # not:rhel8
diff --git a/files/rpms/nova b/files/rpms/nova
index 8ea8ccc..f2824ee 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,9 +1,7 @@
conntrack-tools
curl
-dnsmasq # for q-dhcp
-dnsmasq-utils # for dhcp_release
ebtables
-genisoimage # required for config_drive
+genisoimage # not:rhel9 required for config_drive
iptables
iputils
kernel-modules
@@ -13,3 +11,4 @@
rabbitmq-server # NOPRIME
sqlite
sudo
+xorriso # not:rhel8
diff --git a/files/rpms/swift b/files/rpms/swift
index 18c957c..7d906aa 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -4,4 +4,4 @@
rsync-daemon
sqlite
xfsprogs
-xinetd # not:f34
+xinetd # not:f35,rhel9
diff --git a/functions b/functions
index ccca5cd..7ada0fe 100644
--- a/functions
+++ b/functions
@@ -414,10 +414,10 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id)
fi
_upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
fi
diff --git a/functions-common b/functions-common
index 11679e4..92a6678 100644
--- a/functions-common
+++ b/functions-common
@@ -49,7 +49,7 @@
STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
KEYSTONE_SERVICE_URI \
LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
- HOST_IPV6 SERVICE_IP_VERSION"
+ HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION"
# Saves significant environment variables to .stackenv for later use
@@ -85,7 +85,7 @@
if [ -f "$SSL_BUNDLE_FILE" ]; then
CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
fi
- # demo -> devstack
+ # devstack: user with the member role on demo project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack \
@@ -96,18 +96,7 @@
--os-password $ADMIN_PASSWORD \
--os-project-name demo
- # alt_demo -> devstack-alt
- $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack-alt \
- --os-region-name $REGION_NAME \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_SERVICE_URI \
- --os-username alt_demo \
- --os-password $ADMIN_PASSWORD \
- --os-project-name alt_demo
-
- # admin -> devstack-admin
+ # devstack-admin: user with the admin role on the admin project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-admin \
@@ -118,7 +107,62 @@
--os-password $ADMIN_PASSWORD \
--os-project-name admin
- # admin with a system-scoped token -> devstack-system
+ # devstack-admin-demo: user with the admin role on the demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-admin-demo \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username admin \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+
+ # devstack-alt: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-member: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-reader: user with the reader role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-reader: user with the reader role on demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+
+ # devstack-system-admin: user with the admin role on the system
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-system-admin \
@@ -129,6 +173,28 @@
--os-password $ADMIN_PASSWORD \
--os-system-scope all
+ # devstack-system-member: user with the member role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
+ # devstack-system-reader: user with the reader role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
cat >> $CLOUDS_YAML <<EOF
functional:
image_name: $DEFAULT_IMAGE_NAME
@@ -346,14 +412,21 @@
# - os_VENDOR
# - os_PACKAGE
function GetOSVersion {
- # We only support distros that provide a sane lsb_release
- _ensure_lsb_release
+ # CentOS Stream 9 does not provide lsb_release
+ source /etc/os-release
+ if [[ "${ID}${VERSION}" == "centos9" ]]; then
+ os_RELEASE=${VERSION_ID}
+ os_CODENAME="n/a"
+ os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+ else
+ _ensure_lsb_release
- os_RELEASE=$(lsb_release -r -s)
- os_CODENAME=$(lsb_release -c -s)
- os_VENDOR=$(lsb_release -i -s)
+ os_RELEASE=$(lsb_release -r -s)
+ os_CODENAME=$(lsb_release -c -s)
+ os_VENDOR=$(lsb_release -i -s)
+ fi
- if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
+ if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then
os_PACKAGE="deb"
else
os_PACKAGE="rpm"
@@ -371,9 +444,8 @@
function GetDistro {
GetOSVersion
- if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \
- "$os_VENDOR" =~ (LinuxMint) ]]; then
- # 'Everyone' refers to Ubuntu / Debian / Mint releases by
+ if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+ # 'Everyone' refers to Ubuntu / Debian releases by
# the code name adjective
DISTRO=$os_CODENAME
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
@@ -391,6 +463,7 @@
DISTRO="sle${os_RELEASE%.*}"
elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
"$os_VENDOR" =~ (CentOS) || \
+ "$os_VENDOR" =~ (AlmaLinux) || \
"$os_VENDOR" =~ (Scientific) || \
"$os_VENDOR" =~ (OracleServer) || \
"$os_VENDOR" =~ (Virtuozzo) ]]; then
@@ -451,6 +524,7 @@
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "RedHatEnterprise" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
+ [ "$os_VENDOR" = "AlmaLinux" ] || \
[ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
}
@@ -496,7 +570,6 @@
[ "$os_PACKAGE" = "deb" ]
}
-
# Git Functions
# =============
@@ -547,7 +620,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to 'required-projects' in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+            die $LINENO "ERROR_ON_CLONE is set to True, so cloning is not allowed in this configuration"
fi
git_timed clone $git_clone_flags $git_remote $git_dest
fi
@@ -559,7 +632,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to the \$PROJECTS variable in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+            die $LINENO "ERROR_ON_CLONE is set to True, so cloning is not allowed in this configuration"
fi
# '--branch' can also take tags
git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
@@ -572,7 +645,7 @@
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
- find $git_dest -name '*.pyc' -delete
+ sudo find $git_dest -name '*.pyc' -delete
# handle git_ref accordingly to type (tag, branch)
if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
@@ -588,6 +661,18 @@
fi
fi
+ # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions
+ # about how we clone and work with repos. Mark them safe globally
+ # as a work-around.
+ #
+ # NOTE(danms): On bionic (and likely others) git-config may write
+ # ~stackuser/.gitconfig if not run with sudo -H. Using --system
+ # writes these changes to /etc/gitconfig which is more
+ # discoverable anyway.
+ #
+ # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9
+ sudo git config --system --add safe.directory ${git_dest}
+
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
@@ -793,10 +878,10 @@
# Gets domain id
domain_id=$(
# Gets domain id
- openstack domain show $1 \
+ openstack --os-cloud devstack-system-admin domain show $1 \
-f value -c id 2>/dev/null ||
# Creates new domain
- openstack domain create $1 \
+ openstack --os-cloud devstack-system-admin domain create $1 \
--description "$2" \
-f value -c id
)
@@ -811,7 +896,7 @@
# Gets group id
group_id=$(
# Creates new group with --or-show
- openstack group create $1 \
+ openstack --os-cloud devstack-system-admin group create $1 \
--domain $2 --description "$desc" --or-show \
-f value -c id
)
@@ -830,7 +915,7 @@
# Gets user id
user_id=$(
# Creates new user with --or-show
- openstack user create \
+ openstack --os-cloud devstack-system-admin user create \
$1 \
--password "$2" \
--domain=$3 \
@@ -847,7 +932,7 @@
local project_id
project_id=$(
# Creates new project with --or-show
- openstack project create $1 \
+ openstack --os-cloud devstack-system-admin project create $1 \
--domain=$2 \
--or-show -f value -c id
)
@@ -860,7 +945,7 @@
local role_id
role_id=$(
# Creates role with --or-show
- openstack role create $1 \
+ openstack --os-cloud devstack-system-admin role create $1 \
--or-show -f value -c id
)
echo $role_id
@@ -890,7 +975,7 @@
domain_args=$(_get_domain_args $4 $5)
# Gets user role id
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--project $3 \
@@ -898,11 +983,11 @@
| grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--user $2 \
--project $3 \
$domain_args
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--project $3 \
@@ -917,17 +1002,17 @@
function get_or_add_user_domain_role {
local user_role_id
# Gets user role id
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--domain $3 \
| grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--user $2 \
--domain $3
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--domain $3 \
@@ -936,22 +1021,53 @@
echo $user_role_id
}
+# Gets or adds user role to system
+# Usage: get_or_add_user_system_role <role> <user> <system> [<user_domain>]
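+# Example (illustrative): get_or_add_user_system_role reader glance all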
+function get_or_add_user_system_role {
+ local user_role_id
+ local domain_args
+
+ domain_args=$(_get_domain_args $4)
+
+ # Gets user role id
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ if [[ -z "$user_role_id" ]]; then
+ # Adds role to user and get it
+ openstack --os-cloud devstack-system-admin role add $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ fi
+ echo $user_role_id
+}
+
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
local group_role_id
# Gets group role id
- group_role_id=$(openstack role assignment list \
+ group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--group $2 \
--project $3 \
-f value)
if [[ -z "$group_role_id" ]]; then
# Adds role to group and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--group $2 \
--project $3
- group_role_id=$(openstack role assignment list \
+ group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--group $2 \
--project $3 \
@@ -967,9 +1083,9 @@
# Gets service id
service_id=$(
# Gets service id
- openstack service show $2 -f value -c id 2>/dev/null ||
+ openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null ||
# Creates new service if not exists
- openstack service create \
+ openstack --os-cloud devstack-system-admin service create \
$2 \
--name $1 \
--description="$3" \
@@ -982,14 +1098,14 @@
# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
function _get_or_create_endpoint_with_interface {
local endpoint_id
- endpoint_id=$(openstack endpoint list \
+ endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \
--service $1 \
--interface $2 \
--region $4 \
-c ID -f value)
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
- endpoint_id=$(openstack endpoint create \
+ endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \
$1 $2 $3 --region $4 -f value -c id)
fi
@@ -1023,7 +1139,7 @@
# Get a URL from the identity service
# Usage: get_endpoint_url <service> <interface>
function get_endpoint_url {
- echo $(openstack endpoint list \
+ echo $(openstack --os-cloud devstack-system-admin endpoint list \
--service $1 --interface $2 \
-c URL -f value)
}
@@ -1038,7 +1154,7 @@
}
function is_ironic_enforce_scope {
- is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+ is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0
return 1
}
@@ -1447,6 +1563,7 @@
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local extra=""
if [[ -n "$group" ]]; then
extra="Group=$group"
@@ -1460,6 +1577,9 @@
iniset -sudo $unitfile "Service" "KillMode" "process"
iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
+ if [[ -n "$env_vars" ]] ; then
+ iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+ fi
if [[ -n "$group" ]]; then
iniset -sudo $unitfile "Service" "Group" "$group"
fi
@@ -1474,6 +1594,7 @@
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local unitfile="$SYSTEMD_DIR/$service"
mkdir -p $SYSTEMD_DIR
@@ -1488,6 +1609,9 @@
iniset -sudo $unitfile "Service" "NotifyAccess" "all"
iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
+ if [[ -n "$env_vars" ]] ; then
+ iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+ fi
if [[ -n "$group" ]]; then
iniset -sudo $unitfile "Service" "Group" "$group"
fi
@@ -1535,10 +1659,14 @@
local systemd_service="devstack@$service.service"
local group=$3
local user=${4:-$STACK_USER}
+ if [[ -z "$user" ]]; then
+ user=$STACK_USER
+ fi
+ local env_vars="$5"
if [[ "$command" =~ "uwsgi" ]] ; then
- write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user"
+ write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
else
- write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+ write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
fi
$SYSTEMCTL enable $systemd_service
@@ -1559,18 +1687,20 @@
# If the command includes shell metacharacters (;<>*) it must be run using a shell
# If an optional group is provided sg will be used to run the
# command as that group.
-# run_process service "command-line" [group] [user]
+# run_process service "command-line" [group] [user] [env_vars]
+# env_vars must be a space-separated list of variable assignments, i.e. "A=1 B=2"
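+# Example (as used for cinder; see lib/cinder later in this change):
+#   run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "MALLOC_ARENA_MAX=1"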
function run_process {
local service=$1
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local name=$service
time_start "run_process"
if is_service_enabled $service; then
- _run_under_systemd "$name" "$command" "$group" "$user"
+ _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars"
fi
time_stop "run_process"
}
diff --git a/inc/ini-config b/inc/ini-config
index 7993682..f65e42d 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -189,6 +189,9 @@
local option=$3
local value=$4
+    # Escape any ampersand (&) in the value; sed treats a bare & in its
+    # replacement text as the matched string, which would corrupt the value.
+ value=$(echo $value | sed -e 's/&/\\&/g')
+
if [[ -z $section || -z $option ]]; then
$xtrace
return
diff --git a/inc/python b/inc/python
index 9382d35..3eb3efe 100644
--- a/inc/python
+++ b/inc/python
@@ -186,15 +186,11 @@
$xtrace
- # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
- # the same behaviour of setuptools before version 25.0.0.
- # related issue: https://github.com/pypa/pip/issues/3874
$sudo_pip \
http_proxy="${http_proxy:-}" \
https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
- SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
$cmd_pip $upgrade \
$@
result=$?
diff --git a/lib/apache b/lib/apache
index 4bea07d..94f3cfc 100644
--- a/lib/apache
+++ b/lib/apache
@@ -27,6 +27,11 @@
APACHE_USER=${APACHE_USER:-$STACK_USER}
APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
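+# Wrap the local host in brackets when using IPv6 so it can be embedded
+# in host:port strings below (for example, [::1]:8080).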
+APACHE_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+ APACHE_LOCAL_HOST=[$APACHE_LOCAL_HOST]
+fi
+
# Set up apache name and configuration directory
# Note that APACHE_CONF_DIR is really more accurately apache's vhost
@@ -82,19 +87,15 @@
apxs="apxs"
fi
- # This varies based on packaged/installed. If we've
- # pip_installed, then the pip setup will only build a "python"
- # module that will be either python2 or python3 depending on what
- # it was built with.
- #
- # For package installs, the distro ships both plugins and you need
- # to select the right one ... it will not be autodetected.
- UWSGI_PYTHON_PLUGIN=python3
-
if is_ubuntu; then
local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
install_package ${pkg_list}
- elif is_fedora; then
+ # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall
+ # into the install-from-source because the upstream packages
+    # didn't fix Python 3.10 compatibility before release. Should be
+    # fixed in uwsgi 4.9.0; can remove this when packages are available
+    # or we drop this release.
+ elif is_fedora && ! [[ $DISTRO =~ f35 ]]; then
# Note httpd comes with mod_proxy_uwsgi and it is loaded by
# default; the mod_proxy_uwsgi package actually conflicts now.
# See:
@@ -122,7 +123,6 @@
popd
# delete the temp directory
sudo rm -rf $dir
- UWSGI_PYTHON_PLUGIN=python
fi
if is_ubuntu || is_suse ; then
@@ -283,7 +283,7 @@
# configured after graceful shutdown
iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
+ iniset "$file" uwsgi plugins http,python3
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -328,7 +328,7 @@
rm -rf $file
iniset "$file" uwsgi wsgi-file "$wsgi"
port=$(get_random_port)
- iniset "$file" uwsgi http-socket "127.0.0.1:$port"
+ iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
iniset "$file" uwsgi processes $API_WORKERS
# This is running standalone
iniset "$file" uwsgi master true
@@ -336,7 +336,7 @@
iniset "$file" uwsgi die-on-term true
iniset "$file" uwsgi exit-on-reload false
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
+ iniset "$file" uwsgi plugins http,python3
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -364,7 +364,7 @@
apache_conf=$(apache_site_config_for $name)
echo "KeepAlive Off" | sudo tee $apache_conf
echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
- echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf
+ echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
}
diff --git a/lib/cinder b/lib/cinder
index f3e2430..7dd7539 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -286,6 +286,11 @@
iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES
fi
+ # set default quotas
+ iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10}
+ iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10}
+ iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10}
+
# Avoid RPC timeouts in slow CI and test environments by doubling the
# default response timeout set by RPC clients. See bug #1873234 for more
# details and example failures.
@@ -348,7 +353,9 @@
# Format logging
setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI
- write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ if is_service_enabled c-api; then
+ write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ fi
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
configure_cinder_driver
@@ -373,7 +380,7 @@
iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
- if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $CINDER_CONF oslo_policy enforce_scope true
iniset $CINDER_CONF oslo_policy enforce_new_defaults true
fi
@@ -381,16 +388,24 @@
# create_cinder_accounts() - Set up common required cinder accounts
-# Tenant User Roles
+# Project User Roles
# ------------------------------------------------------------------
-# service cinder admin # if enabled
+# SERVICE_PROJECT_NAME cinder service
+# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled)
# Migrated from keystone_data.sh
function create_cinder_accounts {
# Cinder
if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- create_service_user "cinder"
+ local extra_role=""
+
+ # cinder needs the "creator" role in order to interact with barbican
+ if is_service_enabled barbican; then
+ extra_role=$(get_or_create_role "creator")
+ fi
+
+ create_service_user "cinder" $extra_role
# block-storage is the official service type
get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
@@ -440,10 +455,6 @@
be_type=${be%%:*}
be_name=${be##*:}
if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
- # Always init the default volume group for lvm.
- if [[ "$be_type" == "lvm" ]]; then
- init_default_lvm_volume_group
- fi
init_cinder_backend_${be_type} ${be_name}
fi
done
@@ -549,8 +560,13 @@
fi
run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
- run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
- run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+    # Tune glibc for Python services: use a single malloc arena for all
+    # threads and disable dynamic thresholds to reduce memory usage when
+    # using native threads directly or via eventlet.tpool.
+ # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html
+ malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144"
+ run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning"
+ run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning"
    # NOTE(jdg): For cinder, startup order matters. To ensure that report_capabilities is received
    # by the scheduler, start the cinder-volume service last (or restart it) after the scheduler
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
index e4003c0..4b18049 100644
--- a/lib/cinder_backups/ceph
+++ b/lib/cinder_backups/ceph
@@ -26,12 +26,15 @@
function configure_cinder_backup_ceph {
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
- if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ # Execute this part only when cephadm is not used
+ if [[ "$CEPHADM_DEPLOY" = "False" ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+ if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
fi
- sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
- sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
index d7c977e..c7ec306 100644
--- a/lib/cinder_backups/swift
+++ b/lib/cinder_backups/swift
@@ -24,6 +24,9 @@
# to use it.
iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+ if is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE
+ fi
}
# init_cinder_backup_swift: nothing to do
diff --git a/lib/database b/lib/database
index 7940cf2..78563f6 100644
--- a/lib/database
+++ b/lib/database
@@ -89,6 +89,10 @@
DATABASE_PASSWORD=$MYSQL_PASSWORD
fi
+ return 0
+}
+
+function define_database_baseurl {
# We configure Nova, Horizon, Glance and Keystone to use MySQL as their
# database server. While they share a single server, each has their own
# database and tables.
@@ -100,8 +104,6 @@
# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
-
- return 0
}
# Recreate a given database
diff --git a/lib/databases/mysql b/lib/databases/mysql
index d4969d7..b292da2 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -12,6 +12,7 @@
set +o xtrace
MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
+INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES)
register_database mysql
@@ -25,6 +26,8 @@
# provide a mysql.service symlink for backwards-compatibility, but
# let's not rely on that.
MYSQL_SERVICE_NAME=mariadb
+ elif [[ "$DISTRO" == "bullseye" ]]; then
+ MYSQL_SERVICE_NAME=mariadb
fi
fi
@@ -83,10 +86,16 @@
exit_distro_not_supported "mysql configuration"
fi
- # Start mysql-server
+ # Change bind-address from localhost (127.0.0.1) to any (::)
+ iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
+
+ # (Re)Start mysql-server
if is_fedora || is_suse; then
# service is not started by default
start_service $MYSQL_SERVICE_NAME
+ elif is_ubuntu; then
+ # required since bind-address could have changed above
+ restart_service $MYSQL_SERVICE_NAME
fi
# Set the root password - only works the first time. For Ubuntu, we already
@@ -99,13 +108,13 @@
if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
local cmd_args="-uroot -p$DATABASE_PASSWORD "
else
- local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 "
+ local cmd_args="-uroot -p$DATABASE_PASSWORD -h$SERVICE_LOCAL_HOST "
fi
# In mariadb e.g. on Ubuntu socket plugin is used for authentication
# as root so it works only as sudo. To restore old "mysql like" behaviour,
# we need to change auth plugin for root user
- if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
fi
@@ -116,9 +125,7 @@
# Now update ``my.cnf`` for some local needs and restart the mysql service
- # Change bind-address from localhost (127.0.0.1) to any (::) and
- # set default db type to InnoDB
- iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
+ # Set default db type to InnoDB
iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
iniset -sudo $my_conf mysqld default-storage-engine InnoDB
iniset -sudo $my_conf mysqld max_connections 1024
@@ -143,6 +150,19 @@
iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
fi
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ echo "enabling MySQL performance counting"
+
+ # Install our sqlalchemy plugin
+ pip_install ${TOP_DIR}/tools/dbcounter
+
+ # Create our stats database for accounting
+ recreate_database stats
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+ "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+ count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
+ fi
+
restart_service $MYSQL_SERVICE_NAME
}
@@ -173,18 +193,20 @@
chmod 0600 $HOME/.my.cnf
fi
# Install mysql-server
- if is_oraclelinux; then
- install_package mysql-community-server
- elif is_fedora; then
- install_package mariadb-server mariadb-devel
- sudo systemctl enable $MYSQL_SERVICE_NAME
- elif is_suse; then
- install_package mariadb-server
- sudo systemctl enable $MYSQL_SERVICE_NAME
- elif is_ubuntu; then
- install_package $MYSQL_SERVICE_NAME-server
- else
- exit_distro_not_supported "mysql installation"
+ if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then
+ if is_oraclelinux; then
+ install_package mysql-community-server
+ elif is_fedora; then
+ install_package mariadb-server mariadb-devel mariadb
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_suse; then
+ install_package mariadb-server
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_ubuntu; then
+ install_package $MYSQL_SERVICE_NAME-server
+ else
+ exit_distro_not_supported "mysql installation"
+ fi
fi
}
@@ -200,7 +222,17 @@
function database_connection_url_mysql {
local db=$1
- echo "$BASE_SQL_CONN/$db?charset=utf8"
+ local plugin
+
+ # NOTE(danms): We don't enable perf on subnodes yet because the
+ # plugin is not installed there
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ if is_service_enabled mysql; then
+ plugin="&plugin=dbcounter"
+ fi
+ fi
+
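+    # The resulting URL looks like (illustrative):
+    #   mysql+pymysql://root:secret@127.0.0.1/nova?charset=utf8&plugin=dbcounter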
+ echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
}
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 618834b..4f0a5a0 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -13,7 +13,7 @@
MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200}
-
+INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES)
register_database postgresql
@@ -95,7 +95,6 @@
function install_database_postgresql {
echo_summary "Installing postgresql"
- deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle"
local pgpass=$HOME/.pgpass
if [[ ! -e $pgpass ]]; then
cat <<EOF > $pgpass
@@ -105,15 +104,17 @@
else
sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass
fi
- if is_ubuntu; then
- install_package postgresql
- elif is_fedora || is_suse; then
- install_package postgresql-server
- if is_fedora; then
- sudo systemctl enable postgresql
+ if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then
+ if is_ubuntu; then
+ install_package postgresql
+ elif is_fedora || is_suse; then
+ install_package postgresql-server
+ if is_fedora; then
+ sudo systemctl enable postgresql
+ fi
+ else
+ exit_distro_not_supported "postgresql installation"
fi
- else
- exit_distro_not_supported "postgresql installation"
fi
}
diff --git a/lib/glance b/lib/glance
index f18bea9..ba98f41 100644
--- a/lib/glance
+++ b/lib/glance
@@ -288,24 +288,17 @@
function configure_glance_quotas {
- # NOTE(danms): We need to have some of the OS_ things unset in
- # order to use system scope, which is required for creating these
- # limits. This is a hack, but I dunno how else to get osc to use
- # system scope.
+    # Registered limits in keystone are system-specific resources.
+ # Make sure we use a system-scoped token to interact with this API.
- bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
- --region $REGION_NAME image_size_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL \
- --region $REGION_NAME image_stage_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit 100 --region $REGION_NAME \
- image_count_total; \
- openstack --os-cloud devstack-system-admin registered limit create \
- --service glance --default-limit 100 --region $REGION_NAME \
- image_count_uploading"
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_uploading
# Tell glance to use these limits
iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True
@@ -316,13 +309,13 @@
iniset $GLANCE_API_CONF oslo_limit username glance
iniset $GLANCE_API_CONF oslo_limit auth_type password
iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
- iniset $GLANCE_API_CONF oslo_limit system_scope "'all'"
+ iniset $GLANCE_API_CONF oslo_limit system_scope all
iniset $GLANCE_API_CONF oslo_limit endpoint_id \
- $(openstack endpoint list --service glance -f value -c ID)
+ $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID)
# Allow the glance service user to read quotas
- openstack role add --user glance --user-domain Default --system all \
- reader
+ openstack --os-cloud devstack-system-admin role add --user glance \
+ --user-domain $SERVICE_DOMAIN_NAME --system all reader
}
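
The limits registered above can be verified after stacking with the same system-scoped cloud entry; a quick sketch:

    openstack --os-cloud devstack-system-admin registered limit list \
        --service glance --region "$REGION_NAME"
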
# configure_glance() - Set config files, create data dirs, etc
@@ -439,7 +432,7 @@
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
- if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $GLANCE_API_CONF oslo_policy enforce_scope true
iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
diff --git a/lib/keystone b/lib/keystone
index 66e867c..80a136f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -9,7 +9,6 @@
# - ``tls`` file
# - ``DEST``, ``STACK_USER``
# - ``FILES``
-# - ``IDENTITY_API_VERSION``
# - ``BASE_SQL_CONN``
# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
# - ``S3_SERVICE_PORT`` (template backend only)
@@ -50,9 +49,7 @@
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
-KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini
KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
-KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin
# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values:
# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi
@@ -81,21 +78,12 @@
KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
-# Set Keystone interface configuration
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
-
# Public facing bits
KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-# Bind hosts
-KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
-
# Set the project for service accounts in Keystone
SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default}
SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service}
@@ -106,7 +94,6 @@
# if we are running with SSL use https protocols
if is_service_enabled tls-proxy; then
- KEYSTONE_AUTH_PROTOCOL="https"
KEYSTONE_SERVICE_PROTOCOL="https"
fi
@@ -134,6 +121,15 @@
# Cache settings
KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+# Whether to create a keystone admin endpoint for legacy applications
+KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT)
+
+# Flag to set oslo_policy.enforce_scope. This is used to switch the
+# Identity API policies to start checking the scope of the token. By default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)
+
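
A minimal local.conf sketch for exercising the flag:

    [[local|localrc]]
    KEYSTONE_ENFORCE_SCOPE=True
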
# Functions
# ---------
@@ -154,11 +150,8 @@
sudo rm -f $(apache_site_config_for keystone)
else
stop_process "keystone"
- # TODO: remove admin at pike-2
remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
sudo rm -f $(apache_site_config_for keystone-wsgi-public)
- sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
fi
}
@@ -171,12 +164,10 @@
local keystone_certfile=""
local keystone_keyfile=""
local keystone_service_port=$KEYSTONE_SERVICE_PORT
- local keystone_auth_port=$KEYSTONE_AUTH_PORT
local venv_path=""
if is_service_enabled tls-proxy; then
keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
- keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
fi
if [[ ${USE_VENV} = True ]]; then
venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
@@ -185,7 +176,6 @@
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$keystone_service_port|g;
- s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%SSLLISTEN%|$keystone_ssl_listen|g;
s|%SSLENGINE%|$keystone_ssl|g;
@@ -223,22 +213,17 @@
iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_port=$KEYSTONE_AUTH_PORT
if is_service_enabled tls-proxy; then
# Set the service ports for a proxy to take the originals
service_port=$KEYSTONE_SERVICE_PORT_INT
- auth_port=$KEYSTONE_AUTH_PORT_INT
fi
- # Override the endpoints advertised by keystone (the public_endpoint and
- # admin_endpoint) so that clients use the correct endpoint. By default, the
- # keystone server uses the public_port and admin_port which isn't going to
- # work when you want to use a different port (in the case of proxy), or you
- # don't want the port (in the case of putting keystone on a path in
- # apache).
+ # Override the endpoints advertised by keystone so that clients use the correct
+ # endpoint. By default, the keystone server uses the public_port, which isn't
+ # going to work when you want to use a different port (in the case of a proxy)
+ # or no port at all (in the case of putting keystone on a path in apache).
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
- iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
@@ -261,7 +246,6 @@
_config_keystone_apache_wsgi
else # uwsgi
write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity"
- write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin"
fi
iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
@@ -281,6 +265,11 @@
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
fi
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $KEYSTONE_CONF oslo_policy enforce_scope true
+ iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
+ iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+ fi
}
# create_keystone_accounts() - Sets up common required keystone accounts
@@ -303,20 +292,28 @@
# admins admin admin admin
# nonadmins demo, alt_demo member, anotherrole demo, alt_demo
+# System User Roles
+# system user role
+# ------------------------------------------------------------------
+# all admin admin
+# all system_reader reader
+# all system_member member
+
# Migrated from keystone_data.sh
function create_keystone_accounts {
# The keystone bootstrapping process (performed via keystone-manage
- # bootstrap) creates an admin user, admin role, member role, and admin
+ # bootstrap) creates an admin user and an admin
# project. As a sanity check we exercise the CLI to retrieve the IDs for
# these values.
local admin_project
admin_project=$(openstack project show "admin" -f value -c id)
local admin_user
admin_user=$(openstack user show "admin" -f value -c id)
+ # These roles are also created during bootstrap but we don't need their IDs
local admin_role="admin"
local member_role="member"
+ local reader_role="reader"
async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
@@ -352,21 +349,53 @@
async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+
async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
- # alt_demo
+ # Create a user to act as a reader on project demo
+ local demo_reader
+ demo_reader=$(get_or_create_user "demo_reader" \
+ "$ADMIN_PASSWORD" "default" "demo_reader@example.com")
+
+ async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project
+
+ # Create a different project called alt_demo
local alt_demo_project
alt_demo_project=$(get_or_create_project "alt_demo" default)
+ # Create a user to act as admin and anotherrole on project alt_demo
local alt_demo_user
alt_demo_user=$(get_or_create_user "alt_demo" \
"$ADMIN_PASSWORD" "default" "alt_demo@example.com")
- async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
- async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+ async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project
async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+ # Create another user to act as a member on project alt_demo
+ local alt_demo_member
+ alt_demo_member=$(get_or_create_user "alt_demo_member" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com")
+ async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project
+
+ # Create another user to act as a reader on project alt_demo
+ local alt_demo_reader
+ alt_demo_reader=$(get_or_create_user "alt_demo_reader" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com")
+ async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project
+
+ # Create two users, give one the member role on the system and the other the
+ # reader role on the system. These two users model system-member and
+ # system-reader personas. The admin user already has the admin role on the
+ # system and we can re-use this user as a system-admin.
+ system_member_user=$(get_or_create_user "system_member" \
+ "$ADMIN_PASSWORD" "default" "system_member@example.com")
+ async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all"
+
+ system_reader_user=$(get_or_create_user "system_reader" \
+ "$ADMIN_PASSWORD" "default" "system_reader@example.com")
+ async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all"
+
# groups
local admin_group
admin_group=$(get_or_create_group "admins" \
@@ -381,8 +410,9 @@
async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
- async_wait ks-demo-{member,admin,another,invis}
- async_wait ks-alt-{member,admin,another}
+ async_wait ks-demo-{member,admin,another,invis,reader}
+ async_wait ks-alt-{admin,another,member-user,reader-user}
+ async_wait ks-system-{member,reader}
async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
if is_service_enabled ldap; then
@@ -518,7 +548,7 @@
function start_keystone {
# Get right service port for testing
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
+ local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL
if is_service_enabled tls-proxy; then
service_port=$KEYSTONE_SERVICE_PORT_INT
auth_protocol="http"
@@ -537,7 +567,7 @@
# unencrypted traffic at this point.
# If running in Apache, use the path rather than port.
- local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
+ local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/
if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
die $LINENO "keystone did not start"
@@ -546,7 +576,6 @@
# Start proxies if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
- start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
fi
# (re)start memcached to make sure we have a clean memcache.
@@ -567,11 +596,8 @@
# This function uses the following GLOBAL variables:
# - ``KEYSTONE_BIN_DIR``
# - ``ADMIN_PASSWORD``
-# - ``IDENTITY_API_VERSION``
# - ``REGION_NAME``
-# - ``KEYSTONE_SERVICE_PROTOCOL``
-# - ``KEYSTONE_SERVICE_HOST``
-# - ``KEYSTONE_SERVICE_PORT``
+# - ``KEYSTONE_SERVICE_URI``
function bootstrap_keystone {
$KEYSTONE_BIN_DIR/keystone-manage bootstrap \
--bootstrap-username admin \
@@ -580,8 +606,16 @@
--bootstrap-role-name admin \
--bootstrap-service-name keystone \
--bootstrap-region-id "$REGION_NAME" \
- --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \
--bootstrap-public-url "$KEYSTONE_SERVICE_URI"
+ if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then
+ openstack endpoint create --region "$REGION_NAME" \
+ --os-username admin \
+ --os-user-domain-id default \
+ --os-password "$ADMIN_PASSWORD" \
+ --os-project-name admin \
+ --os-project-domain-id default \
+ keystone admin "$KEYSTONE_SERVICE_URI"
+ fi
}
# create_ldap_domain() - Create domain file and initialize domain with a user
diff --git a/lib/ldap b/lib/ldap
index 5a53d0e..ea5faa1 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -33,14 +33,17 @@
if is_ubuntu; then
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=mdb
LDAP_ROOTPW_COMMAND=replace
elif is_fedora; then
LDAP_OLCDB_NUMBER=2
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
elif is_suse; then
# SUSE has slappasswd in /usr/sbin/
PATH=$PATH:/usr/sbin/
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
LDAP_SERVICE_NAME=ldap
fi
@@ -56,6 +59,7 @@
local slappass=$2
sed -e "
s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE|
s|\${SLAPPASS}|$slappass|
s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
s|\${BASE_DC}|$LDAP_BASE_DC|
@@ -157,7 +161,7 @@
slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION
slapd slapd/domain string Users
slapd shared/organization string $LDAP_DOMAIN
- slapd slapd/backend string HDB
+ slapd slapd/backend string ${LDAP_OLCDB_TYPE^^}
slapd slapd/purge_database boolean true
slapd slapd/move_old_database boolean true
slapd slapd/allow_ldap_v2 boolean false
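
The slapd/backend preseed above relies on bash's case-conversion expansion (bash 4+); in isolation:

    LDAP_OLCDB_TYPE=mdb
    echo "${LDAP_OLCDB_TYPE^^}"   # prints MDB
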
diff --git a/lib/libraries b/lib/libraries
index 67ff21f..9ea3230 100755
--- a/lib/libraries
+++ b/lib/libraries
@@ -38,6 +38,7 @@
GITDIR["oslo.context"]=$DEST/oslo.context
GITDIR["oslo.db"]=$DEST/oslo.db
GITDIR["oslo.i18n"]=$DEST/oslo.i18n
+GITDIR["oslo.limit"]=$DEST/oslo.limit
GITDIR["oslo.log"]=$DEST/oslo.log
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
@@ -102,6 +103,7 @@
_install_lib_from_source "oslo.context"
_install_lib_from_source "oslo.db"
_install_lib_from_source "oslo.i18n"
+ _install_lib_from_source "oslo.limit"
_install_lib_from_source "oslo.log"
_install_lib_from_source "oslo.messaging"
_install_lib_from_source "oslo.middleware"
diff --git a/lib/lvm b/lib/lvm
index b826c1b..d3f6bf1 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -53,28 +53,10 @@
sudo vgremove -f $vg
}
-# _clean_lvm_backing_file() removes the backing file of the
-# volume group
-#
-# Usage: _clean_lvm_backing_file() $backing_file
-function _clean_lvm_backing_file {
- local backing_file=$1
-
- # If the backing physical device is a loop device, it was probably setup by DevStack
- if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev
- vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
- if [[ -n "$vg_dev" ]]; then
- sudo losetup -d $vg_dev
- fi
- rm -f $backing_file
- fi
-}
-
# clean_lvm_volume_group() cleans up the volume group and removes the
# backing file
#
-# Usage: clean_lvm_volume_group $vg
+# Usage: clean_lvm_volume_group() $vg
function clean_lvm_volume_group {
local vg=$1
@@ -83,11 +65,22 @@
# if there is no logical volume left, it's safe to attempt a cleanup
# of the backing file
if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
- _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX
+ local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX
+
+ if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \
+ [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+ sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service
+ sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+ sudo systemctl daemon-reload
+ fi
+
+ # If the backing physical device is a loop device, it was probably set up by DevStack
+ if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
+ rm -f $backing_file
+ fi
fi
}
-
# _create_lvm_volume_group creates default volume group
#
# Usage: _create_lvm_volume_group() $vg $size
@@ -106,8 +99,20 @@
directio="--direct-io=on"
fi
+ # Only create the systemd service if it doesn't already exist
+ if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+ sed -e "
+ s|%DIRECTIO%|${directio}|g;
+ s|%BACKING_FILE%|${backing_file}|g;
+ " $FILES/lvm-backing-file.template | sudo tee \
+ /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+
+ sudo systemctl daemon-reload
+ sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service
+ fi
+
local vg_dev
- vg_dev=$(sudo losetup -f --show $directio $backing_file)
+ vg_dev=$(sudo losetup --associated $backing_file -O NAME -n)
# Only create volume group if it doesn't already exist
if ! sudo vgs $vg; then
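
The loop device is now managed by a systemd unit rendered from $FILES/lvm-backing-file.template, which this diff does not show. A hypothetical sketch of that template, assuming a oneshot unit that re-attaches the loop device on boot:

    # hypothetical files/lvm-backing-file.template (assumed shape, not part of this diff)
    [Unit]
    Description=DevStack loop device for %BACKING_FILE%

    [Service]
    Type=oneshot
    RemainAfterExit=yes
    # %DIRECTIO% is expected to expand to --direct-io=on or empty
    ExecStart=/sbin/losetup -f --show %DIRECTIO% %BACKING_FILE%

    [Install]
    WantedBy=multi-user.target
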
diff --git a/lib/neutron b/lib/neutron
index 885df97..b3e3d72 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -37,6 +37,11 @@
NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
NEUTRON_DIR=$DEST/neutron
+# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
+# and "enforce_new_defaults" to True in Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
# Distributed Virtual Router (DVR) configuration
# Can be:
@@ -109,6 +114,12 @@
# Physical network for VLAN network usage.
NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+# The name of the service in the endpoint URL
+NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
+if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ NEUTRON_ENDPOINT_SERVICE_NAME="networking"
+fi
+
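
Note the bare `-` expansion (not `:-`): an explicitly empty NEUTRON_ENDPOINT_SERVICE_NAME survives, so operators can opt out of the URL prefix entirely. In isolation:

    unset VAR;  echo "${VAR-networking}"     # networking (unset, default applies)
    VAR="";     echo "${VAR-networking}"     # empty (set-but-empty is kept)
    VAR="";     echo "${VAR:-networking}"    # networking (:- also replaces empty)
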
# Additional neutron api config files
declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
@@ -141,6 +152,7 @@
# cleanup_neutron() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
if is_neutron_ovs_base_plugin; then
neutron_ovs_base_cleanup
@@ -164,6 +176,7 @@
# configure_neutron() - Set config files, create data dirs, etc
function configure_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
(cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
@@ -206,7 +219,6 @@
iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
- iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
@@ -222,7 +234,12 @@
else
mech_drivers+=",linuxbridge"
fi
+ if [[ "$mech_drivers" == *"linuxbridge"* ]]; then
+ iniset $NEUTRON_CONF experimental linuxbridge True
+ fi
+
iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
+ iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION
iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
@@ -232,6 +249,7 @@
if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
neutron_ml2_extension_driver_add port_security
fi
+ configure_rbac_policies
fi
# Neutron OVS or LB agent
@@ -243,10 +261,10 @@
# Configure the neutron agent
if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
- iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
+ iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP
elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
- iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+ iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP
if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
@@ -353,6 +371,7 @@
# Takes a single optional argument which is the config file to update,
# if not passed $NOVA_CONF is used.
function configure_neutron_nova_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local conf=${1:-$NOVA_CONF}
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
@@ -379,13 +398,17 @@
# create_neutron_accounts() - Create required service accounts
function create_neutron_accounts_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local neutron_url
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+ neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/
else
neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+ fi
if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
@@ -402,6 +425,7 @@
# init_neutron() - Initialize databases, etc.
function init_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
recreate_database neutron
time_start "dbsync"
@@ -412,6 +436,7 @@
# install_neutron() - Collect source and prepare
function install_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
setup_develop $NEUTRON_DIR
@@ -464,19 +489,22 @@
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
- neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
+ neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
else
# Start the Neutron service
# TODO(sc68cal) Stop hard coding this
run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
- neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
+ neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port/
# Start proxy if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
fi
fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+ fi
if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
die $LINENO "neutron-api did not start"
@@ -485,6 +513,7 @@
# start_neutron() - Start running processes
function start_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
# Start up the neutron agents if enabled
# TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
# can resolve the $NEUTRON_AGENT_BINARY
@@ -522,6 +551,7 @@
# stop_neutron() - Stop running processes
function stop_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
for serv in neutron-api neutron-agent neutron-l3; do
stop_process $serv
done
@@ -537,13 +567,13 @@
fi
if is_service_enabled neutron-metadata-agent; then
- sudo pkill -9 -f neutron-ns-metadata-proxy || :
stop_process neutron-metadata-agent
fi
}
# neutron_service_plugin_class_add() - add service plugin class
function neutron_service_plugin_class_add_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local service_plugin_class=$1
local plugins=""
@@ -568,11 +598,13 @@
}
function neutron_server_config_add_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
}
# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
function neutron_deploy_rootwrap_filters_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local srcdir=$1
sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
@@ -612,6 +644,19 @@
fi
}
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
+}
+
+
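
A minimal local.conf sketch, assuming the global ENFORCE_SCOPE flag is defined elsewhere in devstack:

    [[local|localrc]]
    NEUTRON_ENFORCE_SCOPE=True
    # or, for every service honoring the global flag:
    # ENFORCE_SCOPE=True
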
function configure_neutron_nova {
if is_neutron_legacy_enabled; then
# Call back to old function
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 7b20a96..baf67f2 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -90,6 +90,11 @@
NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
+# and "enforce_new_defaults" to True in Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
# Agent binaries. Note, binary paths for other agents are set in per-service
# scripts in lib/neutron_plugins/services/
AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -133,10 +138,19 @@
VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
+# Allow skipping the stopping of OVN services
+SKIP_STOP_OVN=${SKIP_STOP_OVN:-False}
+
# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
# /etc/neutron is assumed by many of devstack plugins. Do not change.
_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
+# The name of the service in the endpoint URL
+NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
+if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ NEUTRON_ENDPOINT_SERVICE_NAME="networking"
+fi
+
# List of config file names in addition to the main plugin config file
# To add additional plugin config files, use ``neutron_server_config_add``
# utility function. For example:
@@ -238,13 +252,6 @@
LB_PHYSICAL_INTERFACE=$default_route_dev
fi
-# When Neutron tunnels are enabled it is needed to specify the
-# IP address of the end point in the local server. This IP is set
-# by default to the same IP address that the HOST IP.
-# This variable can be used to specify a different end point IP address
-# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1``
-TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP}
-
# With the openvswitch plugin, set to True in ``localrc`` to enable
# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
#
@@ -277,7 +284,9 @@
source $TOP_DIR/lib/neutron_plugins/services/l3
# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
# Use security group or not
if has_neutron_plugin_security_group; then
@@ -374,9 +383,19 @@
fi
# Configure Neutron's advanced services
+ if is_service_enabled q-placement neutron-placement; then
+ configure_placement_extension
+ fi
if is_service_enabled q-trunk neutron-trunk; then
configure_trunk_extension
fi
+ if is_service_enabled q-qos neutron-qos; then
+ configure_qos
+ if is_service_enabled q-l3 neutron-l3; then
+ configure_l3_agent_extension_fip_qos
+ configure_l3_agent_extension_gateway_ip_qos
+ fi
+ fi
iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
# devstack is not a tool for running uber scale OpenStack
@@ -418,10 +437,13 @@
function create_mutnauq_accounts {
local neutron_url
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
+ neutron_url=$Q_PROTOCOL://$SERVICE_HOST/
else
neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+ fi
if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
@@ -477,6 +499,19 @@
if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
fi
+ configure_rbac_policies
+}
+
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
}
# Start running OVN processes
@@ -512,17 +547,20 @@
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
enable_service neutron-api
run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
- neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
+ neutron_url=$Q_PROTOCOL://$Q_HOST/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
else
run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
- neutron_url=$service_protocol://$Q_HOST:$service_port
+ neutron_url=$service_protocol://$Q_HOST:$service_port/
# Start proxy if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
fi
fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+ fi
echo "Waiting for Neutron to start..."
local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
@@ -552,11 +590,7 @@
function start_mutnauq_other_agents {
run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
- if is_service_enabled neutron-vpnaas; then
- : # Started by plugin
- else
- run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
- fi
+ run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
@@ -594,7 +628,6 @@
fi
if is_service_enabled q-meta; then
- sudo pkill -9 -f neutron-ns-metadata-proxy || :
stop_process q-meta
fi
@@ -612,7 +645,7 @@
stop_mutnauq_other
stop_mutnauq_l2_agent
- if [[ $Q_AGENT == "ovn" ]]; then
+ if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
stop_ovn
fi
}
@@ -883,18 +916,30 @@
neutron_plugin_configure_plugin_agent
}
+function _replace_api_paste_composite {
+ # Prefix each composite URL map in api-paste.ini with the endpoint
+ # service name, e.g. "/v2.0: neutronapi_v2_0" becomes
+ # "/networking/v2.0: neutronapi_v2_0"
+ $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE"
+ $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE"
+ $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE"
+}
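
For reference, the composite section these sed expressions rewrite looks roughly like this (assumed shape of neutron's etc/api-paste.ini):

    [composite:neutron]
    use = egg:Paste#urlmap
    /: neutronversions_composite
    /healthcheck: healthcheck
    /v2.0: neutronapi_v2_0

    # after _replace_api_paste_composite with NEUTRON_ENDPOINT_SERVICE_NAME=networking:
    /networking/: neutronversions_composite
    /networking/healthcheck: healthcheck
    /networking/v2.0: neutronapi_v2_0
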
+
# _configure_neutron_service() - Set config files for neutron service
# It is called when q-svc is enabled.
function _configure_neutron_service {
Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+ if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ _replace_api_paste_composite
+ fi
+
# Update either configuration file with plugin
iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
- iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
@@ -905,6 +950,9 @@
configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
+ # Configuration for placement client
+ configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
+
# Configure plugin
neutron_plugin_configure_service
}
@@ -1031,6 +1079,15 @@
test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
}
+function plugin_agent_add_l2_agent_extension {
+ local l2_agent_extension=$1
+ if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+ L2_AGENT_EXTENSIONS=$l2_agent_extension
+ elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+ L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
+ fi
+}
+
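
The comma-wrapped regex test keeps the extension list duplicate-free; in isolation:

    L2_AGENT_EXTENSIONS=""
    plugin_agent_add_l2_agent_extension "qos"   # L2_AGENT_EXTENSIONS=qos
    plugin_agent_add_l2_agent_extension "qos"   # no-op, already listed
    plugin_agent_add_l2_agent_extension "fdb"   # L2_AGENT_EXTENSIONS=qos,fdb
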
# Restore xtrace
$_XTRACE_NEUTRON
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index e1f868f..fa61f1e 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -125,6 +125,10 @@
fi
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
+ if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then
+ iniset $NEUTRON_CONF experimental linuxbridge True
+ fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION
if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
@@ -156,5 +160,9 @@
return 0
}
+function configure_qos_ml2 {
+ neutron_ml2_extension_driver_add "qos"
+}
+
# Restore xtrace
$_XTRACE_NEUTRON_ML2
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 1f737fb..8eb2993 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -24,11 +24,6 @@
# Load devstack ovs compilation and loading functions
source ${TOP_DIR}/lib/neutron_plugins/ovs_source
-# Defaults
-# --------
-
-Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT)
-
# Set variables for building OVN from source
OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
@@ -74,6 +69,9 @@
# unless the distro kernel includes ovs+conntrack support.
OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ Q_BUILD_OVS_FROM_GIT=True
+fi
# Whether or not to install the ovs python module from ovs source. This can be
# used to test and validate new ovs python features. This should only be used
@@ -101,8 +99,10 @@
OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+TUNNEL_IP=$TUNNEL_ENDPOINT_IP
if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+ TUNNEL_IP=[$TUNNEL_IP]
fi
OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
@@ -119,7 +119,13 @@
OVS_DATADIR=$DATA_DIR/ovs
OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
-OVN_DATADIR=$DATA_DIR/ovn
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ OVN_DATADIR=$DATA_DIR/ovn
+else
+ # When using OVN from packages, the data dir for OVN DBs is
+ # /var/lib/ovn
+ OVN_DATADIR=/var/lib/ovn
+fi
OVN_SHAREDIR=$OVS_PREFIX/share/ovn
OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts
OVN_RUNDIR=$OVS_PREFIX/var/run/ovn
@@ -165,12 +171,23 @@
# Utility Functions
# -----------------
+function wait_for_db_file {
+ local count=0
+ while [ ! -f $1 ]; do
+ sleep 1
+ count=$((count+1))
+ if [ "$count" -gt 40 ]; then
+ die $LINENO "DB File $1 not found"
+ fi
+ done
+}
+
function wait_for_sock_file {
local count=0
while [ ! -S $1 ]; do
sleep 1
count=$((count+1))
- if [ "$count" -gt 5 ]; then
+ if [ "$count" -gt 40 ]; then
die $LINENO "Socket $1 not found"
fi
done
@@ -249,7 +266,12 @@
local testcmd="test -e $OVS_RUNDIR/$service.pid"
test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
- sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info
+ local service_ctl_file
+ service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+ if [ -z "$service_ctl_file" ]; then
+ die $LINENO "ctl file for service $service is not present."
+ fi
+ sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
}
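
The control socket name embeds the daemon's pid (e.g. ovsdb-server.1234.ctl), and with the custom $OVS_RUNDIR the bare `-t $service` target presumably no longer resolves against ovs-appctl's built-in rundir, hence the explicit lookup. The resolved call ends up roughly as (pid illustrative):

    sudo ovs-appctl -t $OVS_RUNDIR/ovsdb-server.1234.ctl \
        vlog/set console:off syslog:info file:info
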
function clone_repository {
@@ -288,16 +310,13 @@
# compile_ovn() - Compile OVN from source and load needed modules
-# Accepts three parameters:
+# Accepts two parameters:
-# - first optional is False by default and means that
-# modules are built and installed.
-# - second optional parameter defines prefix for
+# - first optional parameter defines prefix for
# ovn compilation
-# - third optional parameter defines localstatedir for
+# - second optional parameter defines localstatedir for
# ovn single machine runtime
function compile_ovn {
- local build_modules=${1:-False}
- local prefix=$2
- local localstatedir=$3
+ local prefix=$1
+ local localstatedir=$2
if [ -n "$prefix" ]; then
prefix="--prefix=$prefix"
@@ -341,11 +360,6 @@
# install_ovn() - Collect source and prepare
function install_ovn {
- if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then
- echo "Installation of OVS from source disabled."
- return 0
- fi
-
echo "Installing OVN and dependent packages"
# Check the OVN configuration
@@ -375,7 +389,7 @@
compile_ovs $OVN_BUILD_MODULES
if use_new_ovn_repository; then
- compile_ovn $OVN_BUILD_MODULES
+ compile_ovn
fi
sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
@@ -383,6 +397,8 @@
sudo mkdir -p $OVS_PREFIX/var/log/ovn
sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
else
+ # Load fixup_ovn_centos
+ source ${TOP_DIR}/tools/fixup_stuff.sh
fixup_ovn_centos
install_package $(get_packages openvswitch)
install_package $(get_packages ovn)
@@ -561,14 +577,19 @@
# create new ones on each devstack run.
_disable_libvirt_apparmor
+ local mkdir_cmd="mkdir -p ${OVN_DATADIR}"
- mkdir -p $OVN_DATADIR
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
+ mkdir_cmd="sudo ${mkdir_cmd}"
+ fi
+
+ $mkdir_cmd
mkdir -p $OVS_DATADIR
rm -f $OVS_DATADIR/*.db
rm -f $OVS_DATADIR/.*.db.~lock~
- rm -f $OVN_DATADIR/*.db
- rm -f $OVN_DATADIR/.*.db.~lock~
+ sudo rm -f $OVN_DATADIR/*.db
+ sudo rm -f $OVN_DATADIR/.*.db.~lock~
}
function _start_ovs {
@@ -595,7 +616,7 @@
dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
fi
dbcmd+=" $OVS_DATADIR/conf.db"
- _run_process ovsdb-server "$dbcmd"
+ _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root"
# Note: ovn-controller will create and configure br-int once it is started.
# So, no need to create it now because nothing depends on that bridge here.
@@ -620,8 +641,8 @@
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
- sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
- sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname)
# Select this chassis to host gateway routers
if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
@@ -635,7 +656,7 @@
if is_service_enabled ovn-controller-vtep ; then
ovn_base_setup_bridge br-v
vtep-ctl add-ps br-v
- vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+ vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP
enable_service ovs-vtep
local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
@@ -683,14 +704,17 @@
local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
- _run_process ovn-northd "$cmd" "$stop_cmd"
+ _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
else
_start_process "$OVN_NORTHD_SERVICE"
fi
# Wait for the service to be ready
+ # Check for socket and db files for both OVN NB and SB
wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+ wait_for_db_file $OVN_DATADIR/ovnnb_db.db
+ wait_for_db_file $OVN_DATADIR/ovnsb_db.db
if is_service_enabled tls-proxy; then
sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 2e63fe3..cc41a8c 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -7,6 +7,12 @@
_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace)
set +o xtrace
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Defaults
+# --------
+
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
# OVS recognize default 'system' datapath or 'netdev' for userspace datapath
OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system}
@@ -60,26 +66,33 @@
}
function _neutron_ovs_base_install_agent_packages {
- # Install deps
- install_package $(get_packages "openvswitch")
- if is_ubuntu; then
- _neutron_ovs_base_install_ubuntu_dkms
- restart_service openvswitch-switch
- elif is_fedora; then
- restart_service openvswitch
- sudo systemctl enable openvswitch
- elif is_suse; then
- if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
+ if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then
+ remove_ovs_packages
+ compile_ovs False /usr/local /var
+ load_conntrack_gre_module
+ start_new_ovs
+ else
+ # Install deps
+ install_package $(get_packages "openvswitch")
+ if is_ubuntu; then
+ _neutron_ovs_base_install_ubuntu_dkms
restart_service openvswitch-switch
- else
- # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
- if [[ $DISTRO =~ "tumbleweed" ]]; then
- sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
+ elif is_fedora; then
+ restart_service openvswitch
+ sudo systemctl enable openvswitch
+ elif is_suse; then
+ if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
+ restart_service openvswitch-switch
+ else
+ # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
+ if [[ $DISTRO =~ "tumbleweed" ]]; then
+ sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
+ fi
+ restart_service openvswitch || {
+ journalctl -xe || :
+ systemctl status openvswitch
+ }
fi
- restart_service openvswitch || {
- journalctl -xe || :
- systemctl status openvswitch
- }
fi
fi
}
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 08951d1..164d574 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -14,6 +14,7 @@
# Defaults
# --------
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT)
# Set variables for building OVS from source
OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
@@ -86,9 +87,15 @@
install_package kernel-devel-$KERNEL_VERSION
install_package kernel-headers-$KERNEL_VERSION
+ if is_service_enabled tls-proxy; then
+ install_package openssl-devel
+ fi
elif is_ubuntu ; then
install_package linux-headers-$KERNEL_VERSION
+ if is_service_enabled tls-proxy; then
+ install_package libssl-dev
+ fi
fi
}
@@ -187,12 +194,12 @@
# start_new_ovs() - removes old ovs database, creates a new one and starts ovs
function start_new_ovs {
sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
- sudo /usr/share/openvswitch/scripts/ovs-ctl start
+ sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
}
# stop_new_ovs() - stops ovs
function stop_new_ovs {
- local ovs_ctl='/usr/share/openvswitch/scripts/ovs-ctl'
+ local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl'
if [ -x $ovs_ctl ] ; then
sudo $ovs_ctl stop
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 98b96ac..3dffc33 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -147,10 +147,6 @@
}
function create_neutron_initial_network {
- local project_id
- project_id=$(openstack project list | grep " demo " | get_field 1)
- die_if_not_set $LINENO project_id "Failure retrieving project_id for demo"
-
# Allow drivers that need to create an initial network to do so here
if type -p neutron_plugin_create_initial_network_profile > /dev/null; then
neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
@@ -170,15 +166,15 @@
if is_provider_network; then
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
- die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
+ NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK"
if [[ "$IP_VERSION" =~ 4.* ]]; then
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
- die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
+ SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME"
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
@@ -187,8 +183,8 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
fi
- IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
- die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
+ IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id)
+ die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME"
fi
if [[ $Q_AGENT == "openvswitch" ]]; then
@@ -197,17 +193,17 @@
sudo ip link set $PUBLIC_INTERFACE up
fi
else
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
- die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
+ NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME"
if [[ "$IP_VERSION" =~ 4.* ]]; then
# Create IPv4 private subnet
- SUBNET_ID=$(_neutron_create_private_subnet_v4 $project_id)
+ SUBNET_ID=$(_neutron_create_private_subnet_v4)
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
# Create IPv6 private subnet
- IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6 $project_id)
+ IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6)
fi
fi
@@ -215,12 +211,12 @@
# Create a router, and add the private subnet as one of its interfaces
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
- die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
+ ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
- die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
+ ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
fi
EXTERNAL_NETWORK_FLAGS="--external"
@@ -229,9 +225,9 @@
fi
# Create an external network, and a subnet. Configure the external network as router gw
if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id)
else
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id)
fi
die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
@@ -249,35 +245,32 @@
# Create private IPv4 subnet
function _neutron_create_private_subnet_v4 {
- local project_id=$1
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- local subnet_params="--project $project_id "
- subnet_params+="--ip-version 4 "
+ local subnet_params="--ip-version 4 "
if [[ -n "$NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $NETWORK_GATEWAY "
fi
+
subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
local subnet_id
- subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
- die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
+ subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
+ die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet"
echo $subnet_id
}
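
The table-scraping `grep ' id ' | get_field 2` pattern gives way to osc's machine-readable output; the idiom side by side (net0 is a placeholder):

    # fragile: scrape the human-oriented table
    openstack network show net0 | grep ' id ' | get_field 2
    # robust: request the column directly
    openstack network show net0 -f value -c id
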
# Create private IPv6 subnet
function _neutron_create_private_subnet_v6 {
- local project_id=$1
die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$FIXED_RANGE_V6
fi
- local subnet_params="--project $project_id "
- subnet_params+="--ip-version 6 "
+ local subnet_params="--ip-version 6 "
if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
fi
@@ -285,8 +278,8 @@
subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} "
subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
local ipv6_subnet_id
- ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
- die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
+ ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
+ die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet"
echo $ipv6_subnet_id
}
@@ -319,7 +312,7 @@
# Configure neutron router for IPv4 public access
function _neutron_configure_router_v4 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
# Create a public subnet on the external network
local id_and_ext_gw_ip
id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -327,7 +320,7 @@
ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
# Configure the external network as the default router gateway
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
# This logic is specific to using OVN or the l3-agent for layer 3
if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
@@ -363,7 +356,7 @@
# Configure neutron router for IPv6 public access
function _neutron_configure_router_v6 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
# Create a public subnet on the external network
local ipv6_id_and_ext_gw_ip
ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -375,7 +368,7 @@
# If the external network has not already been set as the default router
# gateway when configuring an IPv4 public subnet, do so now
if [[ "$IP_VERSION" == "6" ]]; then
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
fi
# This logic is specific to using OVN or the l3-agent for layer 3
@@ -396,7 +389,13 @@
sudo sysctl -w net.ipv6.conf.all.forwarding=1
# Configure and enable public bridge
# Override global IPV6_ROUTER_GW_IP with the true value from neutron
- IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
+ # NOTE(slaweq): when scope enforcement is enabled in Neutron, router
+ # gateway ports aren't visible in the API because such ports don't belong
+ # to any tenant. Because of that, at least temporarily, we need to find
+ # the IPv6 address of the router's gateway in a slightly different way.
+ # This can be reverted when bug
+ # https://bugs.launchpad.net/neutron/+bug/1959332 is fixed.
+ IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"')
die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
if is_neutron_ovs_base_plugin; then
@@ -404,7 +403,10 @@
ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
- # Configure interface for public bridge
+ # Configure the interface for the public bridge by setting it "up",
+ # in case the job runs entirely private-network-based testing.
+ sudo ip link set $ext_gw_interface up
sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
# Any IPv6 private subnet that uses the default IPV6 subnet pool
# and that is plugged into the default router (Q_ROUTER_NAME) will
@@ -427,3 +429,12 @@
EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
[[ $EXT_LIST =~ $extension ]] && return 0
}
+
+function plugin_agent_add_l3_agent_extension {
+ local l3_agent_extension=$1
+ if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then
+ L3_AGENT_EXTENSIONS=$l3_agent_extension
+ elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then
+ L3_AGENT_EXTENSIONS+=",$l3_agent_extension"
+ fi
+}
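
A quick sketch of how this helper composes the list (the extension names are the ones wired up in the qos service file below; the session is illustrative):

    L3_AGENT_EXTENSIONS=""
    plugin_agent_add_l3_agent_extension fip_qos          # -> "fip_qos"
    plugin_agent_add_l3_agent_extension gateway_ip_qos   # -> "fip_qos,gateway_ip_qos"
    plugin_agent_add_l3_agent_extension fip_qos          # no-op, already in the list
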
diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement
new file mode 100644
index 0000000..3ec185b
--- /dev/null
+++ b/lib/neutron_plugins/services/placement
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+function configure_placement_service_plugin {
+ neutron_service_plugin_class_add "placement"
+}
+
+function configure_placement_neutron {
+ iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE"
+ iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME"
+ iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD"
+ iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME"
+ iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement region_name "$REGION_NAME"
+}
+
+function configure_placement_extension {
+ configure_placement_service_plugin
+ configure_placement_neutron
+}
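
For reference, the iniset calls above produce a [placement] section in $NEUTRON_CONF roughly like the following sketch (all values are illustrative defaults, not taken from this change):

    [placement]
    auth_type = password
    auth_url = http://10.0.0.1/identity
    username = placement
    password = <SERVICE_PASSWORD>
    user_domain_name = Default
    project_name = service
    project_domain_name = Default
    region_name = RegionOne
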
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
new file mode 100644
index 0000000..af9eb3d
--- /dev/null
+++ b/lib/neutron_plugins/services/qos
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+function configure_qos_service_plugin {
+ neutron_service_plugin_class_add "qos"
+}
+
+
+function configure_qos_core_plugin {
+ configure_qos_$NEUTRON_CORE_PLUGIN
+}
+
+
+function configure_qos_l2_agent {
+ plugin_agent_add_l2_agent_extension "qos"
+}
+
+
+function configure_qos {
+ configure_qos_service_plugin
+ configure_qos_core_plugin
+ configure_qos_l2_agent
+}
+
+function configure_l3_agent_extension_fip_qos {
+ plugin_agent_add_l3_agent_extension "fip_qos"
+}
+
+function configure_l3_agent_extension_gateway_ip_qos {
+ plugin_agent_add_l3_agent_extension "gateway_ip_qos"
+}
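
The top-level configure_qos chains all three layers; with, say, NEUTRON_CORE_PLUGIN=ml2 the middle call resolves dynamically (a sketch of the call flow, assuming the core plugin file defines configure_qos_ml2):

    configure_qos
    +-- configure_qos_service_plugin   # adds "qos" to service_plugins
    +-- configure_qos_ml2              # via configure_qos_$NEUTRON_CORE_PLUGIN
    +-- configure_qos_l2_agent         # adds the "qos" L2 agent extension
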
diff --git a/lib/nova b/lib/nova
index bbb1039..6de1d33 100644
--- a/lib/nova
+++ b/lib/nova
@@ -107,20 +107,6 @@
QEMU_CONF=/etc/libvirt/qemu.conf
-# Set default defaults here as some hypervisor drivers override these
-PUBLIC_INTERFACE_DEFAULT=br100
-# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
-# the default isn't completely crazy. This will match ``eth*``, ``em*``, or
-# the new ``p*`` interfaces, then basically picks the first
-# alphabetically. It's probably wrong, however it's less wrong than
-# always using ``eth0`` which doesn't exist on new Linux distros at all.
-GUEST_INTERFACE_DEFAULT=$(ip link \
- | grep 'state UP' \
- | awk '{print $2}' \
- | sed 's/://' \
- | grep ^[ep] \
- | head -1)
-
# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
# In multi-node setups allows compute hosts to not run ``n-novnc``.
NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
@@ -159,6 +145,9 @@
# image in devstack is CirrOS.
NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0}
+# Whether to use Keystone unified limits instead of legacy quota limits.
+NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS)
+
# Functions
# ---------
@@ -233,6 +222,10 @@
stop_process "n-api-meta"
remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+
+ if [[ "$NOVA_BACKEND" == "LVM" ]]; then
+ clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
+ fi
}
# configure_nova() - Set config files, create data dirs, etc
@@ -260,7 +253,8 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
- LIBVIRT_CPU_MODE=none
+ LIBVIRT_CPU_MODE=custom
+ LIBVIRT_CPU_MODEL=Nehalem
if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
@@ -314,6 +308,10 @@
sudo systemctl daemon-reload
fi
+ # Set the CHAP algorithms. The default chap_algorithm is MD5, which
+ # will not work under FIPS.
+ iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
+
# ensure that iscsid is started, even when disabled by default
restart_service iscsid
fi
@@ -381,6 +379,13 @@
"http://$SERVICE_HOST:$S3_SERVICE_PORT" \
"http://$SERVICE_HOST:$S3_SERVICE_PORT"
fi
+
+ # Unified limits
+ if is_service_enabled n-api; then
+ if [[ "$NOVA_USE_UNIFIED_LIMITS" = True ]]; then
+ configure_nova_unified_limits
+ fi
+ fi
}
# create_nova_conf() - Create a new nova.conf file
@@ -478,7 +483,8 @@
fi
# nova defaults to genisoimage but only mkisofs is available for 15.0+
- if is_suse; then
+ # RHEL provides a mkisofs symlink to genisoimage or xorriso, as appropriate
+ if is_suse || is_fedora; then
iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
fi
@@ -487,8 +493,13 @@
iniset $NOVA_CONF upgrade_levels compute "auto"
- write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
- write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ if is_service_enabled n-api; then
+ write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+ fi
+
+ if is_service_enabled n-api-meta; then
+ write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ fi
if is_service_enabled ceilometer; then
iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
@@ -716,6 +727,53 @@
fi
}
+function configure_nova_unified_limits {
+ # Registered limit resources in keystone are system-specific resources.
+ # Make sure we use a system-scoped token to interact with this API.
+
+ # Default limits here mirror the legacy config-based default values.
+ # Note: disk quota is new in nova as of unified limits.
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 10 --region $REGION_NAME servers
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 20 --region $REGION_NAME class:VCPU
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit $((50 * 1024)) --region $REGION_NAME class:MEMORY_MB
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 20 --region $REGION_NAME class:DISK_GB
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 128 --region $REGION_NAME server_metadata_items
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 5 --region $REGION_NAME server_injected_files
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 10240 --region $REGION_NAME server_injected_file_content_bytes
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 255 --region $REGION_NAME server_injected_file_path_bytes
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 100 --region $REGION_NAME server_key_pairs
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 10 --region $REGION_NAME server_groups
+ openstack --os-cloud devstack-system-admin registered limit create \
+ --service nova --default-limit 10 --region $REGION_NAME server_group_members
+
+ # Tell nova to use these limits
+ iniset $NOVA_CONF quota driver "nova.quota.UnifiedLimitsDriver"
+
+ # Configure oslo_limit so it can talk to keystone
+ iniset $NOVA_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME
+ iniset $NOVA_CONF oslo_limit password $SERVICE_PASSWORD
+ iniset $NOVA_CONF oslo_limit username nova
+ iniset $NOVA_CONF oslo_limit auth_type password
+ iniset $NOVA_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
+ iniset $NOVA_CONF oslo_limit system_scope all
+ iniset $NOVA_CONF oslo_limit endpoint_id \
+ $(openstack endpoint list --service nova -f value -c ID)
+
+ # Allow the nova service user to read quotas
+ openstack --os-cloud devstack-system-admin role add --user nova \
+ --user-domain $SERVICE_DOMAIN_NAME --system all reader
+}
+
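Once stacked with NOVA_USE_UNIFIED_LIMITS=True, the registered limits can be verified with the same system-scoped cloud (a sketch; the filter flag is standard OSC identity v3):

    openstack --os-cloud devstack-system-admin registered limit list --service nova
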
function init_nova_service_user_conf {
iniset $NOVA_CONF service_user send_service_user_token True
iniset $NOVA_CONF service_user auth_type password
@@ -830,7 +888,7 @@
NOVNC_WEB_DIR=/usr/share/novnc
install_package novnc
else
- NOVNC_WEB_DIR=$DEST/noVNC
+ NOVNC_WEB_DIR=$DEST/novnc
git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
fi
fi
@@ -969,7 +1027,6 @@
local old_path=$PATH
export PATH=$NOVA_BIN_DIR:$PATH
- local api_cell_conf=$NOVA_CONF
local compute_cell_conf=$NOVA_CONF
run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 63882e0..3e7d280 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -90,7 +90,7 @@
install_package libvirt libvirt-devel python3-libvirt
if is_arch "aarch64"; then
- install_package edk2.git-aarch64
+ install_package edk2-aarch64
fi
fi
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 321775d..c1cd132 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,6 +40,9 @@
configure_libvirt
iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
+ if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then
+ iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL"
+ fi
# Do not enable USB tablet input devices to avoid QEMU CPU overhead.
iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
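
Net effect when /dev/kvm is absent: nova.conf ends up with a [libvirt] section along these lines (a sketch of the defaults set above):

    [libvirt]
    virt_type = qemu
    cpu_mode = custom
    cpu_model = Nehalem
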
diff --git a/lib/swift b/lib/swift
index 9885241..251c462 100644
--- a/lib/swift
+++ b/lib/swift
@@ -179,12 +179,9 @@
# cleanup_swift() - Remove residual data files
function cleanup_swift {
rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
- if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
- fi
- if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
- rm ${SWIFT_DISK_IMAGE}
- fi
+
+ destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
+
rm -rf ${SWIFT_DATA_DIR}/run/
if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
_cleanup_swift_apache_wsgi
@@ -405,6 +402,11 @@
# Versioned Writes
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
+ # Add sha1 temporarily, see https://storyboard.openstack.org/#!/story/2010068
+ if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512"
+ fi
+
# Configure Ceilometer
if is_service_enabled ceilometer; then
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
@@ -430,7 +432,7 @@
swift_pipeline+=" authtoken"
if is_service_enabled s3api;then
swift_pipeline+=" s3token"
- iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3}
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
fi
swift_pipeline+=" keystoneauth"
@@ -521,7 +523,7 @@
local auth_vers
auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
- if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then
+ if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then
iniset ${testfile} func_test auth_port 443
else
iniset ${testfile} func_test auth_port 80
@@ -575,28 +577,7 @@
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
# Create a loopback disk and format it to XFS.
- if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
- if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
- sudo rm -f ${SWIFT_DISK_IMAGE}
- fi
- fi
-
- mkdir -p ${SWIFT_DATA_DIR}/drives/images
- sudo touch ${SWIFT_DISK_IMAGE}
- sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE}
-
- truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}
-
- # Make a fresh XFS filesystem
- /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE}
-
- # Mount the disk with mount options to make it as efficient as possible
- mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
- if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
- ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
- fi
+ create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE}
# Create a link to the above mount and
# create all of the directories needed to emulate a few different servers
@@ -866,12 +847,15 @@
function swift_configure_tempurls {
# note we are using swift credentials!
- OS_USERNAME=swift \
- OS_PASSWORD=$SERVICE_PASSWORD \
- OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \
- OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- openstack object store account \
+ openstack --os-cloud "" \
+ --os-region-name $REGION_NAME \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username=swift \
+ --os-password=$SERVICE_PASSWORD \
+ --os-user-domain-name=$SERVICE_DOMAIN_NAME \
+ --os-project-name=$SERVICE_PROJECT_NAME \
+ --os-project-domain-name=$SERVICE_DOMAIN_NAME \
+ object store account \
set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
}
diff --git a/lib/tempest b/lib/tempest
index 8fd54c5..87a2244 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -71,6 +71,17 @@
TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them. In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
+
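With these defaults the poller blocks for at most TEMPEST_GLANCE_IMPORT_POLL_INTERVAL * TEMPEST_GLANCE_IMPORT_POLL_LIMIT = 1 * 12 = 12 seconds. A hypothetical local.conf override for a slow import pipeline expecting two images:

    TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=5
    TEMPEST_GLANCE_IMPORT_POLL_LIMIT=60    # up to 5 minutes of polling
    TEMPEST_GLANCE_IMAGE_COUNT=2
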
# Neutron/Network variables
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -90,7 +101,6 @@
# it will run tempest with
TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
-
# Functions
# ---------
@@ -115,7 +125,9 @@
local tmp_c
tmp_c=$1
if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
- (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+ (cd $REQUIREMENTS_DIR &&
+ git show master:upper-constraints.txt 2>/dev/null ||
+ git show origin/master:upper-constraints.txt) > $tmp_c
else
echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -126,6 +138,48 @@
fi
}
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images. Takes 3 arguments: an array name and two
+# variable names. The array will contain the list of active image UUIDs;
+# if an image named ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of *both* other parameters.
+function get_active_images {
+ declare -n img_array=$1
+ declare -n img_id=$2
+ declare -n img_id_alt=$3
+
+ # start with a fresh array in case we are called multiple times
+ img_array=()
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ img_id="$IMAGE_UUID"
+ img_id_alt="$IMAGE_UUID"
+ fi
+ img_array+=($IMAGE_UUID)
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
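
The function takes names rather than values, binding them with bash namerefs (declare -n); a minimal usage sketch (configure_tempest below does exactly this):

    declare -a images
    local image_uuid image_uuid_alt
    get_active_images images image_uuid image_uuid_alt
    echo "found ${#images[@]} active image(s)"
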
+
+function poll_glance_images {
+ declare -n image_array=$1
+ declare -n image_id=$2
+ declare -n image_id_alt=$3
+ local -i poll_count
+
+ poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+ while (( poll_count-- > 0 )) ; do
+ sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+ get_active_images image_array image_id image_id_alt
+ if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+ return
+ fi
+ done
+ local msg
+ msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+ msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+ warn $LINENO "$msg"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -167,13 +221,21 @@
declare -a images
if is_service_enabled glance; then
- while read -r IMAGE_NAME IMAGE_UUID; do
- if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
- image_uuid="$IMAGE_UUID"
- image_uuid_alt="$IMAGE_UUID"
+ get_active_images images image_uuid image_uuid_alt
+
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ # Glance image import is asynchronous and may be configured
+ # to do image conversion. If image import is being used,
+ # it's possible that this code is being executed before the
+ # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ poll_glance_images images image_uuid image_uuid_alt
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+ exit 1
+ fi
fi
- images+=($IMAGE_UUID)
- done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ fi
case "${#images[*]}" in
0)
@@ -287,8 +349,8 @@
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME)
# make sure shared network presence does not confuses the tempest tests
- openstack --os-cloud devstack-admin network create --share shared
- openstack --os-cloud devstack-admin subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
fi
iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -443,6 +505,8 @@
iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
+ iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE"
+
# Scenario
SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
@@ -600,9 +664,23 @@
fi
done
- iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
+ # ``enforce_scope``
+ # If services enable enforce_scope for their policy, we need to
+ # enable the same on the Tempest side so that tests can be run
+ # with scoped tokens.
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope keystone true
+ iniset $TEMPEST_CONFIG auth admin_system 'all'
+ iniset $TEMPEST_CONFIG auth admin_project_name ''
+ fi
- iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope glance true
+ fi
+
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope cinder true
+ fi
if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
# libvirt-lxc does not support boot from volume or attaching volumes
@@ -617,13 +695,13 @@
local tmp_cfg_file
tmp_cfg_file=$(mktemp)
cd $TEMPEST_DIR
- if [[ "$OFFLINE" != "True" ]]; then
- tox -revenv-tempest --notest
- fi
local tmp_u_c_m
tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
set_tempest_venv_constraints $tmp_u_c_m
+ if [[ "$OFFLINE" != "True" ]]; then
+ tox -revenv-tempest --notest
+ fi
tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
rm -f $tmp_u_c_m
diff --git a/lib/tls b/lib/tls
index b3cc0b4..b8758cd 100644
--- a/lib/tls
+++ b/lib/tls
@@ -169,7 +169,7 @@
[ req ]
default_bits = 1024
-default_md = sha1
+default_md = sha256
prompt = no
distinguished_name = req_distinguished_name
@@ -261,7 +261,7 @@
if [ ! -r "$ca_dir/$cert_name.crt" ]; then
# Generate a signing request
$OPENSSL req \
- -sha1 \
+ -sha256 \
-newkey rsa \
-nodes \
-keyout $ca_dir/private/$cert_name.key \
@@ -301,7 +301,7 @@
if [ ! -r "$ca_dir/cacert.pem" ]; then
# Create a signing certificate request
$OPENSSL req -config $ca_dir/ca.conf \
- -sha1 \
+ -sha256 \
-newkey rsa \
-nodes \
-keyout $ca_dir/private/cacert.key \
@@ -557,7 +557,7 @@
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
</VirtualHost>
EOF
if is_suse ; then
diff --git a/openrc b/openrc
index beeaebe..6d488bb 100644
--- a/openrc
+++ b/openrc
@@ -74,7 +74,7 @@
fi
# Identity API version
-export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
+export OS_IDENTITY_API_VERSION=3
# Ask keystoneauth1 to use keystone
export OS_AUTH_TYPE=password
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 9e66f20..0047d78 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -17,9 +17,18 @@
dest: "{{ stage_dir }}/verify_tempest_conf.log"
state: hard
when: tempest_log.stat.exists
+ - name: Capture most recent qemu crash dump, if any
+ shell:
+ executable: /bin/bash
+ cmd: |
+ coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64
+ ignore_errors: yes
roles:
- export-devstack-journal
- apache-logs-conf
+ # This should run as early as possible to make sure we don't skew
+ # the post-tempest results with other activities.
+ - capture-performance-data
- devstack-project-conf
# capture-system-logs should be the last role before stage-output
- capture-system-logs
diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst
new file mode 100644
index 0000000..b7a37c2
--- /dev/null
+++ b/roles/capture-performance-data/README.rst
@@ -0,0 +1,25 @@
+Generate performance logs for staging
+
+Captures usage information from mysql, systemd, apache logs, and other
+parts of the system and generates a performance.json file in the
+staging directory.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory
+
+.. zuul:rolevar:: devstack_conf_dir
+ :default: /opt/stack
+
+ The base devstack destination directory
+
+.. zuul:rolevar:: debian_suse_apache_deref_logs
+
+ The apache logs found in the debian/suse locations
+
+.. zuul:rolevar:: redhat_apache_deref_logs
+
+ The apache logs found in the redhat locations
diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/roles/capture-performance-data/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_conf_dir: "{{ devstack_base_dir }}"
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
new file mode 100644
index 0000000..f9bb0f7
--- /dev/null
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -0,0 +1,16 @@
+- name: Generate statistics
+ shell:
+ executable: /bin/bash
+ cmd: |
+ source {{ devstack_conf_dir }}/stackrc
+ python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+ --db-user="$DATABASE_USER" \
+ --db-pass="$DATABASE_PASSWORD" \
+ --db-host="$DATABASE_HOST" \
+ {{ apache_logs }} > {{ stage_dir }}/performance.json
+ vars:
+ apache_logs: >-
+ {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %}
+ --apache-log="{{ i.stat.path }}"
+ {% endfor %}
+ ignore_errors: yes
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
- coredumps
- dns resolver
- listen53
+- services
- unbound.log
- deprecation messages
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # Services status
+ sudo systemctl status --all > services.txt 2>/dev/null
+
# NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
# failed to start due to denials from SELinux — useful for CentOS
# and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
index 400a8da..3bddf5e 100644
--- a/roles/devstack-ipv6-only-deployments-verification/README.rst
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -1,10 +1,10 @@
-Verify the IPv6-only deployments
+Verify all addresses in IPv6-only deployments
This role needs to be invoked from a playbook that
-run tests. This role verifies the IPv6 setting on
-devstack side and devstack deploy services on IPv6.
-This role is invoked before tests are run so that
-if any missing IPv6 setting or deployments can fail
+runs tests. This role verifies the IPv6 settings on the
+devstack side and that devstack deploys with all addresses
+being IPv6. This role is invoked before tests are run so that
+if there is any missing IPv6 setting, deployments can fail
the job early.
diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml
index 84f33f0..3adff17 100644
--- a/roles/setup-devstack-cache/tasks/main.yaml
+++ b/roles/setup-devstack-cache/tasks/main.yaml
@@ -2,6 +2,7 @@
# This uses hard links to avoid using extra space.
command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;"
become: true
+ ignore_errors: yes
- name: Set ownership of cached files
file:
diff --git a/samples/local.conf b/samples/local.conf
index 8b76137..55b7298 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -49,7 +49,7 @@
# path of the destination log file. A timestamp will be appended to the given name.
LOGFILE=$DEST/logs/stack.sh.log
-# Old log files are automatically removed after 7 days to keep things neat. Change
+# Old log files are automatically removed after 2 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index a4e621f..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-[metadata]
-name = DevStack
-summary = OpenStack DevStack
-description_file =
- README.rst
-author = OpenStack
-author_email = openstack-discuss@lists.openstack.org
-home_page = https://docs.openstack.org/devstack/latest
-classifier =
- Intended Audience :: Developers
- License :: OSI Approved :: Apache Software License
- Operating System :: POSIX :: Linux
diff --git a/setup.py b/setup.py
deleted file mode 100755
index 70c2b3f..0000000
--- a/setup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-setuptools.setup(
- setup_requires=['pbr'],
- pbr=True)
diff --git a/stack.sh b/stack.sh
index 48f61fb..c99189e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -67,7 +67,9 @@
umask 022
# Not all distros have sbin in PATH for regular users.
-PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin
+# osc will normally be installed at /usr/local/bin/openstack so ensure
+# /usr/local/bin is also in the path
+PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin
# Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
@@ -227,7 +229,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="focal|f34|opensuse-15.2|opensuse-tumbleweed|rhel8"
+SUPPORTED_DISTROS="bullseye|focal|jammy|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -278,7 +280,6 @@
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
-
# Configure Distro Repositories
# -----------------------------
@@ -300,13 +301,17 @@
}
function _install_rdo {
- if [[ "$TARGET_BRANCH" == "master" ]]; then
- # rdo-release.el8.rpm points to latest RDO release, use that for master
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
- else
- # For stable branches use corresponding release rpm
- rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
- sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ if [[ $DISTRO == "rhel8" ]]; then
+ if [[ "$TARGET_BRANCH" == "master" ]]; then
+ # rdo-release.el8.rpm points to latest RDO release, use that for master
+ sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ else
+ # For stable branches use corresponding release rpm
+ rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
+ sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ fi
+ elif [[ $DISTRO == "rhel9" ]]; then
+ sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo
fi
sudo dnf -y update
}
@@ -385,6 +390,10 @@
# RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
# Patch: https://github.com/rpm-software-management/dnf/pull/1448
echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
+elif [[ $DISTRO == "rhel9" ]]; then
+ sudo dnf config-manager --set-enabled crb
+ # rabbitmq and other packages are provided by RDO repositories.
+ _install_rdo
fi
# Ensure python is installed
@@ -683,6 +692,8 @@
# Last chance for the database password. This must be handled here
# because read_password is not a library function.
read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
+
+ define_database_baseurl
else
echo "No database enabled"
fi
@@ -749,7 +760,9 @@
# Bring down global requirements before any use of pip_install. This is
# necessary to ensure that the constraints file is in place before we
# attempt to apply any constraints to pip installs.
-git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+# We always need the master branch in addition to any stable branch, so
+# override GIT_DEPTH here.
+GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
# Install package requirements
# Source it so the entire environment is available
@@ -876,7 +889,7 @@
install_keystonemiddleware
if is_service_enabled keystone; then
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
stack_install_service keystone
configure_keystone
fi
@@ -1063,37 +1076,18 @@
# Keystone
# --------
-# Rather than just export these, we write them out to a
-# intermediate userrc file that can also be used to debug if
-# something goes wrong between here and running
-# tools/create_userrc.sh (this script relies on services other
-# than keystone being available, so we can't call it right now)
-cat > $TOP_DIR/userrc_early <<EOF
-# Use this for debugging issues before files in accrc are created
-
-# Set up password auth credentials now that Keystone is bootstrapped
-export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
-export OS_USERNAME=admin
-export OS_USER_DOMAIN_ID=default
-export OS_PASSWORD=$ADMIN_PASSWORD
-export OS_PROJECT_NAME=admin
-export OS_PROJECT_DOMAIN_ID=default
-export OS_REGION_NAME=$KEYSTONE_REGION_NAME
-
-EOF
-
if is_service_enabled tls-proxy; then
- echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
fi
-source $TOP_DIR/userrc_early
+# Write a clouds.yaml file and use the devstack-admin cloud
+write_clouds_yaml
+export OS_CLOUD=${OS_CLOUD:-devstack-admin}
if is_service_enabled keystone; then
echo_summary "Starting Keystone"
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
init_keystone
start_keystone
bootstrap_keystone
@@ -1118,9 +1112,6 @@
fi
-# Write a clouds.yaml file
-write_clouds_yaml
-
# Horizon
# -------
@@ -1161,7 +1152,8 @@
# ----
if is_service_enabled q-dhcp; then
- # Delete traces of nova networks from prior runs
+ # TODO(frickler): These are remnants from n-net, check which parts are really
+ # still needed for Neutron.
# Do not kill any dnsmasq instance spawned by NetworkManager
netman_pid=$(pidof NetworkManager || true)
if [ -z "$netman_pid" ]; then
@@ -1221,12 +1213,7 @@
echo_summary "Configuring Nova"
init_nova
- # Additional Nova configuration that is dependent on other services
- # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
- # not, remove the if here
- if is_service_enabled neutron; then
- async_runfunc configure_neutron_nova
- fi
+ async_runfunc configure_neutron_nova
fi
@@ -1380,7 +1367,7 @@
# which is helpful in image bundle steps.
if is_service_enabled nova && is_service_enabled keystone; then
- USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+ USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD"
if [ -f $SSL_BUNDLE_FILE ]; then
USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
@@ -1514,6 +1501,19 @@
time_totals
async_print_timing
+if is_service_enabled mysql; then
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+ echo ""
+ echo ""
+ echo "Post-stack database query stats:"
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'SELECT * FROM queries' -t 2>/dev/null
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'DELETE FROM queries' 2>/dev/null
+ fi
+fi
+
+
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
old mode 100755
new mode 100644
index 3dc800a..b3130e5
--- a/stackrc
+++ b/stackrc
@@ -175,21 +175,13 @@
export PS4='+ $(short_source): '
fi
-# Configure Identity API version: 2.0, 3
-IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
+# Configure Identity API version
+# TODO(frickler): Drop this when plugins no longer need it
+IDENTITY_API_VERSION=3
-# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack
-# deployment will be deploying the Identity v2 pipelines. If this option is set
-# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to
-# skip Identity v2 specific tests; and iii) configure Horizon to use Identity
-# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION
-# will to be set to ``3`` in order to make DevStack register the Identity
-# endpoint as v3. This flag is experimental and will be used as basis to
-# identify the projects which still have issues to operate with Identity v3.
-ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
-if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
- IDENTITY_API_VERSION=3
-fi
+# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides
+# each service's ${SERVICE}_ENFORCE_SCOPE variable.
+ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
@@ -205,6 +197,10 @@
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
+# This can be used to turn on various non-default items in the
+# performance_schema that are of interest to us
+MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
+
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
# in the format for timeout(1);
@@ -247,7 +243,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="yoga"
+DEVSTACK_SERIES="zed"
##############
#
@@ -415,6 +411,10 @@
GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH}
+# oslo.limit
+GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git}
+GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH}
+
# oslo.log
GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH}
@@ -602,8 +602,8 @@
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -623,7 +623,8 @@
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
- LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
+ LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom}
+ LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
@@ -670,7 +671,7 @@
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
-CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
+CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and
@@ -876,7 +877,31 @@
# This is either 127.0.0.1 for IPv4 or ::1 for IPv6
SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}}
-REGION_NAME=${REGION_NAME:-RegionOne}
+# TUNNEL IP version
+# This is the IP version to use for tunnel endpoints
+TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4}
+
+# Validate TUNNEL_IP_VERSION
+if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then
+ die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6"
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then
+ DEF_TUNNEL_ENDPOINT_IP=$HOST_IP
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then
+ # Only die if the user has not over-ridden the endpoint IP
+ if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then
+ die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6."
+ fi
+
+ DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6
+fi
+
+# Allow the use of an alternate address for tunnel endpoints.
+# Default is dependent on TUNNEL_IP_VERSION above.
+TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}}
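
A hypothetical local.conf snippet for IPv6 tunnel endpoints (the address is illustrative):

    TUNNEL_IP_VERSION=6
    HOST_IPV6=2001:db8::10    # or set TUNNEL_ENDPOINT_IP directly
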
# Configure services to use syslog instead of writing to individual log files
SYSLOG=$(trueorfalse False SYSLOG)
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index 6ed1647..6367cde 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -44,6 +44,9 @@
multi = foo1
multi = foo2
+[fff]
+ampersand =
+
[key_with_spaces]
rgw special key = something
@@ -85,7 +88,7 @@
# test iniget_sections
VAL=$(iniget_sections "${TEST_INI}")
-assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \
+assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \
del_separate_options del_same_option del_missing_option \
del_missing_option_multi del_no_options"
@@ -124,6 +127,13 @@
VAL=$(iniget ${TEST_INI} bbb handlers)
assert_equal "$VAL" "33,44" "inset at EOF"
+# Test with ampersand in values
+for i in `seq 3`; do
+ iniset ${TEST_INI} fff ampersand '&y'
+done
+VAL=$(iniget ${TEST_INI} fff ampersand)
+assert_equal "$VAL" "&y" "iniset ampersands in option"
+
# test empty option
if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
passed "ini_has_option: ddd.empty present"
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index ce1b344..839e3a1 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -45,6 +45,7 @@
ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes"
+ALL_LIBS+=" oslo.limit"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 919cacb..cb8d7aa 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -44,6 +44,15 @@
if ! getent passwd $STACK_USER >/dev/null; then
echo "Creating a user called $STACK_USER"
useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER
+ # RHEL-based distros create the home dir with 700 permissions, and
+ # Ubuntu 21.04+ with 750, i.e. the executable permission is missing
+ # for group and/or others. DevStack deployment will have issues with
+ # this, so fix it by adding the executable permission.
+ if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then
+ echo "Executable permission missing for $DEST, adding it"
+ chmod +x $DEST
+ fi
fi
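
For example, on a RHEL-based host the check and fix would go roughly like this (output illustrative; with the default umask, chmod +x adds execute for user, group, and others):

    $ stat -c '%A' /opt/stack
    drwx------                  # one 'x' < 3, so the fix applies
    $ sudo chmod +x /opt/stack && stat -c '%A' /opt/stack
    drwx--x--x
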
echo "Giving stack user passwordless sudo privileges"
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..5057f0f
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,120 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.db_name = url.database
+ LOG.info('Registered counter for database %s' % self.db_name)
+ new_url = sqlalchemy.engine.URL.create(url.drivername,
+ url.username,
+ url.password,
+ url.host,
+ url.port,
+ 'stats')
+
+ self.engine = sqlalchemy.create_engine(new_url)
+ self.queue = queue.Queue()
+ self.thread = None
+
+ def engine_created(self, engine):
+ """Hook the engine creation process.
+
+ This is the plug point for the sqlalchemy plugin. Using
+ plugin=$this in the URL causes this method to be called when
+ the engine is created, giving us a chance to hook it below.
+ """
+ event.listen(engine, "before_cursor_execute", self._log_event)
+
+ def ensure_writer_thread(self):
+ self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+ self.thread.start()
+
+ def _log_event(self, conn, cursor, statement, parameters, context,
+ executemany):
+ """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation from the first word of the
+ statement, or 'OTHER' if it cannot be determined.
+ """
+
+ # Start our thread if not running. If we were forked after the
+ # engine was created and this plugin was associated, our
+ # writer thread is gone, so respawn.
+ if not self.thread or not self.thread.is_alive():
+ self.ensure_writer_thread()
+
+ try:
+ op = statement.strip().split(' ', 1)[0] or 'OTHER'
+ except Exception:
+ op = 'OTHER'
+
+ self.queue.put((self.db_name, op))
+
+ def do_incr(self, db, op, count):
+ """Increment the counter for (db,op) by count."""
+
+ query = ('INSERT INTO queries (db, op, count) '
+ ' VALUES (%s, %s, %s) '
+ ' ON DUPLICATE KEY UPDATE count=count+%s')
+ try:
+ with self.engine.begin() as conn:
+ r = conn.execute(query, (db, op, count, count))
+ except Exception as e:
+ LOG.error('Failed to account for access to database %r: %s',
+ db, e)
+
+ def stat_writer(self):
+ """Consume messages from the queue and write them in batches.
+
+        This reads "hits" from a queue fed by _log_event() and
+        writes (db,op)+=count stats to the database after ten seconds
+        of no activity, to avoid triggering a write for every SELECT
+        call. We write no less often than every thirty seconds or 100
+        pending hits, to avoid being starved by constant activity.
+ """
+ LOG.debug('[%i] Writer thread running' % os.getpid())
+ while True:
+ to_write = {}
+ total = 0
+ last = time.time()
+ while time.time() - last < 30 and total < 100:
+ try:
+ item = self.queue.get(timeout=10)
+ to_write.setdefault(item, 0)
+ to_write[item] += 1
+ total += 1
+ except queue.Empty:
+ break
+
+ if to_write:
+ LOG.debug('[%i] Writing DB stats %s' % (
+ os.getpid(),
+ ','.join(['%s:%s=%i' % (db, op, count)
+ for (db, op), count in to_write.items()])))
+
+ for (db, op), count in to_write.items():
+ self.do_incr(db, op, count)
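
SQLAlchemy activates a CreateEnginePlugin when its entry-point name appears as a plugin= query argument in the connection URL, so a service opts in via its database section (a sketch; credentials and host are illustrative):

    [database]
    connection = mysql+pymysql://root:secret@127.0.0.1/nova?charset=utf8&plugin=dbcounter
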
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..12300bf
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+ [sqlalchemy.plugins]
+ dbcounter = dbcounter:LogCursorEventsPlugin
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 71fba2e..daa1bc6 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -26,39 +26,6 @@
FILES=$TOP_DIR/files
fi
-# Keystone Port Reservation
-# -------------------------
-# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
-# being used as ephemeral ports by the system. The default(s) are 35357 and
-# 35358 which are in the Linux defined ephemeral port range (in disagreement
-# with the IANA ephemeral port range). This is a workaround for bug #1253482
-# where Keystone will try and bind to the port and the port will already be
-# in use as an ephemeral port by another process. This places an explicit
-# exception into the Kernel for the Keystone AUTH ports.
-function fixup_keystone {
- keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-
- # Only do the reserved ports when available, on some system (like containers)
- # where it's not exposed we are almost pretty sure these ports would be
- # exclusive for our DevStack.
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
- # Get any currently reserved ports, strip off leading whitespace
- reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
-
- if [[ -z "${reserved_ports}" ]]; then
- # If there are no currently reserved ports, reserve the keystone ports
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
- else
- # If there are currently reserved ports, keep those and also reserve the
- # Keystone specific ports. Duplicate reservations are merged into a single
- # reservation (or range) automatically by the kernel.
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
- fi
- else
- echo_summary "WARNING: unable to reserve keystone ports"
- fi
-}
-
# Python Packages
# ---------------
@@ -116,6 +83,11 @@
if is_package_installed python3-setuptools; then
sudo dnf reinstall -y python3-setuptools
fi
+ # Workaround CentOS 8-stream iputils and systemd Bug
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2037807
+ if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then
+ sudo sysctl -w net.ipv4.ping_group_range='0 2147483647'
+ fi
}
function fixup_suse {
@@ -182,7 +154,6 @@
}
function fixup_all {
- fixup_keystone
fixup_ubuntu
fixup_fedora
fixup_suse
diff --git a/tools/get-stats.py b/tools/get-stats.py
new file mode 100755
index 0000000..b958af6
--- /dev/null
+++ b/tools/get-stats.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python3
+
+import argparse
+import csv
+import datetime
+import glob
+import itertools
+import json
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+
+try:
+ import psutil
+except ImportError:
+ psutil = None
+ print('No psutil, process information will not be included',
+ file=sys.stderr)
+
+try:
+ import pymysql
+except ImportError:
+ pymysql = None
+ print('No pymysql, database information will not be included',
+ file=sys.stderr)
+
+LOG = logging.getLogger('perf')
+
+# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
+
+
+def tryint(value):
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+
+
+def get_service_stats(service):
+ stats = {'MemoryCurrent': 0}
+ output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] +
+ ['-p%s' % stat for stat in stats])
+ for line in output.decode().split('\n'):
+ if not line:
+ continue
+ stat, val = line.split('=')
+ stats[stat] = tryint(val)
+
+ return stats
+
+
+def get_services_stats():
+ services = [os.path.basename(s) for s in
+ glob.glob('/etc/systemd/system/devstack@*.service')] + \
+ ['apache2.service']
+ return [dict(service=service, **get_service_stats(service))
+ for service in services]
+
+
+def get_process_stats(proc):
+ cmdline = proc.cmdline()
+ if 'python' in cmdline[0]:
+ cmdline = cmdline[1:]
+ return {'cmd': cmdline[0],
+ 'pid': proc.pid,
+ 'args': ' '.join(cmdline[1:]),
+ 'rss': proc.memory_info().rss}
+
+
+def get_processes_stats(matches):
+ me = os.getpid()
+ procs = psutil.process_iter()
+
+ def proc_matches(proc):
+ return me != proc.pid and any(
+ re.search(match, ' '.join(proc.cmdline()))
+ for match in matches)
+
+ return [
+ get_process_stats(proc)
+ for proc in procs
+ if proc_matches(proc)]
+
+
+def get_db_stats(host, user, passwd):
+ dbs = []
+ try:
+ db = pymysql.connect(host=host, user=user, password=passwd,
+ database='stats',
+ cursorclass=pymysql.cursors.DictCursor)
+ except pymysql.err.OperationalError as e:
+ if 'Unknown database' in str(e):
+ print('No stats database; assuming devstack failed',
+ file=sys.stderr)
+ return []
+ raise
+
+ with db:
+ with db.cursor() as cur:
+ cur.execute('SELECT db,op,count FROM queries')
+ for row in cur:
+ dbs.append({k: tryint(v) for k, v in row.items()})
+ return dbs
+
+
+def get_http_stats_for_log(logfile):
+ stats = {}
+ apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+ 'length', 'c', 'agent')
+ ignore_agents = ('curl', 'uwsgi', 'nova-status')
+ ignored_services = set()
+ for line in csv.reader(open(logfile), delimiter=' '):
+ fields = dict(zip(apache_fields, line))
+ if len(fields) != len(apache_fields):
+ # Not a combined access log, so we can bail completely
+ return []
+ try:
+ method, url, http = fields['request'].split(' ')
+ except ValueError:
+ method = url = http = ''
+ if 'HTTP' not in http:
+ # Not a combined access log, so we can bail completely
+ return []
+
+ # Tempest's User-Agent is unchanged, but client libraries and
+ # inter-service API calls use proper strings. So assume
+ # 'python-urllib' is tempest so we can tell it apart.
+ if 'python-urllib' in fields['agent'].lower():
+ agent = 'tempest'
+ else:
+ agent = fields['agent'].split(' ')[0]
+ if agent.startswith('python-'):
+ agent = agent.replace('python-', '')
+ if '/' in agent:
+ agent = agent.split('/')[0]
+
+ if agent in ignore_agents:
+ continue
+
+ try:
+ service, rest = url.strip('/').split('/', 1)
+ except ValueError:
+ # Root calls like "GET /identity"
+ service = url.strip('/')
+ rest = ''
+
+ if not service.isalpha():
+ ignored_services.add(service)
+ continue
+
+ method_key = '%s-%s' % (agent, method)
+ try:
+ length = int(fields['length'])
+ except ValueError:
+ LOG.warning('[%s] Failed to parse length %r from line %r' % (
+ logfile, fields['length'], line))
+ length = 0
+ stats.setdefault(service, {'largest': 0})
+ stats[service].setdefault(method_key, 0)
+ stats[service][method_key] += 1
+ stats[service]['largest'] = max(stats[service]['largest'],
+ length)
+
+ if ignored_services:
+ LOG.warning('Ignored services: %s' % ','.join(
+ sorted(ignored_services)))
+
+ # Flatten this for ES
+ return [{'service': service, 'log': os.path.basename(logfile),
+ **vals}
+ for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+ return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+ for log in logfiles))
+
+
+def get_report_info():
+ return {
+ 'timestamp': datetime.datetime.now().isoformat(),
+ 'hostname': socket.gethostname(),
+ 'version': 2,
+ }
+
+
+if __name__ == '__main__':
+ process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--db-user', default='root',
+ help=('MySQL user for collecting stats '
+ '(default: "root")'))
+ parser.add_argument('--db-pass', default=None,
+ help='MySQL password for db-user')
+ parser.add_argument('--db-host', default='localhost',
+ help='MySQL hostname')
+ parser.add_argument('--apache-log', action='append', default=[],
+ help='Collect API call stats from this apache log')
+ parser.add_argument('--process', action='append',
+ default=process_defaults,
+ help=('Include process stats for this cmdline regex '
+ '(default is %s)' % ','.join(process_defaults)))
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.WARNING)
+
+ data = {
+ 'services': get_services_stats(),
+ 'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+ args.db_user,
+ args.db_pass) or [],
+ 'processes': psutil and get_processes_stats(args.process) or [],
+ 'api': get_http_stats(args.apache_log),
+ 'report': get_report_info(),
+ }
+
+ print(json.dumps(data, indent=2))
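
A sketch of running the collector by hand, mirroring what the capture-performance-data role above does (paths and log locations are illustrative):

    source /opt/stack/stackrc
    python3 /opt/stack/devstack/tools/get-stats.py \
        --db-user="$DATABASE_USER" --db-pass="$DATABASE_PASSWORD" \
        --db-host="$DATABASE_HOST" \
        --apache-log=/var/log/apache2/access.log > performance.json
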
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index c72dc89..7c5d4c6 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -38,7 +38,7 @@
# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip
PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
-LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
+PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"}
GetDistro
echo "Distro: $DISTRO"
@@ -57,12 +57,21 @@
function install_get_pip {
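+ # Recent get-pip.py no longer supports Python 3.6; PyPA publishes
+ # a frozen copy under /pip/3.6/, so pick the URL and the cached
+ # file name based on the interpreter version.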
+ if [[ "$PYTHON3_VERSION" = "3.6" ]]; then
+ _pip_url=$PIP_GET_PIP36_URL
+ _local_pip="$FILES/$(basename $_pip_url)-py36"
+ else
+ _pip_url=$PIP_GET_PIP_URL
+ _local_pip="$FILES/$(basename $_pip_url)"
+ fi
+
# If get-pip.py isn't python, delete it. This was probably an
# outage on the server.
- if [[ -r $LOCAL_PIP ]]; then
- if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then
- echo "WARNING: Corrupt $LOCAL_PIP found removing"
- rm $LOCAL_PIP
+ if [[ -r $_local_pip ]]; then
+ if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then
+ echo "WARNING: Corrupt $_local_pip found removing"
+ rm $_local_pip
fi
fi
@@ -76,20 +85,20 @@
# Thus we use curl's "-z" feature to always check the modified
# since and only download if a new version is out -- but only if
# it seems we downloaded the file originally.
- if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
+ if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then
- # only test freshness if LOCAL_PIP is actually there,
+ # only test freshness if $_local_pip is actually there,
# otherwise we generate a scary warning.
local timecond=""
- if [[ -r $LOCAL_PIP ]]; then
- timecond="-z $LOCAL_PIP"
+ if [[ -r $_local_pip ]]; then
+ timecond="-z $_local_pip"
fi
curl -f --retry 6 --retry-delay 5 \
- $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+ $timecond -o $_local_pip $_pip_url || \
die $LINENO "Download of get-pip.py failed"
- touch $LOCAL_PIP.downloaded
+ touch $_local_pip.downloaded
fi
- sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
+ sudo -H -E python${PYTHON3_VERSION} $_local_pip
}
@@ -118,7 +127,7 @@
configure_pypi_alternative_url
fi
-if is_fedora && [[ ${DISTRO} == f* ]]; then
+if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then
# get-pip.py will not install over the python3-pip package in
# Fedora 34 any more.
# https://bugzilla.redhat.com/show_bug.cgi?id=1988935
@@ -129,14 +138,19 @@
# For general sanity, we just use the packaged pip. It should be
# recent enough anyway. This is included via rpms/general
: # Simply fall through
+elif is_ubuntu; then
+ # pip on Ubuntu 20.04 is new enough, too
+ # drop setuptools from upper-constraints (u-c)
+ sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt
else
install_get_pip
+
+ # Note setuptools is part of requirements.txt and we want to make sure
+ # we obey any versioning as described there.
+ pip_install_gr setuptools
fi
set -x
-# Note setuptools is part of requirements.txt and we want to make sure
-# we obey any versioning as described there.
-pip_install_gr setuptools
get_versions
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 7be995e..74dcdb2 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -65,7 +65,7 @@
def _read_clouds(self):
try:
with open(self._clouds_path) as clouds_file:
- self._clouds = yaml.load(clouds_file)
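+ # yaml.load() without an explicit Loader can construct
+ # arbitrary Python objects from untrusted input;
+ # safe_load() restricts the result to simple types.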
+ self._clouds = yaml.safe_load(clouds_file)
except IOError:
# The user doesn't have a clouds.yaml file.
print("The user clouds.yaml file didn't exist.")
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
index 2596395..0f0cba8 100755
--- a/tools/verify-ipv6-only-deployments.sh
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -23,32 +23,43 @@
_service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
local _service_local_host=''
_service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
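+ # Strip the [] brackets URLs place around IPv6 literals so the
+ # bare address can be validated.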
+ local _tunnel_endpoint_ip=''
+ _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d [])
if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
- echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
+ echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6, which is required for devstack to deploy services with IPv6 addresses."
exit 1
fi
+ if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then
+ echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6, so TUNNEL_ENDPOINT_IP cannot be an IPv6 address."
+ exit 1
+ fi
is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
if [[ "$is_service_host_ipv6" != "True" ]]; then
- echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_HOST "SERVICE_HOST is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
if [[ "$is_host_ipv6" != "True" ]]; then
- echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $HOST_IPV6 "HOST_IPV6 is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
if [[ "$is_service_listen_address" != "True" ]]; then
- echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
if [[ "$is_service_local_host" != "True" ]]; then
- echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))')
+ if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then
+ echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6, which means devstack will not deploy with an IPv6 endpoint address."
exit 1
fi
echo "Devstack is properly configured with IPv6"
- echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+ echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP
}
function sanity_check_system_ipv6_enabled {
@@ -72,7 +83,7 @@
is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
if [[ "$is_endpoint_ipv6" != "True" ]]; then
all_ipv6=False
- echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
+ echo $endpoint ": This is not an IPv6 endpoint, which means the corresponding service is not listening on an IPv6 address."
continue
fi
endpoints_verified=True
@@ -80,7 +91,7 @@
if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
exit 1
fi
- echo "All services deployed by devstack is on IPv6 endpoints"
+ echo "All services deployed by devstack are on IPv6 endpoints"
echo $endpoints
}
diff --git a/unstack.sh b/unstack.sh
index d9dca7c..a36af3f 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -176,12 +176,13 @@
# enabled backends. So if Cinder is enabled, and installed successfully we are
# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
if is_service_enabled cinder && is_package_installed lvm2; then
- # Using /bin/true here indicates a BUG - maybe the
- # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should
- # isolate this further down in lib/cinder cleanup.
- clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
clean_lvm_filter
fi
clean_pyc_files
rm -Rf $DEST/async
+
+# Clean any safe.directory items we wrote into the global
+# gitconfig. We can identify the relevant ones by checking that they
+# point to somewhere in our $DEST directory.
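+# (sed uses '+' as the address delimiter here since $DEST contains '/')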
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig