Merge "Configure nova unified limits quotas"
diff --git a/.zuul.yaml b/.zuul.yaml
index 00129b5..fc80e6c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,3 +1,11 @@
+- pragma:
+ # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to
+ # be using devstack
+ # TODO(gtema): delete this once r1 branch is merged into master
+ implied-branches:
+ - master
+ - feature/r1
+
- nodeset:
name: openstack-single-node
nodes:
@@ -49,10 +57,20 @@
- controller
- nodeset:
- name: devstack-single-node-centos-8
+ name: devstack-single-node-centos-8-stream
nodes:
- name: controller
- label: centos-8
+ label: centos-8-stream
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-centos-9-stream
+ nodes:
+ - name: controller
+ label: centos-9-stream
groups:
- name: tempest
nodes:
@@ -72,7 +90,27 @@
name: devstack-single-node-fedora-latest
nodes:
- name: controller
- label: fedora-32
+ label: fedora-35
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-debian-bullseye
+ nodes:
+ - name: controller
+ label: debian-bullseye
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-openeuler-20.03-sp2
+ nodes:
+ - name: controller
+ label: openEuler-20-03-LTS-SP2
groups:
- name: tempest
nodes:
@@ -109,6 +147,36 @@
- compute1
- nodeset:
+ name: openstack-two-node-centos-8-stream
+ nodes:
+ - name: controller
+ label: centos-8-stream
+ - name: compute1
+ label: centos-8-stream
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
name: openstack-two-node-focal
nodes:
- name: controller
@@ -328,6 +396,7 @@
/var/log/postgresql: logs
/var/log/mysql: logs
/var/log/libvirt: logs
+ /etc/libvirt: logs
/etc/sudoers: logs
/etc/sudoers.d: logs
'{{ stage_dir }}/iptables.txt': logs
@@ -403,7 +472,7 @@
PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
devstack_services:
# Shared services
- dstat: true
+ dstat: false
etcd3: true
memory_tracker: true
mysql: true
@@ -412,7 +481,7 @@
subnode:
devstack_services:
# Shared services
- dstat: true
+ dstat: false
memory_tracker: true
devstack_localrc:
# Multinode specific settings
@@ -468,6 +537,7 @@
SWIFT_HASH: 1234123412341234
DEBUG_LIBVIRT_COREDUMPS: true
NOVA_VNC_ENABLED: true
+ OVN_DBS_LOG_LEVEL: dbg
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -477,7 +547,7 @@
# Core services enabled for this branch.
# This list replaces the test-matrix.
# Shared services
- dstat: true
+ dstat: false
etcd3: true
memory_tracker: true
mysql: true
@@ -496,13 +566,14 @@
n-sch: true
# Placement service
placement-api: true
+ # OVN services
+ ovn-controller: true
+ ovn-northd: true
+ ovs-vswitchd: true
+ ovsdb-server: true
# Neutron services
- q-agt: true
- q-dhcp: true
- q-l3: true
- q-meta: true
- q-metering: true
q-svc: true
+ q-ovn-metadata-agent: true
# Swift services
s-account: true
s-container: true
@@ -513,7 +584,6 @@
c-bak: true
c-sch: true
c-vol: true
- cinder: true
# Services we don't need.
# This section is not really needed, it's for readability.
horizon: false
@@ -527,15 +597,19 @@
# Core services enabled for this branch.
# This list replaces the test-matrix.
# Shared services
- dstat: true
+ dstat: false
memory_tracker: true
tls-proxy: true
# Nova services
n-cpu: true
# Placement services
placement-client: true
+ # OVN services
+ ovn-controller: true
+ ovs-vswitchd: true
+ ovsdb-server: true
# Neutron services
- q-agt: true
+ q-ovn-metadata-agent: true
# Cinder services
c-bak: true
c-vol: true
@@ -553,6 +627,7 @@
GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
NOVA_VNC_ENABLED: true
+ ENABLE_CHASSIS_AS_GW: false
- job:
name: devstack-ipv6
@@ -565,6 +640,18 @@
SERVICE_HOST: ""
- job:
+ name: devstack-enforce-scope
+ parent: devstack
+ description: |
+ This job runs the devstack with scope checks enabled.
+ vars:
+ devstack_localrc:
+ # Keep enabling the services here to run with system scope
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
+- job:
name: devstack-multinode
parent: devstack
nodeset: openstack-two-node-focal
@@ -577,33 +664,90 @@
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
- name: devstack-platform-centos-8
+ name: devstack-platform-centos-8-stream
parent: tempest-full-py3
- description: Centos 8 platform test
- nodeset: devstack-single-node-centos-8
+ description: CentOS 8 Stream platform test
+ nodeset: devstack-single-node-centos-8-stream
voting: false
timeout: 9000
+ vars:
+ configure_swap_size: 4096
- job:
- name: devstack-platform-bionic
+ name: devstack-platform-centos-9-stream
parent: tempest-full-py3
- description: Ubuntu Bionic platform test
- nodeset: openstack-single-node-bionic
+ description: CentOS 9 Stream platform test
+ nodeset: devstack-single-node-centos-9-stream
voting: false
+ timeout: 9000
vars:
- devstack_localrc:
- CINDER_ISCSI_HELPER: tgtadm
+ configure_swap_size: 4096
- job:
- name: devstack-async
+ name: devstack-platform-debian-bullseye
parent: tempest-full-py3
- description: Async mode enabled
+ description: Debian Bullseye platform test
+ nodeset: devstack-single-node-debian-bullseye
voting: false
+ timeout: 9000
vars:
+ configure_swap_size: 4096
+ # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS
+ # for the time being.
devstack_localrc:
- DEVSTACK_PARALLEL: True
- zuul_copy_output:
- /opt/stack/async: logs
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+
+- job:
+ name: devstack-platform-openEuler-20.03-SP2
+ parent: tempest-full-py3
+ description: openEuler 20.03 SP2 platform test
+ nodeset: devstack-single-node-openeuler-20.03-sp2
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 4096
+ devstack_localrc:
+ # NOTE(wxy): OVN package is not supported by openEuler yet. Build it
+ # from source instead.
+ OVN_BUILD_FROM_SOURCE: True
+
+- job:
+ name: devstack-no-tls-proxy
+ parent: tempest-full-py3
+ description: |
+ Tempest job with tls-proxy off.
+
+ Some gates run devstack like this and it follows different code paths.
+ vars:
+ devstack_services:
+ tls-proxy: false
- job:
name: devstack-platform-fedora-latest
@@ -611,6 +755,12 @@
description: Fedora latest platform test
nodeset: devstack-single-node-fedora-latest
voting: false
+ vars:
+ configure_swap_size: 4096
+ # Python 3.10 dependency issues; see
+ # https://bugs.launchpad.net/horizon/+bug/1960204
+ devstack_services:
+ horizon: false
- job:
name: devstack-platform-fedora-latest-virt-preview
@@ -619,6 +769,7 @@
nodeset: devstack-single-node-fedora-latest
voting: false
vars:
+ configure_swap_size: 4096
devstack_localrc:
ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
@@ -678,6 +829,7 @@
- job:
name: devstack-unit-tests
+ nodeset: ubuntu-focal
description: |
Runs unit tests on devstack project.
@@ -693,32 +845,26 @@
jobs:
- devstack
- devstack-ipv6
+ - devstack-enforce-scope
- devstack-platform-fedora-latest
- - devstack-platform-centos-8
- - devstack-platform-bionic
- - devstack-async
+ - devstack-platform-centos-8-stream
+ - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bullseye
+ - devstack-platform-openEuler-20.03-SP2
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
- voting: false
- - swift-dsvm-functional:
- voting: false
- irrelevant-files: &dsvm-irrelevant-files
- - ^.*\.rst$
- - ^doc/.*$
- - swift-dsvm-functional-py3:
- voting: false
- irrelevant-files: *dsvm-irrelevant-files
+ - ironic-tempest-bios-ipmi-direct-tinyipa
+ - swift-dsvm-functional
- grenade:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-grenade-multinode:
+ - neutron-ovs-grenade-multinode:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-tempest-linuxbridge:
+ - neutron-linuxbridge-tempest:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -748,17 +894,20 @@
jobs:
- devstack
- devstack-ipv6
+ - devstack-enforce-scope
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
- - neutron-grenade-multinode:
+ - neutron-ovs-grenade-multinode:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-tempest-linuxbridge:
+ - neutron-linuxbridge-tempest:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ - ironic-tempest-bios-ipmi-direct-tinyipa
+ - swift-dsvm-functional
- grenade:
irrelevant-files:
- ^.*\.rst$
@@ -808,11 +957,11 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-tempest-dvr:
+ - neutron-ovs-tempest-dvr:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-tempest-dvr-ha-multinode-full:
+ - neutron-ovs-tempest-dvr-ha-multinode-full:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -825,3 +974,7 @@
- ^.*\.rst$
- ^doc/.*$
- devstack-platform-fedora-latest-virt-preview
+ - devstack-no-tls-proxy
+ periodic:
+ jobs:
+ - devstack-no-tls-proxy
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 2d0c894..dd8f21f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -642,6 +642,12 @@
VOLUME_NAME_PREFIX="volume-"
VOLUME_BACKING_FILE_SIZE=24G
+When running highly concurrent tests, the default per-project quotas
+for volumes, backups, or snapshots may be too small. These can be
+adjusted by setting ``CINDER_QUOTA_VOLUMES``, ``CINDER_QUOTA_BACKUPS``,
+or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for
+each is 10.)
+
Keystone
~~~~~~~~
@@ -666,7 +672,6 @@
disable_service horizon
KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
- KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
REGION_NAME=RegionTwo
KEYSTONE_REGION_NAME=RegionOne
@@ -679,15 +684,22 @@
KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit
it in the configuration of RegionOne.
-Disabling Identity API v2
-+++++++++++++++++++++++++
+Glance
+++++++
-The Identity API v2 is deprecated as of Mitaka and it is recommended to only
-use the v3 API. It is possible to setup keystone without v2 API, by doing:
+The default image size quota of 1GiB may be too small if larger images
+are to be used. Change the default at setup time with:
::
- ENABLE_IDENTITY_V2=False
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL=5000
+
+or at runtime via:
+
+::
+
+ openstack --os-cloud devstack-system-admin registered limit update \
+ --service glance --default-limit 5000 --region RegionOne image_size_total
.. _arch-configuration:
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 5e0df56..4de238f 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -13,7 +13,7 @@
Communication
~~~~~~~~~~~~~
-* IRC channel ``#openstack-qa`` at FreeNode
+* IRC channel ``#openstack-qa`` at OFTC.
* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index c0b3f58..81c5945 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -169,7 +169,7 @@
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
GLANCE_HOSTPORT=$SERVICE_HOST:9292
- ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client
+ ENABLED_SERVICES=n-cpu,c-vol,placement-client,ovn-controller,ovs-vswitchd,ovsdb-server,q-ovn-metadata-agent
NOVA_VNC_ENABLED=True
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
VNCSERVER_LISTEN=$HOST_IP
@@ -395,7 +395,7 @@
3. Verify that login via ssh works without a password::
- ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION
+ ssh -i /root/.ssh/id_rsa stack@DESTINATION
In essence, this means that every compute node's root user's public RSA key
must exist in every other compute node's stack user's authorized_keys file and
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 8b8acde..feb50ce 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,9 +38,9 @@
Start with a clean and minimal install of a Linux system. DevStack
attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE.
+latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler.
-If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the
+If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
most tested, and will probably go the smoothest.
Add Stack User (optional)
@@ -63,7 +63,7 @@
.. code-block:: console
$ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
- $ sudo su - stack
+ $ sudo -u stack -i
Download DevStack
-----------------
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 4e7c2d7..2e8e8f5 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -39,8 +39,6 @@
openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
-openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika <https://opendev.org/openstack/devstack-plugin-pika>`__
-openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq <https://opendev.org/openstack/devstack-plugin-zmq>`__
openstack/ec2-api `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
openstack/freezer `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
openstack/freezer-api `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
@@ -52,8 +50,6 @@
openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector <https://opendev.org/openstack/ironic-inspector>`__
openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter <https://opendev.org/openstack/ironic-prometheus-exporter>`__
openstack/ironic-ui `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
-openstack/karbor `https://opendev.org/openstack/karbor <https://opendev.org/openstack/karbor>`__
-openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard <https://opendev.org/openstack/karbor-dashboard>`__
openstack/keystone `https://opendev.org/openstack/keystone <https://opendev.org/openstack/keystone>`__
openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes <https://opendev.org/openstack/kuryr-kubernetes>`__
openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork <https://opendev.org/openstack/kuryr-libnetwork>`__
@@ -65,26 +61,22 @@
openstack/manila-ui `https://opendev.org/openstack/manila-ui <https://opendev.org/openstack/manila-ui>`__
openstack/masakari `https://opendev.org/openstack/masakari <https://opendev.org/openstack/masakari>`__
openstack/mistral `https://opendev.org/openstack/mistral <https://opendev.org/openstack/mistral>`__
-openstack/monasca-analytics `https://opendev.org/openstack/monasca-analytics <https://opendev.org/openstack/monasca-analytics>`__
openstack/monasca-api `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
-openstack/monasca-ceilometer `https://opendev.org/openstack/monasca-ceilometer <https://opendev.org/openstack/monasca-ceilometer>`__
openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
-openstack/monasca-log-api `https://opendev.org/openstack/monasca-log-api <https://opendev.org/openstack/monasca-log-api>`__
openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
-openstack/monasca-transform `https://opendev.org/openstack/monasca-transform <https://opendev.org/openstack/monasca-transform>`__
openstack/murano `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
-openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw <https://opendev.org/openstack/networking-l2gw>`__
-openstack/networking-midonet `https://opendev.org/openstack/networking-midonet <https://opendev.org/openstack/networking-midonet>`__
openstack/networking-odl `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
openstack/networking-powervm `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
openstack/networking-sfc `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
openstack/neutron `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
+openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
+openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
@@ -93,27 +85,25 @@
openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
openstack/openstacksdk `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
-openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze <https://opendev.org/openstack/os-loganalyze>`__
openstack/osprofiler `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
-openstack/panko `https://opendev.org/openstack/panko <https://opendev.org/openstack/panko>`__
openstack/patrole `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
-openstack/qinling `https://opendev.org/openstack/qinling <https://opendev.org/openstack/qinling>`__
-openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard <https://opendev.org/openstack/qinling-dashboard>`__
openstack/rally-openstack `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
openstack/sahara `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
-openstack/searchlight `https://opendev.org/openstack/searchlight <https://opendev.org/openstack/searchlight>`__
-openstack/searchlight-ui `https://opendev.org/openstack/searchlight-ui <https://opendev.org/openstack/searchlight-ui>`__
openstack/senlin `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
openstack/shade `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
+openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver <https://opendev.org/openstack/skyline-apiserver>`__
openstack/solum `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
openstack/storlets `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
openstack/tacker `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
+openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service <https://opendev.org/openstack/tap-as-a-service>`__
openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin <https://opendev.org/openstack/telemetry-tempest-plugin>`__
openstack/trove `https://opendev.org/openstack/trove <https://opendev.org/openstack/trove>`__
openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard <https://opendev.org/openstack/trove-dashboard>`__
+openstack/venus `https://opendev.org/openstack/venus <https://opendev.org/openstack/venus>`__
+openstack/venus-dashboard `https://opendev.org/openstack/venus-dashboard <https://opendev.org/openstack/venus-dashboard>`__
openstack/vitrage `https://opendev.org/openstack/vitrage <https://opendev.org/openstack/vitrage>`__
openstack/vitrage-dashboard `https://opendev.org/openstack/vitrage-dashboard <https://opendev.org/openstack/vitrage-dashboard>`__
openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin <https://opendev.org/openstack/vitrage-tempest-plugin>`__
@@ -143,6 +133,7 @@
x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs <https://opendev.org/x/devstack-plugin-hdfs>`__
x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu <https://opendev.org/x/devstack-plugin-libvirt-qemu>`__
x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb <https://opendev.org/x/devstack-plugin-mariadb>`__
+x/devstack-plugin-tobiko `https://opendev.org/x/devstack-plugin-tobiko <https://opendev.org/x/devstack-plugin-tobiko>`__
x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax <https://opendev.org/x/devstack-plugin-vmax>`__
x/drbd-devstack `https://opendev.org/x/drbd-devstack <https://opendev.org/x/drbd-devstack>`__
x/fenix `https://opendev.org/x/fenix <https://opendev.org/x/fenix>`__
@@ -169,6 +160,7 @@
x/networking-hpe `https://opendev.org/x/networking-hpe <https://opendev.org/x/networking-hpe>`__
x/networking-huawei `https://opendev.org/x/networking-huawei <https://opendev.org/x/networking-huawei>`__
x/networking-infoblox `https://opendev.org/x/networking-infoblox <https://opendev.org/x/networking-infoblox>`__
+x/networking-l2gw `https://opendev.org/x/networking-l2gw <https://opendev.org/x/networking-l2gw>`__
x/networking-lagopus `https://opendev.org/x/networking-lagopus <https://opendev.org/x/networking-lagopus>`__
x/networking-mlnx `https://opendev.org/x/networking-mlnx <https://opendev.org/x/networking-mlnx>`__
x/networking-nec `https://opendev.org/x/networking-nec <https://opendev.org/x/networking-nec>`__
@@ -190,10 +182,8 @@
x/scalpels `https://opendev.org/x/scalpels <https://opendev.org/x/scalpels>`__
x/slogging `https://opendev.org/x/slogging <https://opendev.org/x/slogging>`__
x/stackube `https://opendev.org/x/stackube <https://opendev.org/x/stackube>`__
-x/tap-as-a-service `https://opendev.org/x/tap-as-a-service <https://opendev.org/x/tap-as-a-service>`__
x/tap-as-a-service-dashboard `https://opendev.org/x/tap-as-a-service-dashboard <https://opendev.org/x/tap-as-a-service-dashboard>`__
x/tatu `https://opendev.org/x/tatu <https://opendev.org/x/tatu>`__
-x/tobiko `https://opendev.org/x/tobiko <https://opendev.org/x/tobiko>`__
x/trio2o `https://opendev.org/x/trio2o <https://opendev.org/x/trio2o>`__
x/valet `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
x/vmware-nsx `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1284360..1a353e5 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,5 +1,4 @@
Listen %PUBLICPORT%
-Listen %ADMINPORT%
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
<Directory %KEYSTONE_BIN%>
@@ -20,20 +19,6 @@
%SSLKEYFILE%
</VirtualHost>
-<VirtualHost *:%ADMINPORT%>
- WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%M"
- ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
%SSLLISTEN%<VirtualHost *:443>
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
@@ -49,13 +34,3 @@
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
-
-Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_admin>
- SetHandler wsgi-script
- Options +ExecCGI
-
- WSGIProcessGroup keystone-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
-</Location>
diff --git a/files/apts b/files/apts
deleted file mode 120000
index ef926de..0000000
--- a/files/apts
+++ /dev/null
@@ -1 +0,0 @@
-debs/
\ No newline at end of file
diff --git a/files/debs/dstat b/files/debs/dstat
index 2b643b8..40d00f4 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1 +1,2 @@
-dstat
+dstat # dist:bionic
+pcp
diff --git a/files/debs/general b/files/debs/general
index 7e481b4..364f3cc 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -14,7 +14,6 @@
libapache2-mod-proxy-uwsgi
libffi-dev # for pyOpenSSL
libjpeg-dev # Pillow 3.0.0
-libmysqlclient-dev # MySQL-python
libpcre3-dev # for python-pcre
libpq-dev # psycopg2
libssl-dev # for pyOpenSSL
diff --git a/files/debs/neutron-common b/files/debs/neutron-common
index e548396..f6afc5b 100644
--- a/files/debs/neutron-common
+++ b/files/debs/neutron-common
@@ -6,7 +6,6 @@
iptables
iputils-arping
iputils-ping
-libmysqlclient-dev
mysql-server #NOPRIME
postgresql-server-dev-all
python3-mysqldb
diff --git a/files/debs/nova b/files/debs/nova
index e194414..0194f00 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -8,7 +8,6 @@
iputils-arping
kpartx
libjs-jquery-tablesorter # Needed for coverage html reports
-libmysqlclient-dev
libvirt-clients # NOPRIME
libvirt-daemon-system # NOPRIME
libvirt-dev # NOPRIME
diff --git a/files/debs/swift b/files/debs/swift
index 4b8ac3d..67c6c8d 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -2,5 +2,6 @@
liberasurecode-dev
make
memcached
+rsync
sqlite3
xfsprogs
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index 2f1f139..d3b9be8 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,4 +1,4 @@
-dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
+dn: olcDatabase={${LDAP_OLCDB_NUMBER}}${LDAP_OLCDB_TYPE},cn=config
changetype: modify
replace: olcSuffix
olcSuffix: ${BASE_DN}
diff --git a/files/lvm-backing-file.template b/files/lvm-backing-file.template
new file mode 100644
index 0000000..dc519d7
--- /dev/null
+++ b/files/lvm-backing-file.template
@@ -0,0 +1,16 @@
+[Unit]
+Description=Activate LVM backing file %BACKING_FILE%
+DefaultDependencies=no
+After=systemd-udev-settle.service
+Before=lvm2-activation-early.service
+Wants=systemd-udev-settle.service
+
+[Service]
+ExecStart=/sbin/losetup --find --show %DIRECTIO% %BACKING_FILE%
+ExecStop=/bin/sh -c '/sbin/losetup -d $$(/sbin/losetup --associated %BACKING_FILE% -O NAME -n)'
+RemainAfterExit=yes
+Type=oneshot
+
+[Install]
+WantedBy=local-fs.target
+Also=systemd-udev-settle.service
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 64befc5..93b5746 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
ceph # NOPRIME
-redhat-lsb-core
+redhat-lsb-core # not:rhel9,openEuler-20.03
xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 33da0a5..163a7c8 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -16,6 +16,7 @@
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
+make # dist:openEuler-20.03
net-tools
openssh-server
openssl
@@ -27,7 +28,8 @@
python3-devel
python3-pip
python3-systemd
-redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
+redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376
+systemd-devel # dist:openEuler-20.03
tar
tcpdump
unzip
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 68e5472..7ce5a72 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,9 +1,10 @@
cryptsetup
dosfstools
-genisoimage
+genisoimage # not:rhel9
iscsi-initiator-utils
libosinfo
lvm2
sg3_utils
# Stuff for diablo volumes
sysfsutils
+xorriso # not:rhel8
diff --git a/files/rpms/nova b/files/rpms/nova
index 8ea8ccc..9e8621c 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,13 +3,14 @@
dnsmasq # for q-dhcp
dnsmasq-utils # for dhcp_release
ebtables
-genisoimage # required for config_drive
+genisoimage # not:rhel9 required for config_drive
iptables
iputils
-kernel-modules
+kernel-modules # not:openEuler-20.03
kpartx
parted
polkit
rabbitmq-server # NOPRIME
sqlite
sudo
+xorriso # not:rhel8
diff --git a/files/rpms/swift b/files/rpms/swift
index 376c6f3..a838d78 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,7 +1,7 @@
curl
-liberasurecode-devel
+liberasurecode-devel # not:openEuler-20.03
memcached
rsync-daemon
sqlite
xfsprogs
-xinetd
+xinetd # not:f35,rhel9
diff --git a/functions-common b/functions-common
index 340da75..b2cf9d9 100644
--- a/functions-common
+++ b/functions-common
@@ -85,7 +85,7 @@
if [ -f "$SSL_BUNDLE_FILE" ]; then
CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
fi
- # demo -> devstack
+ # devstack: user with the member role on demo project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack \
@@ -96,18 +96,7 @@
--os-password $ADMIN_PASSWORD \
--os-project-name demo
- # alt_demo -> devstack-alt
- $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack-alt \
- --os-region-name $REGION_NAME \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_SERVICE_URI \
- --os-username alt_demo \
- --os-password $ADMIN_PASSWORD \
- --os-project-name alt_demo
-
- # admin -> devstack-admin
+ # devstack-admin: user with the admin role on the admin project
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-admin \
@@ -118,7 +107,62 @@
--os-password $ADMIN_PASSWORD \
--os-project-name admin
- # admin with a system-scoped token -> devstack-system
+ # devstack-admin-demo: user with the admin role on the demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-admin-demo \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username admin \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+
+ # devstack-alt: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-member: user with the member role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-alt-reader: user with the reader role on alt_demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-alt-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username alt_demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name alt_demo
+
+ # devstack-reader: user with the reader role on demo project
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username demo_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+
+ # devstack-system-admin: user with the admin role on the system
$PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
--file $CLOUDS_YAML \
--os-cloud devstack-system-admin \
@@ -129,6 +173,28 @@
--os-password $ADMIN_PASSWORD \
--os-system-scope all
+ # devstack-system-member: user with the member role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-member \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_member \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
+ # devstack-system-reader: user with the reader role on the system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-reader \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username system_reader \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
cat >> $CLOUDS_YAML <<EOF
functional:
image_name: $DEFAULT_IMAGE_NAME
@@ -333,7 +399,7 @@
elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
sudo zypper -n install lsb-release
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
- sudo dnf install -y redhat-lsb-core
+ sudo dnf install -y redhat-lsb-core || sudo dnf install -y openeuler-lsb
else
die $LINENO "Unable to find or auto-install lsb_release"
fi
@@ -346,12 +412,19 @@
# - os_VENDOR
# - os_PACKAGE
function GetOSVersion {
- # We only support distros that provide a sane lsb_release
- _ensure_lsb_release
+ # CentOS Stream 9 does not provide lsb_release
+ source /etc/os-release
+ if [[ "${ID}${VERSION}" == "centos9" ]]; then
+ os_RELEASE=${VERSION_ID}
+ os_CODENAME="n/a"
+ os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+ else
+ _ensure_lsb_release
- os_RELEASE=$(lsb_release -r -s)
- os_CODENAME=$(lsb_release -c -s)
- os_VENDOR=$(lsb_release -i -s)
+ os_RELEASE=$(lsb_release -r -s)
+ os_CODENAME=$(lsb_release -c -s)
+ os_VENDOR=$(lsb_release -i -s)
+ fi
if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
os_PACKAGE="deb"
@@ -391,12 +464,17 @@
DISTRO="sle${os_RELEASE%.*}"
elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
"$os_VENDOR" =~ (CentOS) || \
+ "$os_VENDOR" =~ (AlmaLinux) || \
"$os_VENDOR" =~ (Scientific) || \
"$os_VENDOR" =~ (OracleServer) || \
"$os_VENDOR" =~ (Virtuozzo) ]]; then
# Drop the . release as we assume it's compatible
# XXX re-evaluate when we get RHEL10
DISTRO="rhel${os_RELEASE::1}"
+ elif [[ "$os_VENDOR" =~ (openEuler) ]]; then
+ # The DISTRO here is `openEuler-20.03`. Note that only openEuler
+ # 20.03 LTS SP2 is fully tested; other SP versions may have bugs.
+ DISTRO="openEuler-$os_RELEASE"
else
# We can't make a good choice here. Setting a sensible DISTRO
# is part of the problem, but not the major issue -- we really
@@ -448,9 +526,11 @@
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
+ [ "$os_VENDOR" = "openEuler" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "RedHatEnterprise" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
+ [ "$os_VENDOR" = "AlmaLinux" ] || \
[ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
}
@@ -496,7 +576,12 @@
[ "$os_PACKAGE" = "deb" ]
}
-
+function is_openeuler {
+ if [[ -z "$os_PACKAGE" ]]; then
+ GetOSVersion
+ fi
+ [ "$os_VENDOR" = "openEuler" ]
+}
# Git Functions
# =============
@@ -547,7 +632,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to 'required-projects' in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+ die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
fi
git_timed clone $git_clone_flags $git_remote $git_dest
fi
@@ -559,7 +644,7 @@
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
echo "the project to the \$PROJECTS variable in the job definition."
- die $LINENO "Cloning not allowed in this configuration"
+ die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
fi
# '--branch' can also take tags
git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
@@ -793,10 +878,10 @@
# Gets domain id
domain_id=$(
# Gets domain id
- openstack domain show $1 \
+ openstack --os-cloud devstack-system-admin domain show $1 \
-f value -c id 2>/dev/null ||
# Creates new domain
- openstack domain create $1 \
+ openstack --os-cloud devstack-system-admin domain create $1 \
--description "$2" \
-f value -c id
)
@@ -811,7 +896,7 @@
# Gets group id
group_id=$(
# Creates new group with --or-show
- openstack group create $1 \
+ openstack --os-cloud devstack-system-admin group create $1 \
--domain $2 --description "$desc" --or-show \
-f value -c id
)
@@ -830,7 +915,7 @@
# Gets user id
user_id=$(
# Creates new user with --or-show
- openstack user create \
+ openstack --os-cloud devstack-system-admin user create \
$1 \
--password "$2" \
--domain=$3 \
@@ -847,7 +932,7 @@
local project_id
project_id=$(
# Creates new project with --or-show
- openstack project create $1 \
+ openstack --os-cloud devstack-system-admin project create $1 \
--domain=$2 \
--or-show -f value -c id
)
@@ -860,7 +945,7 @@
local role_id
role_id=$(
# Creates role with --or-show
- openstack role create $1 \
+ openstack --os-cloud devstack-system-admin role create $1 \
--or-show -f value -c id
)
echo $role_id
@@ -890,7 +975,7 @@
domain_args=$(_get_domain_args $4 $5)
# Gets user role id
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--project $3 \
@@ -898,11 +983,11 @@
| grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--user $2 \
--project $3 \
$domain_args
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--project $3 \
@@ -917,17 +1002,17 @@
function get_or_add_user_domain_role {
local user_role_id
# Gets user role id
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--domain $3 \
| grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--user $2 \
--domain $3
- user_role_id=$(openstack role assignment list \
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--domain $3 \
@@ -936,22 +1021,53 @@
echo $user_role_id
}
+# Gets or adds user role to system
+# Usage: get_or_add_user_system_role <role> <user> <system> [<user_domain>]
+function get_or_add_user_system_role {
+ local user_role_id
+ local domain_args
+
+ domain_args=$(_get_domain_args $4)
+
+ # Gets user role id
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ if [[ -z "$user_role_id" ]]; then
+ # Adds role to user and get it
+ openstack --os-cloud devstack-system-admin role add $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args
+ user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
+ --role $1 \
+ --user $2 \
+ --system $3 \
+ $domain_args \
+ -f value -c Role)
+ fi
+ echo $user_role_id
+}
+
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
local group_role_id
# Gets group role id
- group_role_id=$(openstack role assignment list \
+ group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--group $2 \
--project $3 \
-f value)
if [[ -z "$group_role_id" ]]; then
# Adds role to group and get it
- openstack role add $1 \
+ openstack --os-cloud devstack-system-admin role add $1 \
--group $2 \
--project $3
- group_role_id=$(openstack role assignment list \
+ group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--group $2 \
--project $3 \
@@ -967,9 +1083,9 @@
# Gets service id
service_id=$(
# Gets service id
- openstack service show $2 -f value -c id 2>/dev/null ||
+ openstack --os-cloud devstack-system-admin service show $2 -f value -c id 2>/dev/null ||
# Creates new service if not exists
- openstack service create \
+ openstack --os-cloud devstack-system-admin service create \
$2 \
--name $1 \
--description="$3" \
@@ -982,14 +1098,14 @@
# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
function _get_or_create_endpoint_with_interface {
local endpoint_id
- endpoint_id=$(openstack endpoint list \
+ endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint list \
--service $1 \
--interface $2 \
--region $4 \
-c ID -f value)
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
- endpoint_id=$(openstack endpoint create \
+ endpoint_id=$(openstack --os-cloud devstack-system-admin endpoint create \
$1 $2 $3 --region $4 -f value -c id)
fi
@@ -1023,7 +1139,7 @@
# Get a URL from the identity service
# Usage: get_endpoint_url <service> <interface>
function get_endpoint_url {
- echo $(openstack endpoint list \
+ echo $(openstack --os-cloud devstack-system-admin endpoint list \
--service $1 --interface $2 \
-c URL -f value)
}
@@ -1037,6 +1153,11 @@
return 1
}
+function is_ironic_enforce_scope {
+ is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+ return 1
+}
+
# Package Functions
# =================
diff --git a/inc/python b/inc/python
index 8941fd0..9382d35 100644
--- a/inc/python
+++ b/inc/python
@@ -378,12 +378,13 @@
project_dir=$(cd $project_dir && pwd)
if [ -n "$REQUIREMENTS_DIR" ]; then
- # Constrain this package to this project directory from here on out.
+ # Remove this package from constraints before we install it.
+ # That way, later installs won't "downgrade" the install from
+ # source we are about to do.
local name
name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
$REQUIREMENTS_DIR/.venv/bin/edit-constraints \
- $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
- "$flags file://$project_dir#egg=$name"
+ $REQUIREMENTS_DIR/upper-constraints.txt -- $name
fi
setup_package $bindep $project_dir "$flags" $extras
diff --git a/lib/apache b/lib/apache
index 870a65a..f29c7ea 100644
--- a/lib/apache
+++ b/lib/apache
@@ -82,22 +82,15 @@
apxs="apxs"
fi
- # This varies based on packaged/installed. If we've
- # pip_installed, then the pip setup will only build a "python"
- # module that will be either python2 or python3 depending on what
- # it was built with.
- #
- # For package installs, the distro ships both plugins and you need
- # to select the right one ... it will not be autodetected.
- UWSGI_PYTHON_PLUGIN=python3
-
if is_ubuntu; then
local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
- if [[ "$DISTRO" == 'bionic' ]]; then
- pkg_list="${pkg_list} uwsgi-plugin-python"
- fi
install_package ${pkg_list}
- elif is_fedora; then
+ # NOTE(ianw) 2022-02-03 : Fedora 35 needs to skip this and fall
+ # into the install-from-source because the upstream packages
+ # didn't fix Python 3.10 compatibility before release. Should be
+ # fixed in uwsgi 4.9.0; can remove this when packages available
+ # or we drop this release
+ elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then
# Note httpd comes with mod_proxy_uwsgi and it is loaded by
# default; the mod_proxy_uwsgi package actually conflicts now.
# See:
@@ -125,7 +118,6 @@
popd
# delete the temp directory
sudo rm -rf $dir
- UWSGI_PYTHON_PLUGIN=python
fi
if is_ubuntu || is_suse ; then
@@ -286,7 +278,7 @@
# configured after graceful shutdown
iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
+ iniset "$file" uwsgi plugins http,python3
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -306,7 +298,7 @@
apache_conf=$(apache_site_config_for $name)
iniset "$file" uwsgi socket "$socket"
iniset "$file" uwsgi chmod-socket 666
- echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf
+ echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
fi
@@ -339,7 +331,7 @@
iniset "$file" uwsgi die-on-term true
iniset "$file" uwsgi exit-on-reload false
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
+ iniset "$file" uwsgi plugins http,python3
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
diff --git a/lib/cinder b/lib/cinder
index 34d6186..b029fa0 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -91,12 +91,6 @@
# Default to lioadm
CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
-# Bionic needs to default to tgtadm until support is dropped within devstack
-# as the rtslib-fb-targetctl service doesn't start after installing lioadm.
-if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then
- CINDER_ISCSI_HELPER=tgtadm
-fi
-
# EL and SUSE should only use lioadm
if is_fedora || is_suse; then
if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
@@ -104,6 +98,22 @@
fi
fi
+# When Cinder is used as a backend for Glance, it can be configured to clone
+# the volume containing image data directly in the backend instead of
+# transferring data from volume to volume. Value is a comma separated list of
+# schemes (currently only 'file' and 'cinder' are supported). The default
+# configuration in Cinder is empty (that is, do not use this feature). NOTE:
+# to use this feature you must also enable GLANCE_SHOW_DIRECT_URL and/or
+# GLANCE_SHOW_MULTIPLE_LOCATIONS for glance-api.conf.
+CINDER_ALLOWED_DIRECT_URL_SCHEMES=${CINDER_ALLOWED_DIRECT_URL_SCHEMES:-}
+if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
+ if [[ "${GLANCE_SHOW_DIRECT_URL:-False}" != "True" \
+ && "${GLANCE_SHOW_MULTIPLE_LOCATIONS:-False}" != "True" ]]; then
+ warn $LINENO "CINDER_ALLOWED_DIRECT_URL_SCHEMES is set, but neither \
+GLANCE_SHOW_DIRECT_URL nor GLANCE_SHOW_MULTIPLE_LOCATIONS is True"
+ fi
+fi
+
# For backward compatibility
# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured
# along with ceph backend driver.
@@ -150,6 +160,12 @@
# enable the cache for all cinder backends.
CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
+# Flag to set the oslo_policy.enforce_scope. This is used to switch
+# the Volume API policies to start checking the scope of the token. By default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+CINDER_ENFORCE_SCOPE=$(trueorfalse False CINDER_ENFORCE_SCOPE)
+
# Functions
# ---------
@@ -266,6 +282,14 @@
fi
iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
+ if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
+ iniset $CINDER_CONF DEFAULT allowed_direct_url_schemes $CINDER_ALLOWED_DIRECT_URL_SCHEMES
+ fi
+
+ # set default quotas
+ iniset $CINDER_CONF DEFAULT quota_volumes ${CINDER_QUOTA_VOLUMES:-10}
+ iniset $CINDER_CONF DEFAULT quota_backups ${CINDER_QUOTA_BACKUPS:-10}
+ iniset $CINDER_CONF DEFAULT quota_snapshots ${CINDER_QUOTA_SNAPSHOTS:-10}
# Avoid RPC timeouts in slow CI and test environments by doubling the
# default response timeout set by RPC clients. See bug #1873234 for more
@@ -286,9 +310,6 @@
default_name=$be_name
fi
enabled_backends+=$be_name,
-
- iniset $CINDER_CONF $be_name volume_clear $CINDER_VOLUME_CLEAR
-
done
iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
if [[ -n "$default_name" ]]; then
@@ -332,7 +353,9 @@
# Format logging
setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI
- write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ if is_service_enabled c-api; then
+ write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ fi
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
configure_cinder_driver
@@ -356,6 +379,11 @@
elif is_service_enabled etcd3; then
iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
+
+ if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+ iniset $CINDER_CONF oslo_policy enforce_scope true
+ iniset $CINDER_CONF oslo_policy enforce_new_defaults true
+ fi
}
# create_cinder_accounts() - Set up common required cinder accounts
@@ -379,12 +407,6 @@
"$REGION_NAME" \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
- get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
- get_or_create_endpoint \
- "volumev2" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s"
-
get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
get_or_create_endpoint \
"volumev3" \
@@ -396,12 +418,6 @@
"$REGION_NAME" \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
- get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
- get_or_create_endpoint \
- "volumev2" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s"
-
get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
get_or_create_endpoint \
"volumev3" \
@@ -431,10 +447,6 @@
be_type=${be%%:*}
be_name=${be##*:}
if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
- # Always init the default volume group for lvm.
- if [[ "$be_type" == "lvm" ]]; then
- init_default_lvm_volume_group
- fi
init_cinder_backend_${be_type} ${be_name}
fi
done
diff --git a/lib/cinder_backends/ceph_iscsi b/lib/cinder_backends/ceph_iscsi
new file mode 100644
index 0000000..94412e0
--- /dev/null
+++ b/lib/cinder_backends/ceph_iscsi
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# lib/cinder_backends/ceph_iscsi
+# Configure the ceph_iscsi backend
+
+# Enable with:
+#
+# CINDER_ENABLED_BACKENDS+=,ceph_iscsi:ceph_iscsi
+#
+# Optional parameters:
+# CEPH_ISCSI_API_URL=<url to the rbd-target-api service>
+#
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_ceph_backend_ceph_iscsi - called from configure_cinder()
+
+
+# Save trace setting
+_XTRACE_CINDER_CEPH_ISCSI=$(set +o | grep xtrace)
+set +o xtrace
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph_iscsi - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph_iscsi $name
+function configure_cinder_backend_ceph_iscsi {
+ local be_name=$1
+
+ CEPH_ISCSI_API_URL=${CEPH_ISCSI_API_URL:-http://$CEPH_ISCSI_API_HOST:$CEPH_ISCSI_API_PORT}
+
+ iniset $CINDER_CONF $be_name volume_backend_name $be_name
+ iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver"
+ iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+ iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+ iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+ iniset $CINDER_CONF $be_name rbd_iscsi_api_user "$CEPH_ISCSI_API_USER"
+ iniset $CINDER_CONF $be_name rbd_iscsi_api_password "$CEPH_ISCSI_API_PASSWORD"
+ iniset $CINDER_CONF $be_name rbd_iscsi_api_url "$CEPH_ISCSI_API_URL"
+ iniset $CINDER_CONF $be_name rbd_iscsi_target_iqn "$CEPH_ISCSI_TARGET_IQN"
+ iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+ iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+ iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+ pip_install rbd-iscsi-client
+}
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH_ISCSI
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 497081c..e03ef14 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -52,7 +52,7 @@
iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
-
+ iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
}
# init_cinder_backend_lvm - Initialize volume group
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
index d7c977e..c7ec306 100644
--- a/lib/cinder_backups/swift
+++ b/lib/cinder_backups/swift
@@ -24,6 +24,9 @@
# to use it.
iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+ if is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT backup_swift_ca_cert_file $SSL_BUNDLE_FILE
+ fi
}
# init_cinder_backup_swift: nothing to do
diff --git a/lib/database b/lib/database
index 7940cf2..78563f6 100644
--- a/lib/database
+++ b/lib/database
@@ -89,6 +89,10 @@
DATABASE_PASSWORD=$MYSQL_PASSWORD
fi
+ return 0
+}
+
+function define_database_baseurl {
# We configure Nova, Horizon, Glance and Keystone to use MySQL as their
# database server. While they share a single server, each has their own
# database and tables.
@@ -100,8 +104,6 @@
# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
-
- return 0
}
# Recreate a given database
diff --git a/lib/databases/mysql b/lib/databases/mysql
index d4969d7..30e4b7c 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -12,6 +12,7 @@
set +o xtrace
MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
+INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES)
register_database mysql
@@ -25,6 +26,8 @@
# provide a mysql.service symlink for backwards-compatibility, but
# let's not rely on that.
MYSQL_SERVICE_NAME=mariadb
+ elif [[ "$DISTRO" == "bullseye" ]]; then
+ MYSQL_SERVICE_NAME=mariadb
fi
fi
@@ -105,7 +108,7 @@
# In mariadb e.g. on Ubuntu socket plugin is used for authentication
# as root so it works only as sudo. To restore old "mysql like" behaviour,
# we need to change auth plugin for root user
- if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
fi
@@ -173,18 +176,20 @@
chmod 0600 $HOME/.my.cnf
fi
# Install mysql-server
- if is_oraclelinux; then
- install_package mysql-community-server
- elif is_fedora; then
- install_package mariadb-server mariadb-devel
- sudo systemctl enable $MYSQL_SERVICE_NAME
- elif is_suse; then
- install_package mariadb-server
- sudo systemctl enable $MYSQL_SERVICE_NAME
- elif is_ubuntu; then
- install_package $MYSQL_SERVICE_NAME-server
- else
- exit_distro_not_supported "mysql installation"
+ if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then
+ if is_oraclelinux; then
+ install_package mysql-community-server
+ elif is_fedora; then
+ install_package mariadb-server mariadb-devel mariadb
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_suse; then
+ install_package mariadb-server
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_ubuntu; then
+ install_package $MYSQL_SERVICE_NAME-server
+ else
+ exit_distro_not_supported "mysql installation"
+ fi
fi
}
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 618834b..4f0a5a0 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -13,7 +13,7 @@
MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200}
-
+INSTALL_DATABASE_SERVER_PACKAGES=$(trueorfalse True INSTALL_DATABASE_SERVER_PACKAGES)
register_database postgresql
@@ -95,7 +95,6 @@
function install_database_postgresql {
echo_summary "Installing postgresql"
- deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle"
local pgpass=$HOME/.pgpass
if [[ ! -e $pgpass ]]; then
cat <<EOF > $pgpass
@@ -105,15 +104,17 @@
else
sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass
fi
- if is_ubuntu; then
- install_package postgresql
- elif is_fedora || is_suse; then
- install_package postgresql-server
- if is_fedora; then
- sudo systemctl enable postgresql
+ if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then
+ if is_ubuntu; then
+ install_package postgresql
+ elif is_fedora || is_suse; then
+ install_package postgresql-server
+ if is_fedora; then
+ sudo systemctl enable postgresql
+ fi
+ else
+ exit_distro_not_supported "postgresql installation"
fi
- else
- exit_distro_not_supported "postgresql installation"
fi
}
diff --git a/lib/glance b/lib/glance
index e789aff..9bba938 100644
--- a/lib/glance
+++ b/lib/glance
@@ -51,6 +51,18 @@
if is_opensuse; then
GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance
fi
+# When Cinder is used as a glance store, you can optionally configure cinder to
+# optimize bootable volume creation by allowing volumes to be cloned directly
+# in the backend instead of transferring data via Glance. To use this feature,
+# set CINDER_ALLOWED_DIRECT_URL_SCHEMES for cinder.conf and enable
+# GLANCE_SHOW_DIRECT_URL and/or GLANCE_SHOW_MULTIPLE_LOCATIONS for Glance. The
+# default value for both of these is False, because for some backends they
+# present a grave security risk (though not for Cinder, because all that's
+# exposed is the volume_id where the image data is stored.) See OSSN-0065 for
+# more information: https://wiki.openstack.org/wiki/OSSN/OSSN-0065
+GLANCE_SHOW_DIRECT_URL=$(trueorfalse False GLANCE_SHOW_DIRECT_URL)
+GLANCE_SHOW_MULTIPLE_LOCATIONS=$(trueorfalse False GLANCE_SHOW_MULTIPLE_LOCATIONS)
+
# Glance multi-store configuration
# Boolean flag to enable multiple store configuration for glance
GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES)
@@ -84,6 +96,13 @@
GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store}
GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW)
+GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS)
+
+# Flag to set the oslo_policy.enforce_scope. This is used to switch
+# the Image API policies to start checking the scope of the token. By default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE)
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
@@ -107,6 +126,10 @@
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api
GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini
+
+# Glance default limit for Devstack
+GLANCE_LIMIT_IMAGE_SIZE_TOTAL=${GLANCE_LIMIT_IMAGE_SIZE_TOTAL:-1000}
+
# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet
# TODO(mtreinish): Remove the eventlet path here and in all the similar
# conditionals below after the Pike release
@@ -263,6 +286,38 @@
fi
}
+function configure_glance_quotas {
+
+ # Registered limit resources in keystone are system-specific resources.
+ # Make sure we use a system-scoped token to interact with this API.
+
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_size_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit $GLANCE_LIMIT_IMAGE_SIZE_TOTAL --region $REGION_NAME image_stage_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_total
+ openstack --os-cloud devstack-system-admin registered limit create --service glance \
+ --default-limit 100 --region $REGION_NAME image_count_uploading
+
+ # Tell glance to use these limits
+ iniset $GLANCE_API_CONF DEFAULT use_keystone_limits True
+
+ # Configure oslo_limit so it can talk to keystone
+ iniset $GLANCE_API_CONF oslo_limit user_domain_name $SERVICE_DOMAIN_NAME
+ iniset $GLANCE_API_CONF oslo_limit password $SERVICE_PASSWORD
+ iniset $GLANCE_API_CONF oslo_limit username glance
+ iniset $GLANCE_API_CONF oslo_limit auth_type password
+ iniset $GLANCE_API_CONF oslo_limit auth_url $KEYSTONE_SERVICE_URI
+ iniset $GLANCE_API_CONF oslo_limit system_scope "'all'"
+ iniset $GLANCE_API_CONF oslo_limit endpoint_id \
+ $(openstack --os-cloud devstack-system-admin endpoint list --service glance -f value -c ID)
+
+ # Allow the glance service user to read quotas
+ openstack --os-cloud devstack-system-admin role add --user glance --user-domain Default \
+ --system all reader
+}
+
# configure_glance() - Set config files, create data dirs, etc
function configure_glance {
sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
@@ -283,6 +338,9 @@
if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then
iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop"
fi
+ # Only use these if you know what you are doing! See OSSN-0065
+ iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL
+ iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS
# Configure glance_store
configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES
@@ -373,6 +431,12 @@
iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
+
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+ iniset $GLANCE_API_CONF oslo_policy enforce_scope true
+ iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
+ iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
+ fi
}
# create_glance_accounts() - Set up common required glance accounts
@@ -403,6 +467,11 @@
service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
+
+ if [[ "$GLANCE_ENABLE_QUOTAS" = True ]]; then
+ configure_glance_quotas
+ fi
+
fi
}
@@ -491,6 +560,11 @@
iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \
filesystem_store_datadir "${remote_data}/os_glance_tasks_store"
+ # Point this worker to use different cache dir
+ mkdir -p "$remote_data/cache"
+ iniset $(glance_remote_conf "$GLANCE_API_CONF") DEFAULT \
+ image_cache_dir "${remote_data}/cache"
+
# Change our uwsgi to our new port
sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \
"$glance_remote_uwsgi"
diff --git a/lib/keystone b/lib/keystone
index 66e867c..a4c8a52 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -9,7 +9,6 @@
# - ``tls`` file
# - ``DEST``, ``STACK_USER``
# - ``FILES``
-# - ``IDENTITY_API_VERSION``
# - ``BASE_SQL_CONN``
# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
# - ``S3_SERVICE_PORT`` (template backend only)
@@ -50,9 +49,7 @@
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
-KEYSTONE_ADMIN_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-admin.ini
KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
-KEYSTONE_ADMIN_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-admin
# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values:
# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi
@@ -81,21 +78,12 @@
KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
-# Set Keystone interface configuration
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
-
# Public facing bits
KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-# Bind hosts
-KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
-
# Set the project for service accounts in Keystone
SERVICE_DOMAIN_NAME=${SERVICE_DOMAIN_NAME:-Default}
SERVICE_PROJECT_NAME=${SERVICE_PROJECT_NAME:-service}
@@ -106,7 +94,6 @@
# if we are running with SSL use https protocols
if is_service_enabled tls-proxy; then
- KEYSTONE_AUTH_PROTOCOL="https"
KEYSTONE_SERVICE_PROTOCOL="https"
fi
@@ -134,6 +121,15 @@
# Cache settings
KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+# Whether to create a keystone admin endpoint for legacy applications
+KEYSTONE_ADMIN_ENDPOINT=$(trueorfalse False KEYSTONE_ADMIN_ENDPOINT)
+
+# Flag to set the oslo_policy.enforce_scope. This is used to switch
+# the Identity API policies to start checking the scope of token. By Default,
+# this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+KEYSTONE_ENFORCE_SCOPE=$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)
+
# Functions
# ---------
@@ -154,11 +150,8 @@
sudo rm -f $(apache_site_config_for keystone)
else
stop_process "keystone"
- # TODO: remove admin at pike-2
remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
sudo rm -f $(apache_site_config_for keystone-wsgi-public)
- sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
fi
}
@@ -171,12 +164,10 @@
local keystone_certfile=""
local keystone_keyfile=""
local keystone_service_port=$KEYSTONE_SERVICE_PORT
- local keystone_auth_port=$KEYSTONE_AUTH_PORT
local venv_path=""
if is_service_enabled tls-proxy; then
keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
- keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
fi
if [[ ${USE_VENV} = True ]]; then
venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
@@ -185,7 +176,6 @@
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$keystone_service_port|g;
- s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%SSLLISTEN%|$keystone_ssl_listen|g;
s|%SSLENGINE%|$keystone_ssl|g;
@@ -223,22 +213,17 @@
iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_port=$KEYSTONE_AUTH_PORT
if is_service_enabled tls-proxy; then
# Set the service ports for a proxy to take the originals
service_port=$KEYSTONE_SERVICE_PORT_INT
- auth_port=$KEYSTONE_AUTH_PORT_INT
fi
- # Override the endpoints advertised by keystone (the public_endpoint and
- # admin_endpoint) so that clients use the correct endpoint. By default, the
- # keystone server uses the public_port and admin_port which isn't going to
- # work when you want to use a different port (in the case of proxy), or you
- # don't want the port (in the case of putting keystone on a path in
- # apache).
+ # Override the endpoints advertised by keystone so that clients use the correct
+ # endpoint. By default, the keystone server uses the public_port which isn't
+ # going to work when you want to use a different port (in the case of proxy),
+ # or you don't want the port (in the case of putting keystone on a path in apache).
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
- iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
@@ -261,7 +246,6 @@
_config_keystone_apache_wsgi
else # uwsgi
write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity"
- write_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI" "/identity_admin"
fi
iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
@@ -281,6 +265,11 @@
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
fi
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ iniset $KEYSTONE_CONF oslo_policy enforce_scope true
+ iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
+ iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+ fi
}
# create_keystone_accounts() - Sets up common required keystone accounts
@@ -303,20 +292,28 @@
# admins admin admin admin
# nonadmins demo, alt_demo member, anotherrole demo, alt_demo
+# System User Roles
+# ------------------------------------------------------------------
+# all admin admin
+# all system_reader reader
+# all system_member member
+
# Migrated from keystone_data.sh
function create_keystone_accounts {
# The keystone bootstrapping process (performed via keystone-manage
- # bootstrap) creates an admin user, admin role, member role, and admin
+ # bootstrap) creates an admin user and an admin
# project. As a sanity check we exercise the CLI to retrieve the IDs for
# these values.
local admin_project
admin_project=$(openstack project show "admin" -f value -c id)
local admin_user
admin_user=$(openstack user show "admin" -f value -c id)
+ # These roles are also created during bootstrap but we don't need their IDs
local admin_role="admin"
local member_role="member"
+ local reader_role="reader"
async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
@@ -352,21 +349,53 @@
async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+
async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
- # alt_demo
+ # Create a user to act as a reader on project demo
+ local demo_reader
+ demo_reader=$(get_or_create_user "demo_reader" \
+ "$ADMIN_PASSWORD" "default" "demo_reader@example.com")
+
+ async_run ks-demo-reader get_or_add_user_project_role $reader_role $demo_reader $demo_project
+
+ # Create a different project called alt_demo
local alt_demo_project
alt_demo_project=$(get_or_create_project "alt_demo" default)
+ # Create a user to act as member, admin and anotherrole on project alt_demo
local alt_demo_user
alt_demo_user=$(get_or_create_user "alt_demo" \
"$ADMIN_PASSWORD" "default" "alt_demo@example.com")
- async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
- async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+ async_run ks-alt-admin get_or_add_user_project_role $admin_role $alt_demo_user $alt_demo_project
async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+ # Create another user to act as a member on project alt_demo
+ local alt_demo_member
+ alt_demo_member=$(get_or_create_user "alt_demo_member" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_member@example.com")
+ async_run ks-alt-member-user get_or_add_user_project_role $member_role $alt_demo_member $alt_demo_project
+
+ # Create another user to act as a reader on project alt_demo
+ local alt_demo_reader
+ alt_demo_reader=$(get_or_create_user "alt_demo_reader" \
+ "$ADMIN_PASSWORD" "default" "alt_demo_reader@example.com")
+ async_run ks-alt-reader-user get_or_add_user_project_role $reader_role $alt_demo_reader $alt_demo_project
+
+ # Create two users, give one the member role on the system and the other the
+ # reader role on the system. These two users model system-member and
+ # system-reader personas. The admin user already has the admin role on the
+ # system and we can re-use this user as a system-admin.
+ system_member_user=$(get_or_create_user "system_member" \
+ "$ADMIN_PASSWORD" "default" "system_member@example.com")
+ async_run ks-system-member get_or_add_user_system_role $member_role $system_member_user "all"
+
+ system_reader_user=$(get_or_create_user "system_reader" \
+ "$ADMIN_PASSWORD" "default" "system_reader@example.com")
+ async_run ks-system-reader get_or_add_user_system_role $reader_role $system_reader_user "all"
+
# groups
local admin_group
admin_group=$(get_or_create_group "admins" \
@@ -381,8 +410,9 @@
async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
- async_wait ks-demo-{member,admin,another,invis}
- async_wait ks-alt-{member,admin,another}
+ async_wait ks-demo-{member,admin,another,invis,reader}
+ async_wait ks-alt-{admin,another,member-user,reader-user}
+ async_wait ks-system-{member,reader}
async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
if is_service_enabled ldap; then
@@ -518,7 +548,7 @@
function start_keystone {
# Get right service port for testing
local service_port=$KEYSTONE_SERVICE_PORT
- local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
+ local auth_protocol=$KEYSTONE_SERVICE_PROTOCOL
if is_service_enabled tls-proxy; then
service_port=$KEYSTONE_SERVICE_PORT_INT
auth_protocol="http"
@@ -537,7 +567,7 @@
# unencryted traffic at this point.
# If running in Apache, use the path rather than port.
- local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
+ local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v3/
if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
die $LINENO "keystone did not start"
@@ -546,7 +576,6 @@
# Start proxies if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
- start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
fi
# (re)start memcached to make sure we have a clean memcache.
@@ -567,11 +596,8 @@
# This function uses the following GLOBAL variables:
# - ``KEYSTONE_BIN_DIR``
# - ``ADMIN_PASSWORD``
-# - ``IDENTITY_API_VERSION``
# - ``REGION_NAME``
-# - ``KEYSTONE_SERVICE_PROTOCOL``
-# - ``KEYSTONE_SERVICE_HOST``
-# - ``KEYSTONE_SERVICE_PORT``
+# - ``KEYSTONE_SERVICE_URI``
function bootstrap_keystone {
$KEYSTONE_BIN_DIR/keystone-manage bootstrap \
--bootstrap-username admin \
@@ -580,8 +606,16 @@
--bootstrap-role-name admin \
--bootstrap-service-name keystone \
--bootstrap-region-id "$REGION_NAME" \
- --bootstrap-admin-url "$KEYSTONE_AUTH_URI" \
--bootstrap-public-url "$KEYSTONE_SERVICE_URI"
+ if [ "$KEYSTONE_ADMIN_ENDPOINT" == "True" ]; then
+ openstack endpoint create --region "$REGION_NAME" \
+ --os-username admin \
+ --os-user-domain-id default \
+ --os-password "$ADMIN_PASSWORD" \
+ --os-project-name admin \
+ --os-project-domain-id default \
+ keystone admin "$KEYSTONE_SERVICE_URI"
+ fi
}
# create_ldap_domain() - Create domain file and initialize domain with a user
diff --git a/lib/ldap b/lib/ldap
index 5a53d0e..ea5faa1 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -33,14 +33,17 @@
if is_ubuntu; then
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=mdb
LDAP_ROOTPW_COMMAND=replace
elif is_fedora; then
LDAP_OLCDB_NUMBER=2
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
elif is_suse; then
# SUSE has slappasswd in /usr/sbin/
PATH=$PATH:/usr/sbin/
LDAP_OLCDB_NUMBER=1
+ LDAP_OLCDB_TYPE=hdb
LDAP_ROOTPW_COMMAND=add
LDAP_SERVICE_NAME=ldap
fi
@@ -56,6 +59,7 @@
local slappass=$2
sed -e "
s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${LDAP_OLCDB_TYPE}|$LDAP_OLCDB_TYPE|
s|\${SLAPPASS}|$slappass|
s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
s|\${BASE_DC}|$LDAP_BASE_DC|
@@ -157,7 +161,7 @@
slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION
slapd slapd/domain string Users
slapd shared/organization string $LDAP_DOMAIN
- slapd slapd/backend string HDB
+ slapd slapd/backend string ${LDAP_OLCDB_TYPE^^}
slapd slapd/purge_database boolean true
slapd slapd/move_old_database boolean true
slapd slapd/allow_ldap_v2 boolean false
diff --git a/lib/libraries b/lib/libraries
old mode 100644
new mode 100755
index c7aa815..9ea3230
--- a/lib/libraries
+++ b/lib/libraries
@@ -38,6 +38,7 @@
GITDIR["oslo.context"]=$DEST/oslo.context
GITDIR["oslo.db"]=$DEST/oslo.db
GITDIR["oslo.i18n"]=$DEST/oslo.i18n
+GITDIR["oslo.limit"]=$DEST/oslo.limit
GITDIR["oslo.log"]=$DEST/oslo.log
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
@@ -59,6 +60,7 @@
# Non oslo libraries are welcomed below as well, this prevents
# duplication of this code.
GITDIR["os-brick"]=$DEST/os-brick
+GITDIR["os-resource-classes"]=$DEST/os-resource-classes
GITDIR["os-traits"]=$DEST/os-traits
# Support entry points installation of console scripts
@@ -101,6 +103,7 @@
_install_lib_from_source "oslo.context"
_install_lib_from_source "oslo.db"
_install_lib_from_source "oslo.i18n"
+ _install_lib_from_source "oslo.limit"
_install_lib_from_source "oslo.log"
_install_lib_from_source "oslo.messaging"
_install_lib_from_source "oslo.middleware"
@@ -122,6 +125,7 @@
#
# os-traits for nova
_install_lib_from_source "os-brick"
+ _install_lib_from_source "os-resource-classes"
_install_lib_from_source "os-traits"
#
# python client libraries we might need from git can go here
diff --git a/lib/lvm b/lib/lvm
index b826c1b..d3f6bf1 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -53,28 +53,10 @@
sudo vgremove -f $vg
}
-# _clean_lvm_backing_file() removes the backing file of the
-# volume group
-#
-# Usage: _clean_lvm_backing_file() $backing_file
-function _clean_lvm_backing_file {
- local backing_file=$1
-
- # If the backing physical device is a loop device, it was probably setup by DevStack
- if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev
- vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
- if [[ -n "$vg_dev" ]]; then
- sudo losetup -d $vg_dev
- fi
- rm -f $backing_file
- fi
-}
-
# clean_lvm_volume_group() cleans up the volume group and removes the
# backing file
#
-# Usage: clean_lvm_volume_group $vg
+# Usage: clean_lvm_volume_group() $vg
function clean_lvm_volume_group {
local vg=$1
@@ -83,11 +65,22 @@
# if there is no logical volume left, it's safe to attempt a cleanup
# of the backing file
if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
- _clean_lvm_backing_file $DATA_DIR/$vg$BACKING_FILE_SUFFIX
+ local backing_file=$DATA_DIR/$vg$BACKING_FILE_SUFFIX
+
+ if [[ -n "$vg$BACKING_FILE_SUFFIX" ]] && \
+ [[ -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+ sudo systemctl disable --now $vg$BACKING_FILE_SUFFIX.service
+ sudo rm -f /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+ sudo systemctl daemon-reload
+ fi
+
+ # If the backing physical device is a loop device, it was probably setup by DevStack
+ if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
+ rm -f $backing_file
+ fi
fi
}
-
# _create_lvm_volume_group creates default volume group
#
# Usage: _create_lvm_volume_group() $vg $size
@@ -106,8 +99,20 @@
directio="--direct-io=on"
fi
+ # Only create systemd service if it doesn't already exist
+ if [[ ! -e "/etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service" ]]; then
+ sed -e "
+ s|%DIRECTIO%|${directio}|g;
+ s|%BACKING_FILE%|${backing_file}|g;
+ " $FILES/lvm-backing-file.template | sudo tee \
+ /etc/systemd/system/$vg$BACKING_FILE_SUFFIX.service
+
+ sudo systemctl daemon-reload
+ sudo systemctl enable --now $vg$BACKING_FILE_SUFFIX.service
+ fi
+
local vg_dev
- vg_dev=$(sudo losetup -f --show $directio $backing_file)
+ vg_dev=$(sudo losetup --associated $backing_file -O NAME -n)
# Only create volume group if it doesn't already exist
if ! sudo vgs $vg; then
diff --git a/lib/neutron b/lib/neutron
index 885df97..e7719d4 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -37,6 +37,11 @@
NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
NEUTRON_DIR=$DEST/neutron
+# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
+# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
# Distributed Virtual Router (DVR) configuration
# Can be:
@@ -141,6 +146,7 @@
# cleanup_neutron() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
if is_neutron_ovs_base_plugin; then
neutron_ovs_base_cleanup
@@ -164,6 +170,7 @@
# configure_neutron() - Set config files, create data dirs, etc
function configure_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
(cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
@@ -232,6 +239,7 @@
if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
neutron_ml2_extension_driver_add port_security
fi
+ configure_rbac_policies
fi
# Neutron OVS or LB agent
@@ -353,6 +361,7 @@
# Takes a single optional argument which is the config file to update,
# if not passed $NOVA_CONF is used.
function configure_neutron_nova_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local conf=${1:-$NOVA_CONF}
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
@@ -379,6 +388,7 @@
# create_neutron_accounts() - Create required service accounts
function create_neutron_accounts_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local neutron_url
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
@@ -402,6 +412,7 @@
# init_neutron() - Initialize databases, etc.
function init_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
recreate_database neutron
time_start "dbsync"
@@ -412,6 +423,7 @@
# install_neutron() - Collect source and prepare
function install_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
setup_develop $NEUTRON_DIR
@@ -485,6 +497,7 @@
# start_neutron() - Start running processes
function start_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
# Start up the neutron agents if enabled
# TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
# can resolve the $NEUTRON_AGENT_BINARY
@@ -522,6 +535,7 @@
# stop_neutron() - Stop running processes
function stop_neutron_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
for serv in neutron-api neutron-agent neutron-l3; do
stop_process $serv
done
@@ -544,6 +558,7 @@
# neutron_service_plugin_class_add() - add service plugin class
function neutron_service_plugin_class_add_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local service_plugin_class=$1
local plugins=""
@@ -568,11 +583,13 @@
}
function neutron_server_config_add_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
}
# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
function neutron_deploy_rootwrap_filters_new {
+ deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
local srcdir=$1
sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
@@ -612,6 +629,19 @@
fi
}
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
+}
+
+
function configure_neutron_nova {
if is_neutron_legacy_enabled; then
# Call back to old function
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 791ff18..b906a1b 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -90,6 +90,11 @@
NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
+# and "enforce_new_defaults" to True in Neutron's config to enforce usage
+# of the new RBAC policies and scopes.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+
# Agent binaries. Note, binary paths for other agents are set in per-service
# scripts in lib/neutron_plugins/services/
AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -275,6 +280,12 @@
# L3 Service functions
source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
+
# Use security group or not
if has_neutron_plugin_security_group; then
Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
@@ -369,6 +380,21 @@
configure_ovn_plugin
fi
+ # Configure Neutron's advanced services
+ if is_service_enabled q-placement neutron-placement; then
+ configure_placement_extension
+ fi
+ if is_service_enabled q-trunk neutron-trunk; then
+ configure_trunk_extension
+ fi
+ if is_service_enabled q-qos neutron-qos; then
+ configure_qos
+ if is_service_enabled q-l3 neutron-l3; then
+ configure_l3_agent_extension_fip_qos
+ configure_l3_agent_extension_gateway_ip_qos
+ fi
+ fi
+
iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
# devstack is not a tool for running uber scale OpenStack
# clouds, therefore running without a dedicated RPC worker
@@ -468,6 +494,19 @@
if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
fi
+ configure_rbac_policies
+}
+
+# configure_rbac_policies() - Configure Neutron to enforce new RBAC
+# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
+function configure_rbac_policies {
+ if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
+ iniset $NEUTRON_CONF oslo_policy enforce_scope True
+ else
+ iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
+ iniset $NEUTRON_CONF oslo_policy enforce_scope False
+ fi
}
# Start running OVN processes
@@ -543,11 +582,7 @@
function start_mutnauq_other_agents {
run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
- if is_service_enabled neutron-vpnaas; then
- : # Started by plugin
- else
- run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
- fi
+ run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
@@ -663,6 +698,27 @@
fi
}
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+ # If we've given a PUBLIC_INTERFACE to take over, then we assume
+ # that we can own the whole thing, and pivot it into the OVS
+ # bridge. If we are not, we're probably on a single interface
+ # machine, and we just setup NAT so that fixed guests can get out.
+ if [[ -n "$PUBLIC_INTERFACE" ]]; then
+ _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+ if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+ _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+ fi
+ else
+ for d in $default_v4_route_devs; do
+ sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+ done
+ fi
+}
+
# cleanup_mutnauq() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_mutnauq {
@@ -1001,6 +1057,15 @@
test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
}
+function plugin_agent_add_l2_agent_extension {
+ local l2_agent_extension=$1
+ if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+ L2_AGENT_EXTENSIONS=$l2_agent_extension
+ elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+ L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
+ fi
+}
+
# Restore xtrace
$_XTRACE_NEUTRON
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index ae4b251..f00feac 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -7,15 +7,16 @@
_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace)
set +o xtrace
-# Default openvswitch L2 agent
-Q_AGENT=${Q_AGENT:-openvswitch}
+# Default OVN L2 agent
+Q_AGENT=${Q_AGENT:-ovn}
if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
fi
# Enable this to simply and quickly enable tunneling with ML2.
-# Select either 'gre', 'vxlan', or 'gre,vxlan'
-Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"}
+# For ML2/OVS select either 'gre', 'vxlan', or 'gre,vxlan'.
+# For ML2/OVN use 'geneve'.
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
# This has to be set here since the agent will set this in the config file
if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then
Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE
@@ -24,7 +25,7 @@
fi
# List of MechanismDrivers to load
-Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge}
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn}
# Default GRE TypeDriver options
Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES}
# Default VXLAN TypeDriver options
@@ -155,5 +156,9 @@
return 0
}
+function configure_qos_ml2 {
+ neutron_ml2_extension_driver_add "qos"
+}
+
# Restore xtrace
$_XTRACE_NEUTRON_ML2
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index e4d0d75..927896b 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -21,14 +21,8 @@
source ${TOP_DIR}/lib/neutron_plugins/ovs_base
source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
-# Load devstack ovs base functions
-source $NEUTRON_DIR/devstack/lib/ovs
-
-
-# Defaults
-# --------
-
-Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT)
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
# Set variables for building OVN from source
OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
@@ -75,6 +69,9 @@
# unless the distro kernel includes ovs+conntrack support.
OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ Q_BUILD_OVS_FROM_GIT=True
+fi
# Whether or not to install the ovs python module from ovs source. This can be
# used to test and validate new ovs python features. This should only be used
@@ -88,12 +85,19 @@
# configure the MTU DHCP option.
OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
-# The log level of the OVN databases (north and south)
+# The log level of the OVN databases (north and south).
+# Supported log levels are: off, emer, err, warn, info or dbg.
+# More information about log levels can be found at
+# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+# If True (default) the node will be considered a gateway node.
+ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW)
+OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
+
export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
@@ -113,7 +117,13 @@
OVS_DATADIR=$DATA_DIR/ovs
OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
-OVN_DATADIR=$DATA_DIR/ovn
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ OVN_DATADIR=$DATA_DIR/ovn
+else
+ # When using OVN from packages, the data dir for OVN DBs is
+ # /var/lib/ovn
+ OVN_DATADIR=/var/lib/ovn
+fi
OVN_SHAREDIR=$OVS_PREFIX/share/ovn
OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts
OVN_RUNDIR=$OVS_PREFIX/var/run/ovn
@@ -171,6 +181,9 @@
}
function use_new_ovn_repository {
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
+ return 0
+ fi
if [ -z "$is_new_ovn" ]; then
local ovs_repo_dir=$DEST/$OVS_REPO_NAME
if [ ! -d $ovs_repo_dir ]; then
@@ -240,7 +253,12 @@
local testcmd="test -e $OVS_RUNDIR/$service.pid"
test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
- sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info
+ local service_ctl_file
+ service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+ if [ -z "$service_ctl_file" ]; then
+ die $LINENO "ctl file for service $service is not present."
+ fi
+ sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
}
function clone_repository {
@@ -253,48 +271,11 @@
ERROR_ON_CLONE=false git_clone $repo $dir $branch
}
-function get_ext_gw_interface {
- # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH
- # This function is copied directly from the devstack neutron-legacy script
- if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then
- echo $Q_PUBLIC_VETH_EX
- else
- # Disable in-band as we are going to use local port
- # to communicate with VMs
- sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \
- other_config:disable-in-band=true
- echo $PUBLIC_BRIDGE
- fi
-}
-
function create_public_bridge {
# Create the public bridge that OVN will use
- # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6
- local ext_gw_ifc
- ext_gw_ifc=$(get_ext_gw_interface)
-
- sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15
- sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc
- if [ -n "$FLOATING_RANGE" ]; then
- local cidr_len=${FLOATING_RANGE#*/}
- sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc
- fi
-
- # Ensure IPv6 RAs are accepted on the interface with the default route.
- # This is needed for neutron-based devstack clouds to work in
- # IPv6-only clouds in the gate. Please do not remove this without
- # talking to folks in Infra. This fix is based on a devstack fix for
- # neutron L3 agent: https://review.openstack.org/#/c/359490/.
- default_route_dev=$(ip route | grep ^default | awk '{print $5}')
- sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2
-
- sudo sysctl -w net.ipv6.conf.all.forwarding=1
- if [ -n "$IPV6_PUBLIC_RANGE" ]; then
- local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
- sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc
- fi
-
- sudo ip link set $ext_gw_ifc up
+ sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15
+ sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE
+ _configure_public_network_connectivity
}
function _disable_libvirt_apparmor {
@@ -316,16 +297,13 @@
# compile_ovn() - Compile OVN from source and load needed modules
# Accepts three parameters:
-# - first optional is False by default and means that
-# modules are built and installed.
-# - second optional parameter defines prefix for
+# - first optional parameter defines prefix for
# ovn compilation
-# - third optional parameter defines localstatedir for
+# - second optional parameter defines localstatedir for
# ovn single machine runtime
function compile_ovn {
- local build_modules=${1:-False}
- local prefix=$2
- local localstatedir=$3
+ local prefix=$1
+ local localstatedir=$2
if [ -n "$prefix" ]; then
prefix="--prefix=$prefix"
@@ -369,11 +347,6 @@
# install_ovn() - Collect source and prepare
function install_ovn {
- if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then
- echo "Installation of OVS from source disabled."
- return 0
- fi
-
echo "Installing OVN and dependent packages"
# Check the OVN configuration
@@ -403,7 +376,7 @@
compile_ovs $OVN_BUILD_MODULES
if use_new_ovn_repository; then
- compile_ovn $OVN_BUILD_MODULES
+ compile_ovn
fi
sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
@@ -589,14 +562,19 @@
# create new ones on each devstack run.
_disable_libvirt_apparmor
+ local mkdir_cmd="mkdir -p ${OVN_DATADIR}"
- mkdir -p $OVN_DATADIR
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]]; then
+ mkdir_cmd="sudo ${mkdir_cmd}"
+ fi
+
+ $mkdir_cmd
mkdir -p $OVS_DATADIR
rm -f $OVS_DATADIR/*.db
rm -f $OVS_DATADIR/.*.db.~lock~
- rm -f $OVN_DATADIR/*.db
- rm -f $OVN_DATADIR/.*.db.~lock~
+ sudo rm -f $OVN_DATADIR/*.db
+ sudo rm -f $OVN_DATADIR/.*.db.~lock~
}
function _start_ovs {
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 2e63fe3..cc41a8c 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -7,6 +7,12 @@
_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace)
set +o xtrace
+# Load devstack ovs compilation and loading functions
+source ${TOP_DIR}/lib/neutron_plugins/ovs_source
+
+# Defaults
+# --------
+
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
# OVS recognize default 'system' datapath or 'netdev' for userspace datapath
OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system}
@@ -60,26 +66,33 @@
}
function _neutron_ovs_base_install_agent_packages {
- # Install deps
- install_package $(get_packages "openvswitch")
- if is_ubuntu; then
- _neutron_ovs_base_install_ubuntu_dkms
- restart_service openvswitch-switch
- elif is_fedora; then
- restart_service openvswitch
- sudo systemctl enable openvswitch
- elif is_suse; then
- if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
+ if [ "$Q_BUILD_OVS_FROM_GIT" == "True" ]; then
+ remove_ovs_packages
+ compile_ovs False /usr/local /var
+ load_conntrack_gre_module
+ start_new_ovs
+ else
+ # Install deps
+ install_package $(get_packages "openvswitch")
+ if is_ubuntu; then
+ _neutron_ovs_base_install_ubuntu_dkms
restart_service openvswitch-switch
- else
- # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
- if [[ $DISTRO =~ "tumbleweed" ]]; then
- sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
+ elif is_fedora; then
+ restart_service openvswitch
+ sudo systemctl enable openvswitch
+ elif is_suse; then
+ if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
+ restart_service openvswitch-switch
+ else
+ # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
+ if [[ $DISTRO =~ "tumbleweed" ]]; then
+ sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
+ fi
+ restart_service openvswitch || {
+ journalctl -xe || :
+ systemctl status openvswitch
+ }
fi
- restart_service openvswitch || {
- journalctl -xe || :
- systemctl status openvswitch
- }
fi
fi
}
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
new file mode 100644
index 0000000..9ae5555
--- /dev/null
+++ b/lib/neutron_plugins/ovs_source
@@ -0,0 +1,216 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Defaults
+# --------
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT)
+
+# Set variables for building OVS from source
+OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
+OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
+OVS_REPO_NAME=${OVS_REPO_NAME:-ovs}
+OVS_BRANCH=${OVS_BRANCH:-0047ca3a0290f1ef954f2c76b31477cf4b9755f5}
+
+# Functions
+
+# load_module() - Load module using modprobe module given by argument and dies
+# on failure
+# - fatal argument is optional and says whether function should
+# exit if module can't be loaded
+function load_module {
+ local module=$1
+ local fatal=$2
+
+ if [ "$(trueorfalse True fatal)" == "True" ]; then
+ sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module")
+ else
+ sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg)
+ fi
+}
+
+# prepare_for_ovs_compilation() - Fetch ovs git repository and install packages needed for
+# compilation.
+function prepare_for_ovs_compilation {
+ local build_modules=${1:-False}
+ OVS_DIR=$DEST/$OVS_REPO_NAME
+
+ if [ ! -d $OVS_DIR ] ; then
+ # We can't use git_clone here because we want to ignore ERROR_ON_CLONE
+ git_timed clone $OVS_REPO $OVS_DIR
+ cd $OVS_DIR
+ git checkout $OVS_BRANCH
+ else
+ # Even though the directory already exists, call git_clone to update it
+ # if needed based on the RECLONE option
+ git_clone $OVS_REPO $OVS_DIR $OVS_BRANCH
+ cd $OVS_DIR
+ fi
+
+ # TODO: Can you create package list files like you can inside devstack?
+ install_package autoconf automake libtool gcc patch make
+
+ # If build_modules is False, we don't need to install the kernel-*
+ # packages. Just return.
+ if [[ "$build_modules" == "False" ]]; then
+ return
+ fi
+
+ KERNEL_VERSION=`uname -r`
+ if is_fedora ; then
+ # is_fedora covers Fedora, RHEL, CentOS, etc...
+ if [[ "$os_VENDOR" == "Fedora" ]]; then
+ install_package elfutils-libelf-devel
+ KERNEL_VERSION=`echo $KERNEL_VERSION | cut --delimiter='-' --field 1`
+ elif [[ ${KERNEL_VERSION:0:2} != "3." ]]; then
+ # dash is illegal character in rpm version so replace
+ # them with underscore like it is done in the kernel
+ # https://github.com/torvalds/linux/blob/master/scripts/package/mkspec#L25
+ # but only for latest series of the kernel, not 3.x
+
+ KERNEL_VERSION=`echo $KERNEL_VERSION | tr - _`
+ fi
+
+ echo NOTE: if kernel-devel-$KERNEL_VERSION or kernel-headers-$KERNEL_VERSION installation
+ echo failed, please, provide a repository with the package, or yum update / reboot
+ echo your machine to get the latest kernel.
+
+ install_package kernel-devel-$KERNEL_VERSION
+ install_package kernel-headers-$KERNEL_VERSION
+
+ elif is_ubuntu ; then
+ install_package linux-headers-$KERNEL_VERSION
+ fi
+}
+
+# load_ovs_kernel_modules() - load openvswitch kernel module
+function load_ovs_kernel_modules {
+ load_module openvswitch
+ load_module vport-geneve False
+ dmesg | tail
+}
+
+# reload_ovs_kernel_modules() - reload openvswitch kernel module
+function reload_ovs_kernel_modules {
+ set +e
+ ovs_system=$(sudo ovs-dpctl dump-dps | grep ovs-system)
+ if [ -n "$ovs_system" ]; then
+ sudo ovs-dpctl del-dp ovs-system
+ fi
+ set -e
+ sudo modprobe -r vport_geneve
+ sudo modprobe -r openvswitch
+ load_ovs_kernel_modules
+}
+
+# compile_ovs() - Compile OVS from source and load needed modules.
+# Accepts three parameters:
+# - first one is False by default and means that modules are not built and installed.
+# - second optional parameter defines prefix for ovs compilation
+# - third optional parameter defines localstatedir for ovs single machine runtime
+# Env variables OVS_REPO_NAME, OVS_REPO and OVS_BRANCH must be set
+function compile_ovs {
+ local _pwd=$PWD
+ local build_modules=${1:-False}
+ local prefix=$2
+ local localstatedir=$3
+
+ if [ -n "$prefix" ]; then
+ prefix="--prefix=$prefix"
+ fi
+
+ if [ -n "$localstatedir" ]; then
+ localstatedir="--localstatedir=$localstatedir"
+ fi
+
+ prepare_for_ovs_compilation $build_modules
+
+ KERNEL_VERSION=$(uname -r)
+ major_version=$(echo "${KERNEL_VERSION}" | cut -d '.' -f1)
+ patch_level=$(echo "${KERNEL_VERSION}" | cut -d '.' -f2)
+ if [ "${major_version}" -gt 5 ] || { [ "${major_version}" -eq 5 ] && [ "${patch_level}" -gt 5 ]; }; then
+ echo "NOTE: KERNEL VERSION is ${KERNEL_VERSION} and OVS doesn't support compiling "
+ echo "Kernel module for version higher than 5.5. Skipping module compilation..."
+ build_modules="False"
+ fi
+
+ if [ ! -f configure ] ; then
+ ./boot.sh
+ fi
+ if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+ if [[ "$build_modules" == "True" ]]; then
+ ./configure $prefix $localstatedir --with-linux=/lib/modules/$(uname -r)/build
+ else
+ ./configure $prefix $localstatedir
+ fi
+ fi
+ make -j$(($(nproc) + 1))
+ sudo make install
+ if [[ "$build_modules" == "True" ]]; then
+ sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install
+ reload_ovs_kernel_modules
+ else
+ load_ovs_kernel_modules
+ fi
+
+ cd $_pwd
+}
+
+# action_openvswitch() - call an action on the openvswitch service
+# Accepts one parameter that can be one of
+# 'start', 'restart' or 'stop'.
+function action_openvswitch {
+ local action=$1
+
+ if is_ubuntu; then
+ ${action}_service openvswitch-switch
+ elif is_fedora; then
+ ${action}_service openvswitch
+ elif is_suse; then
+ if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2"; then
+ ${action}_service openvswitch-switch
+ else
+ ${action}_service openvswitch
+ fi
+ fi
+}
+
+# start_new_ovs() - removes old ovs database, creates a new one and starts ovs
+function start_new_ovs {
+ sudo rm -f /etc/openvswitch/conf.db /etc/openvswitch/.conf.db~lock~
+ sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
+}
+
+# stop_new_ovs() - stops ovs
+function stop_new_ovs {
+ local ovs_ctl='/usr/local/share/openvswitch/scripts/ovs-ctl'
+
+ if [ -x $ovs_ctl ] ; then
+ sudo $ovs_ctl stop
+ fi
+}
+
+# remove_ovs_packages() - removes old ovs packages from the system
+function remove_ovs_packages {
+ for package in openvswitch openvswitch-switch openvswitch-common; do
+ if is_package_installed $package; then
+ uninstall_package $package
+ fi
+ done
+}
+
+
+# load_conntrack_gre_module() - loads nf_conntrack_proto_gre kernel module
+function load_conntrack_gre_module {
+ load_module nf_conntrack_proto_gre False
+}
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 75a3567..cd98115 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -123,21 +123,7 @@
neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE
- # If we've given a PUBLIC_INTERFACE to take over, then we assume
- # that we can own the whole thing, and privot it into the OVS
- # bridge. If we are not, we're probably on a single interface
- # machine, and we just setup NAT so that fixed guests can get out.
- if [[ -n "$PUBLIC_INTERFACE" ]]; then
- _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
- if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
- fi
- else
- for d in $default_v4_route_devs; do
- sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
- done
- fi
+ _configure_public_network_connectivity
}
# Explicitly set router id in l3 agent configuration
@@ -184,14 +170,14 @@
if is_provider_network; then
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
if [[ "$IP_VERSION" =~ 4.* ]]; then
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
+ SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
fi
@@ -201,7 +187,7 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
fi
- IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
+ IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
fi
@@ -211,7 +197,7 @@
sudo ip link set $PUBLIC_INTERFACE up
fi
else
- NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -229,7 +215,7 @@
# Create a router, and add the private subnet as one of its interfaces
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
@@ -267,16 +253,16 @@
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- local subnet_params="--project $project_id "
- subnet_params+="--ip-version 4 "
+ local subnet_params="--ip-version 4 "
if [[ -n "$NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $NETWORK_GATEWAY "
fi
+
subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
local subnet_id
- subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
echo $subnet_id
}
@@ -290,8 +276,7 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$FIXED_RANGE_V6
fi
- local subnet_params="--project $project_id "
- subnet_params+="--ip-version 6 "
+ local subnet_params="--ip-version 6 "
if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
fi
@@ -299,7 +284,7 @@
subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} "
subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
local ipv6_subnet_id
- ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
echo $ipv6_subnet_id
}
@@ -333,7 +318,7 @@
# Configure neutron router for IPv4 public access
function _neutron_configure_router_v4 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
# Create a public subnet on the external network
local id_and_ext_gw_ip
id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -341,10 +326,10 @@
ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
# Configure the external network as the default router gateway
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
- # This logic is specific to using the l3-agent for layer 3
- if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+ # This logic is specific to using OVN or the l3-agent for layer 3
+ if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
# Configure and enable public bridge
local ext_gw_interface="none"
if is_neutron_ovs_base_plugin; then
@@ -377,7 +362,7 @@
# Configure neutron router for IPv6 public access
function _neutron_configure_router_v6 {
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
# Create a public subnet on the external network
local ipv6_id_and_ext_gw_ip
ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -389,11 +374,11 @@
# If the external network has not already been set as the default router
# gateway when configuring an IPv4 public subnet, do so now
if [[ "$IP_VERSION" == "6" ]]; then
- openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
+ openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
fi
- # This logic is specific to using the l3-agent for layer 3
- if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+ # This logic is specific to using OVN or the l3-agent for layer 3
+ if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
# if the Linux host considers itself to be a router then it will
# ignore all router advertisements
# Ensure IPv6 RAs are accepted on interfaces with a default route.
@@ -410,7 +395,13 @@
sudo sysctl -w net.ipv6.conf.all.forwarding=1
# Configure and enable public bridge
# Override global IPV6_ROUTER_GW_IP with the true value from neutron
- IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
+ # NOTE(slaweq): when enforce scopes is enabled in Neutron, router's
+ # gateway ports aren't visible in API because such ports don't belongs
+ # to any tenant. Because of that, at least temporary we need to find
+ # IPv6 address of the router's gateway in a bit different way.
+ # It can be reverted when bug
+ # https://bugs.launchpad.net/neutron/+bug/1959332 will be fixed
+ IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router show $ROUTER_ID -c external_gateway_info -f json | grep -C 1 $ipv6_pub_subnet_id | grep ip_address | awk '{print $2}' | tr -d '"')
die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
if is_neutron_ovs_base_plugin; then
@@ -420,6 +411,11 @@
# Configure interface for public bridge
sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
+ # Any IPv6 private subnet that uses the default IPV6 subnet pool
+ # and that is plugged into the default router (Q_ROUTER_NAME) will
+ # be reachable from the devstack node (ex: ipv6-private-subnet).
+ # Some scenario tests (such as octavia-tempest-plugin) rely heavily
+ # on this feature.
local replace_range=${SUBNETPOOL_PREFIX_V6}
if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
replace_range=${FIXED_RANGE_V6}
@@ -436,3 +432,12 @@
EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
[[ $EXT_LIST =~ $extension ]] && return 0
}
+
+function plugin_agent_add_l3_agent_extension {
+ local l3_agent_extension=$1
+ if [[ -z "$L3_AGENT_EXTENSIONS" ]]; then
+ L3_AGENT_EXTENSIONS=$l3_agent_extension
+ elif [[ ! ,${L3_AGENT_EXTENSIONS}, =~ ,${l3_agent_extension}, ]]; then
+ L3_AGENT_EXTENSIONS+=",$l3_agent_extension"
+ fi
+}
diff --git a/lib/neutron_plugins/services/placement b/lib/neutron_plugins/services/placement
new file mode 100644
index 0000000..3ec185b
--- /dev/null
+++ b/lib/neutron_plugins/services/placement
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+function configure_placement_service_plugin {
+ neutron_service_plugin_class_add "placement"
+}
+
+function configure_placement_neutron {
+ iniset $NEUTRON_CONF placement auth_type "$NEUTRON_PLACEMENT_AUTH_TYPE"
+ iniset $NEUTRON_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $NEUTRON_CONF placement username "$NEUTRON_PLACEMENT_USERNAME"
+ iniset $NEUTRON_CONF placement password "$SERVICE_PASSWORD"
+ iniset $NEUTRON_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement project_name "$SERVICE_TENANT_NAME"
+ iniset $NEUTRON_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NEUTRON_CONF placement region_name "$REGION_NAME"
+}
+
+function configure_placement_extension {
+ configure_placement_service_plugin
+ configure_placement_neutron
+}
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
new file mode 100644
index 0000000..af9eb3d
--- /dev/null
+++ b/lib/neutron_plugins/services/qos
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+function configure_qos_service_plugin {
+ neutron_service_plugin_class_add "qos"
+}
+
+
+function configure_qos_core_plugin {
+ configure_qos_$NEUTRON_CORE_PLUGIN
+}
+
+
+function configure_qos_l2_agent {
+ plugin_agent_add_l2_agent_extension "qos"
+}
+
+
+function configure_qos {
+ configure_qos_service_plugin
+ configure_qos_core_plugin
+ configure_qos_l2_agent
+}
+
+function configure_l3_agent_extension_fip_qos {
+ plugin_agent_add_l3_agent_extension "fip_qos"
+}
+
+function configure_l3_agent_extension_gateway_ip_qos {
+ plugin_agent_add_l3_agent_extension "gateway_ip_qos"
+}
diff --git a/lib/neutron_plugins/services/trunk b/lib/neutron_plugins/services/trunk
new file mode 100644
index 0000000..8e0f694
--- /dev/null
+++ b/lib/neutron_plugins/services/trunk
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+function configure_trunk_extension {
+ neutron_service_plugin_class_add "trunk"
+}
diff --git a/lib/nova b/lib/nova
index 9039c6b..90289b1 100644
--- a/lib/nova
+++ b/lib/nova
@@ -236,6 +236,10 @@
stop_process "n-api-meta"
remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+
+ if [[ "$NOVA_BACKEND" == "LVM" ]]; then
+ clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
+ fi
}
# configure_nova() - Set config files, create data dirs, etc
@@ -263,7 +267,8 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
- LIBVIRT_CPU_MODE=none
+ LIBVIRT_CPU_MODE=custom
+ LIBVIRT_CPU_MODEL=Nehalem
if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
@@ -301,11 +306,8 @@
fi
fi
- if is_fedora && [[ $DISTRO =~ f31] ]]; then
- # For f31 use the rebased 2.1.0 version of the package.
- sudo dnf copr enable -y lyarwood/iscsi-initiator-utils
- sudo dnf update -y
- fi
+ # Ensure each compute host uses a unique iSCSI initiator
+ echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
if [[ ${ISCSID_DEBUG} == "True" ]]; then
# Install an override that starts iscsid with debugging
@@ -320,6 +322,14 @@
sudo systemctl daemon-reload
fi
+ # set chap algorithms. The default chap_algorithm is md5 which will
+ # not work under FIPS.
+ # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in
+ # and determine the correct solution for openeuler here
+ if ! is_openeuler; then
+ iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
+ fi
+
# ensure that iscsid is started, even when disabled by default
restart_service iscsid
fi
@@ -491,7 +501,8 @@
fi
# nova defaults to genisoimage but only mkisofs is available for 15.0+
- if is_suse; then
+ # rhel provides mkisofs symlink to genisoimage or xorriso appropriately
+ if is_suse || is_fedora; then
iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
fi
@@ -500,8 +511,13 @@
iniset $NOVA_CONF upgrade_levels compute "auto"
- write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
- write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ if is_service_enabled n-api; then
+ write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+ fi
+
+ if is_service_enabled n-api-meta; then
+ write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ fi
if is_service_enabled ceilometer; then
iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
@@ -899,7 +915,7 @@
NOVNC_WEB_DIR=/usr/share/novnc
install_package novnc
else
- NOVNC_WEB_DIR=$DEST/noVNC
+ NOVNC_WEB_DIR=$DEST/novnc
git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
fi
fi
@@ -998,6 +1014,11 @@
iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
fi
+ # Workaround bug #1939108
+ if [[ "$VIRT_DRIVER" == "libvirt" && "$LIBVIRT_TYPE" == "qemu" ]]; then
+ iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True
+ fi
+
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# ``sg`` is used in run_process to execute nova-compute as a member of the
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index d3827c3..3e7d280 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -56,15 +56,23 @@
# Installs required distro-specific libvirt packages.
function install_libvirt {
+ # NOTE(yoctozepto): The common consensus [1] is that libvirt-python should
+ # be installed from distro packages. However, various projects might be
+ # trying to ensure it is installed using pip AND use upper-constraints
+ # with that, causing pip to try to upgrade it and to fail.
+ # The following line removes libvirt-python from upper-constraints and
+ # avoids the situation described above. Now only if installed packages
+ # explicitly depend on a newer (or, in general, incompatible) libvirt-python
+ # version, will pip try to reinstall it.
+ # [1] https://review.opendev.org/c/openstack/devstack/+/798514
+ $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
+ $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python
if is_ubuntu; then
- install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev
+ install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt
if is_arch "aarch64"; then
install_package qemu-efi
fi
- # uninstall in case the libvirt version changed
- pip_uninstall libvirt-python
- pip_install_gr libvirt-python
#pip_install_gr <there-si-no-guestfs-in-pypi>
elif is_fedora || is_suse; then
@@ -79,14 +87,11 @@
# as the base system version is too old. We should have
# pre-installed these
install_package qemu-kvm
+ install_package libvirt libvirt-devel python3-libvirt
- install_package libvirt libvirt-devel
if is_arch "aarch64"; then
- install_package edk2.git-aarch64
+ install_package edk2-aarch64
fi
-
- pip_uninstall libvirt-python
- pip_install_gr libvirt-python
fi
if [[ $DEBUG_LIBVIRT_COREDUMPS == True ]]; then
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index bda6ef6..f058e9b 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -47,9 +47,13 @@
iniset $NOVA_CONF ironic username admin
iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI
- iniset $NOVA_CONF ironic project_domain_id default
+ if is_ironic_enforce_scope; then
+ iniset $NOVA_CONF ironic system_scope all
+ else
+ iniset $NOVA_CONF ironic project_domain_id default
+ iniset $NOVA_CONF ironic project_name demo
+ fi
iniset $NOVA_CONF ironic user_domain_id default
- iniset $NOVA_CONF ironic project_name demo
iniset $NOVA_CONF ironic region_name $REGION_NAME
# These are used with crufty legacy ironicclient
@@ -82,7 +86,6 @@
:
}
-
# Restore xtrace
$_XTRACE_HYP_IRONIC
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 321775d..c1cd132 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,6 +40,9 @@
configure_libvirt
iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
+ if [ "$LIBVIRT_CPU_MODE" == "custom" ] ; then
+ iniset $NOVA_CONF libvirt cpu_model "$LIBVIRT_CPU_MODEL"
+ fi
# Do not enable USB tablet input devices to avoid QEMU CPU overhead.
iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
diff --git a/lib/os-vif b/lib/os-vif
new file mode 100644
index 0000000..865645c
--- /dev/null
+++ b/lib/os-vif
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# support vsctl or native.
+# until bug #1929446 is resolved we override the os-vif default
+# and fall back to the legacy "vsctl" driver.
+OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"}
+
+function is_ml2_ovs {
+ if [[ "${Q_AGENT}" == "openvswitch" ]]; then
+ echo "True"
+ fi
+ echo "False"
+}
+
+# This should be true for any ml2/ovs job but should be set to false for
+# all other ovs based jobs e.g. ml2/ovn
+OS_VIF_OVS_ISOLATE_VIF=${OS_VIF_OVS_ISOLATE_VIF:=$(is_ml2_ovs)}
+OS_VIF_OVS_ISOLATE_VIF=$(trueorfalse False OS_VIF_OVS_ISOLATE_VIF)
+
+function configure_os_vif {
+ if [[ -e ${NOVA_CONF} ]]; then
+ iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
+ iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
+ fi
+ if [[ -e ${NEUTRON_CONF} ]]; then
+ iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
+ iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
+ fi
+}
diff --git a/lib/swift b/lib/swift
index 790fb99..ba92f3d 100644
--- a/lib/swift
+++ b/lib/swift
@@ -179,12 +179,9 @@
# cleanup_swift() - Remove residual data files
function cleanup_swift {
rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
- if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
- fi
- if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
- rm ${SWIFT_DISK_IMAGE}
- fi
+
+ destroy_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
+
rm -rf ${SWIFT_DATA_DIR}/run/
if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
_cleanup_swift_apache_wsgi
@@ -335,7 +332,6 @@
local node_number
local swift_node_config
local swift_log_dir
- local user_group
# Make sure to kill all swift processes first
$SWIFT_BIN_DIR/swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
@@ -353,7 +349,7 @@
# partitions (which make more sense when you have a multi-node
# setup) we configure it with our version of rsync.
sed -e "
- s/%GROUP%/${USER_GROUP}/;
+ s/%GROUP%/$(id -g -n ${STACK_USER})/;
s/%USER%/${STACK_USER}/;
s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
" $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
@@ -431,7 +427,7 @@
swift_pipeline+=" authtoken"
if is_service_enabled s3api;then
swift_pipeline+=" s3token"
- iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_SERVICE_URI_V3}
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
fi
swift_pipeline+=" keystoneauth"
@@ -522,7 +518,7 @@
local auth_vers
auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
- if [[ "$KEYSTONE_AUTH_PROTOCOL" == "https" ]]; then
+ if [[ "$KEYSTONE_SERVICE_PROTOCOL" == "https" ]]; then
iniset ${testfile} func_test auth_port 443
else
iniset ${testfile} func_test auth_port 80
@@ -576,28 +572,7 @@
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
# Create a loopback disk and format it to XFS.
- if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
- if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
- sudo rm -f ${SWIFT_DISK_IMAGE}
- fi
- fi
-
- mkdir -p ${SWIFT_DATA_DIR}/drives/images
- sudo touch ${SWIFT_DISK_IMAGE}
- sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE}
-
- truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}
-
- # Make a fresh XFS filesystem
- /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE}
-
- # Mount the disk with mount options to make it as efficient as possible
- mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
- if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
- ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
- fi
+ create_disk ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1 ${SWIFT_LOOPBACK_DISK_SIZE}
# Create a link to the above mount and
# create all of the directories needed to emulate a few different servers
@@ -867,12 +842,15 @@
function swift_configure_tempurls {
# note we are using swift credentials!
- OS_USERNAME=swift \
- OS_PASSWORD=$SERVICE_PASSWORD \
- OS_USER_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- OS_PROJECT_NAME=$SERVICE_PROJECT_NAME \
- OS_PROJECT_DOMAIN_NAME=$SERVICE_DOMAIN_NAME \
- openstack object store account \
+ openstack --os-cloud "" \
+ --os-region-name $REGION_NAME \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username=swift \
+ --os-password=$SERVICE_PASSWORD \
+ --os-user-domain-name=$SERVICE_DOMAIN_NAME \
+ --os-project-name=$SERVICE_PROJECT_NAME \
+ --os-project-domain-name=$SERVICE_DOMAIN_NAME \
+ object store account \
set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
}
diff --git a/lib/tempest b/lib/tempest
index 29a6229..4504663 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -90,7 +90,6 @@
# it will run tempest with
TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
-
# Functions
# ---------
@@ -107,7 +106,7 @@
# Takes an image ID parameter as input
function image_size_in_gib {
local size
- size=$(openstack image show $1 -c size -f value)
+ size=$(openstack --os-cloud devstack-admin image show $1 -c size -f value)
echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
}
@@ -115,7 +114,9 @@
local tmp_c
tmp_c=$1
if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
- (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+ (cd $REQUIREMENTS_DIR &&
+ git show master:upper-constraints.txt 2>/dev/null ||
+ git show origin/master:upper-constraints.txt) > $tmp_c
else
echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -173,7 +174,7 @@
image_uuid_alt="$IMAGE_UUID"
fi
images+=($IMAGE_UUID)
- done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
case "${#images[*]}" in
0)
@@ -209,23 +210,23 @@
local alt_username=${ALT_USERNAME:-alt_demo}
local alt_project_name=${ALT_TENANT_NAME:-alt_demo}
local admin_project_id
- admin_project_id=$(openstack project list | awk "/ admin / { print \$2 }")
+ admin_project_id=$(openstack --os-cloud devstack-admin project list | awk "/ admin / { print \$2 }")
if is_service_enabled nova; then
# If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior
# Tempest creates its own instance types
- available_flavors=$(nova flavor-list)
+ available_flavors=$(openstack --os-cloud devstack-admin flavor list)
if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
# Determine the flavor disk size based on the image size.
disk=$(image_size_in_gib $image_uuid)
- openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
+ openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
fi
flavor_ref=42
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
# Determine the alt flavor disk size based on the alt image size.
disk=$(image_size_in_gib $image_uuid_alt)
- openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
+ openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
fi
flavor_ref_alt=84
else
@@ -251,7 +252,7 @@
fi
flavor_ref=${flavors[0]}
flavor_ref_alt=$flavor_ref
- flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}")
+ flavor_ref_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${flavor_ref}")
# Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values.
# Some resize instance in tempest tests depends on this.
@@ -264,7 +265,7 @@
# flavor selected as default, e.g. m1.small,
# we need to perform additional check.
#
- flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}")
+ flavor_ref_alt_size=$(openstack --os-cloud devstack-admin flavor show --format value --column disk "${f}")
if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then
continue
fi
@@ -285,10 +286,10 @@
# If NEUTRON_CREATE_INITIAL_NETWORKS is not true, there is no network created
# and the public_network_id should not be set.
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
- public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
+ public_network_id=$(openstack --os-cloud devstack-admin network show -f value -c id $PUBLIC_NETWORK_NAME)
# make sure shared network presence does not confuses the tempest tests
- openstack network create --share shared
- openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --share shared
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
fi
iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -419,6 +420,9 @@
iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False}
iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
+ # Starting with Wallaby, nova sanitizes freeform characters in instance hostnames by replacing them with dashes
+ iniset $TEMPEST_CONFIG compute-feature-enabled hostname_fqdn_sanitization True
+
if [[ -n "$NOVA_FILTERS" ]]; then
iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS}
fi
@@ -440,6 +444,8 @@
iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
+ iniset $TEMPEST_CONFIG enforce_scope neutron "$NEUTRON_ENFORCE_SCOPE"
+
# Scenario
SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
@@ -459,13 +465,6 @@
iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
# Volume
- # Set the service catalog entry for Tempest to run on. Typically
- # used to try different Volume API version targets. The tempest
- # default it to 'volumev3'(v3 APIs endpoint) , so only set this
- # if you want to change it.
- if [[ -n "$TEMPEST_VOLUME_TYPE" ]]; then
- iniset $TEMPEST_CONFIG volume catalog_type $TEMPEST_VOLUME_TYPE
- fi
# Only turn on TEMPEST_VOLUME_MANAGE_SNAPSHOT by default for "lvm" backends
if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then
TEMPEST_VOLUME_MANAGE_SNAPSHOT=${TEMPEST_VOLUME_MANAGE_SNAPSHOT:-True}
@@ -489,12 +488,6 @@
iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
- # Reset microversions to None where v2 is running which does not support microversion.
- # Both "None" means no microversion testing.
- if [[ "$TEMPEST_VOLUME_TYPE" == "volumev2" ]]; then
- tempest_volume_min_microversion=None
- tempest_volume_max_microversion=None
- fi
if [ "$tempest_volume_min_microversion" == "None" ]; then
inicomment $TEMPEST_CONFIG volume min_microversion
else
@@ -610,6 +603,19 @@
fi
done
+ # ``enforce_scope``
+ # If services enable enforce_scope for their policy,
+ # we need to enable the same on the Tempest side so that
+ # tests can be run with a scoped token.
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope keystone true
+ iniset $TEMPEST_CONFIG auth admin_system 'all'
+ iniset $TEMPEST_CONFIG auth admin_project_name ''
+ fi
+ iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
+
+ iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+
if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
# libvirt-lxc does not support boot from volume or attaching volumes
# so basically anything with cinder is out of the question.
@@ -715,9 +721,6 @@
set_tempest_venv_constraints $tmp_u_c_m
tox -r --notest -efull
- # TODO: remove the trailing pip constraint when a proper fix
- # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
- $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt
# NOTE(mtreinish) Respect constraints in the tempest full venv, things that
# are using a tox job other than full will not be respecting constraints but
# running pip install -U on tempest requirements
diff --git a/openrc b/openrc
index beeaebe..6d488bb 100644
--- a/openrc
+++ b/openrc
@@ -74,7 +74,7 @@
fi
# Identity API version
-export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
+export OS_IDENTITY_API_VERSION=3
# Ask keystoneauth1 to use keystone
export OS_AUTH_TYPE=password
diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml
index bd64574..6b7ea37 100644
--- a/roles/apache-logs-conf/tasks/main.yaml
+++ b/roles/apache-logs-conf/tasks/main.yaml
@@ -64,6 +64,7 @@
'Debian': '/etc/apache2/sites-enabled/'
'Suse': '/etc/apache2/conf.d/'
'RedHat': '/etc/httpd/conf.d/'
+ 'openEuler': '/etc/httpd/conf.d/'
- name: Discover configurations
find:
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
new file mode 100644
index 0000000..400a8da
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -0,0 +1,16 @@
+Verify the IPv6-only deployments
+
+This role needs to be invoked from a playbook that
+runs tests. This role verifies the IPv6 settings on
+the devstack side and that devstack deploys services on IPv6.
+This role is invoked before tests are run so that
+any missing IPv6 settings or deployment issues can fail
+the job early.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
new file mode 100644
index 0000000..59d3b79
--- /dev/null
+++ b/roles/devstack-ipv6-only-deployments-verification/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Verify the ipv6-only deployments
+ become: true
+ become_user: stack
+ shell: "{{ devstack_base_dir }}/devstack/tools/verify-ipv6-only-deployments.sh"
diff --git a/setup.cfg b/setup.cfg
index 146f010..a4e621f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,11 @@
[metadata]
name = DevStack
summary = OpenStack DevStack
-description-file =
+description_file =
README.rst
author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://docs.openstack.org/devstack/latest
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://docs.openstack.org/devstack/latest
classifier =
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
diff --git a/stack.sh b/stack.sh
index 163fc5b..0082b99 100755
--- a/stack.sh
+++ b/stack.sh
@@ -227,7 +227,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8"
+SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -278,6 +278,12 @@
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
+# TODO(wxy): Currently some base packages are not installed by default in
+# openEuler. Remove the code below once the packages are installed by default
+# in the future.
+if [[ $DISTRO == "openEuler-20.03" ]]; then
+ install_package hostname
+fi
# Configure Distro Repositories
# -----------------------------
@@ -300,10 +306,18 @@
}
function _install_rdo {
- # NOTE(ianw) 2020-04-30 : when we have future branches, we
- # probably want to install the relevant branch RDO release as
- # well. But for now it's all master.
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ if [[ $DISTRO == "rhel8" ]]; then
+ if [[ "$TARGET_BRANCH" == "master" ]]; then
+ # rdo-release.el8.rpm points to latest RDO release, use that for master
+ sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ else
+ # For stable branches use corresponding release rpm
+ rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
+ sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ fi
+ elif [[ $DISTRO == "rhel9" ]]; then
+ sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo
+ fi
sudo dnf -y update
}
@@ -381,6 +395,10 @@
# RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
# Patch: https://github.com/rpm-software-management/dnf/pull/1448
echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
+elif [[ $DISTRO == "rhel9" ]]; then
+ sudo dnf config-manager --set-enabled crb
+ # rabbitmq and other packages are provided by RDO repositories.
+ _install_rdo
fi
# Ensure python is installed
@@ -597,6 +615,7 @@
source $TOP_DIR/lib/dstat
source $TOP_DIR/lib/tcpdump
source $TOP_DIR/lib/etcd3
+source $TOP_DIR/lib/os-vif
# Extras Source
# --------------
@@ -678,6 +697,8 @@
# Last chance for the database password. This must be handled here
# because read_password is not a library function.
read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
+
+ define_database_baseurl
else
echo "No database enabled"
fi
@@ -744,14 +765,28 @@
# Bring down global requirements before any use of pip_install. This is
# necessary to ensure that the constraints file is in place before we
# attempt to apply any constraints to pip installs.
-git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+# We always need the master branch in addition to any stable branch, so
+# override GIT_DEPTH here.
+GIT_DEPTH=0 git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
# Install package requirements
# Source it so the entire environment is available
echo_summary "Installing package prerequisites"
source $TOP_DIR/tools/install_prereqs.sh
-# Configure an appropriate Python environment
+# Configure an appropriate Python environment.
+#
+# NOTE(ianw) 2021-08-11 : We install the latest pip here because pip
+# is very active and changes are not generally reflected in the LTS
+# distros. This often involves important things like dependency or
+# conflict resolution, and has often been required because the
+# complicated constraints etc. used by openstack have tickled bugs in
+# distro versions of pip. We want to find these problems as they
+# happen, rather than years later when we try to update our LTS
+# distro. Whilst it is clear that global installations of upstream
+# pip are less and less common, with virtualenvs being the general
+# approach now; there are a lot of devstack plugins that assume a
+# global install environment.
if [[ "$OFFLINE" != "True" ]]; then
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
fi
@@ -859,7 +894,7 @@
install_keystonemiddleware
if is_service_enabled keystone; then
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
stack_install_service keystone
configure_keystone
fi
@@ -1046,37 +1081,18 @@
# Keystone
# --------
-# Rather than just export these, we write them out to a
-# intermediate userrc file that can also be used to debug if
-# something goes wrong between here and running
-# tools/create_userrc.sh (this script relies on services other
-# than keystone being available, so we can't call it right now)
-cat > $TOP_DIR/userrc_early <<EOF
-# Use this for debugging issues before files in accrc are created
-
-# Set up password auth credentials now that Keystone is bootstrapped
-export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
-export OS_USERNAME=admin
-export OS_USER_DOMAIN_ID=default
-export OS_PASSWORD=$ADMIN_PASSWORD
-export OS_PROJECT_NAME=admin
-export OS_PROJECT_DOMAIN_ID=default
-export OS_REGION_NAME=$KEYSTONE_REGION_NAME
-
-EOF
-
if is_service_enabled tls-proxy; then
- echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
fi
-source $TOP_DIR/userrc_early
+# Write a clouds.yaml file and use the devstack-admin cloud
+write_clouds_yaml
+export OS_CLOUD=${OS_CLOUD:-devstack-admin}
if is_service_enabled keystone; then
echo_summary "Starting Keystone"
- if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+ if [ "$KEYSTONE_SERVICE_HOST" == "$SERVICE_HOST" ]; then
init_keystone
start_keystone
bootstrap_keystone
@@ -1101,9 +1117,6 @@
fi
-# Write a clouds.yaml file
-write_clouds_yaml
-
# Horizon
# -------
@@ -1159,6 +1172,11 @@
sudo sysctl -w net.ipv4.ip_forward=1
fi
+# os-vif
+# ------
+if is_service_enabled nova neutron; then
+ configure_os_vif
+fi
# Storage Service
# ---------------
@@ -1341,6 +1359,7 @@
done
fi
+async_wait create_flavors
if is_service_enabled horizon; then
echo_summary "Starting Horizon"
@@ -1348,8 +1367,6 @@
start_horizon
fi
-async_wait create_flavors
-
# Create account rc files
# =======================
@@ -1359,7 +1376,7 @@
# which is helpful in image bundle steps.
if is_service_enabled nova && is_service_enabled keystone; then
- USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+ USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc --os-password $ADMIN_PASSWORD"
if [ -f $SSL_BUNDLE_FILE ]; then
USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
diff --git a/stackrc b/stackrc
index 196f61f..e48fd81 100644
--- a/stackrc
+++ b/stackrc
@@ -72,8 +72,10 @@
ENABLED_SERVICES+=,g-api
# Cinder
ENABLED_SERVICES+=,c-sch,c-api,c-vol
+ # OVN
+ ENABLED_SERVICES+=,ovn-controller,ovn-northd,ovs-vswitchd,ovsdb-server
# Neutron
- ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+ ENABLED_SERVICES+=,q-svc,q-ovn-metadata-agent
# Dashboard
ENABLED_SERVICES+=,horizon
# Additional services
@@ -173,21 +175,9 @@
export PS4='+ $(short_source): '
fi
-# Configure Identity API version: 2.0, 3
-IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
-
-# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack
-# deployment will be deploying the Identity v2 pipelines. If this option is set
-# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to
-# skip Identity v2 specific tests; and iii) configure Horizon to use Identity
-# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION
-# will to be set to ``3`` in order to make DevStack register the Identity
-# endpoint as v3. This flag is experimental and will be used as basis to
-# identify the projects which still have issues to operate with Identity v3.
-ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
-if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
- IDENTITY_API_VERSION=3
-fi
+# Configure Identity API version
+# TODO(frickler): Drop this when plugins no longer need it
+IDENTITY_API_VERSION=3
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
@@ -245,7 +235,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="xena"
+DEVSTACK_SERIES="yoga"
##############
#
@@ -413,6 +403,10 @@
GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH}
+# oslo.limit
+GITREPO["oslo.limit"]=${OSLOLIMIT_REPO:-${GIT_BASE}/openstack/oslo.limit.git}
+GITBRANCH["oslo.limit"]=${OSLOLIMIT_BRANCH:-$TARGET_BRANCH}
+
# oslo.log
GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH}
@@ -546,6 +540,10 @@
GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH}
GITDIR["neutron-lib"]=$DEST/neutron-lib
+# os-resource-classes library containing a list of standardized resource classes for OpenStack
+GITREPO["os-resource-classes"]=${OS_RESOURCE_CLASSES_REPO:-${GIT_BASE}/openstack/os-resource-classes.git}
+GITBRANCH["os-resource-classes"]=${OS_RESOURCE_CLASSES_BRANCH:-$TARGET_BRANCH}
+
# os-traits library for resource provider traits in the placement service
GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git}
GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH}
@@ -596,8 +594,8 @@
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/novnc.git}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.3.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -617,7 +615,8 @@
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
- LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
+ LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-custom}
+ LIBVIRT_CPU_MODEL=${LIBVIRT_CPU_MODEL:-Nehalem}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
@@ -664,7 +663,7 @@
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
-CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
+CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 5b53389..839e3a1 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -44,7 +44,8 @@
ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"
ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
-ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken"
+ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken os-resource-classes"
+ALL_LIBS+=" oslo.limit"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh
index b2bc0a2..71d8d51 100755
--- a/tests/test_write_devstack_local_conf_role.sh
+++ b/tests/test_write_devstack_local_conf_role.sh
@@ -6,4 +6,4 @@
source $TOP/functions
source $TOP/tests/unittest.sh
-python ./roles/write-devstack-local-conf/library/test.py
+${PYTHON} $TOP/roles/write-devstack-local-conf/library/test.py
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 3703ece..fced2ab 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -17,6 +17,8 @@
PASS=0
FAILED_FUNCS=""
+export PYTHON=$(which python3 2>/dev/null)
+
# pass a test, printing out MSG
# usage: passed message
function passed {
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
deleted file mode 100644
index 8ee551b..0000000
--- a/tools/cap-pip.txt
+++ /dev/null
@@ -1 +0,0 @@
-pip<20.3
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 25f7268..f24ac40 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -26,76 +26,6 @@
FILES=$TOP_DIR/files
fi
-# Keystone Port Reservation
-# -------------------------
-# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
-# being used as ephemeral ports by the system. The default(s) are 35357 and
-# 35358 which are in the Linux defined ephemeral port range (in disagreement
-# with the IANA ephemeral port range). This is a workaround for bug #1253482
-# where Keystone will try and bind to the port and the port will already be
-# in use as an ephemeral port by another process. This places an explicit
-# exception into the Kernel for the Keystone AUTH ports.
-function fixup_keystone {
- keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-
- # Only do the reserved ports when available, on some system (like containers)
- # where it's not exposed we are almost pretty sure these ports would be
- # exclusive for our DevStack.
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
- # Get any currently reserved ports, strip off leading whitespace
- reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
-
- if [[ -z "${reserved_ports}" ]]; then
- # If there are no currently reserved ports, reserve the keystone ports
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
- else
- # If there are currently reserved ports, keep those and also reserve the
- # Keystone specific ports. Duplicate reservations are merged into a single
- # reservation (or range) automatically by the kernel.
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
- fi
- else
- echo_summary "WARNING: unable to reserve keystone ports"
- fi
-}
-
-# Ubuntu Repositories
-#--------------------
-# Enable universe for bionic since it is missing when installing from ISO.
-function fixup_ubuntu {
- if [[ "$DISTRO" != "bionic" ]]; then
- return
- fi
-
- # This pulls in apt-add-repository
- install_package "software-properties-common"
-
- # Enable universe
- sudo add-apt-repository -y universe
-
- if [[ -f /etc/ci/mirror_info.sh ]] ; then
- # If we are on a nodepool provided host and it has told us about
- # where we can find local mirrors then use that mirror.
- source /etc/ci/mirror_info.sh
- sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main"
- else
- # Enable UCA:ussuri for updated versions of QEMU and libvirt
- sudo add-apt-repository -y cloud-archive:ussuri
- fi
- REPOS_UPDATED=False
- apt_get_update
-
- # Since pip10, pip will refuse to uninstall files from packages
- # that were created with distutils (rather than more modern
- # setuptools). This is because it technically doesn't have a
- # manifest of what to remove. However, in most cases, simply
- # overwriting works. So this hacks around those packages that
- # have been dragged in by some other system dependency
- sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info
- sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
- sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
-}
-
# Python Packages
# ---------------
@@ -143,6 +73,21 @@
# overwriting works. So this hacks around those packages that
# have been dragged in by some other system dependency
sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
+
+ # After updating setuptools based on the requirements, the files from the
+ # python3-setuptools RPM are deleted, it breaks some tools such as semanage
+ # (used in diskimage-builder) that use the -s flag of the python
+ # interpreter, enforcing the use of the packages from /usr/lib.
+ # Importing setuptools/pkg_resources in such an environment fails.
+ # Enforce the package re-installation to fix those applications.
+ if is_package_installed python3-setuptools; then
+ sudo dnf reinstall -y python3-setuptools
+ fi
+ # Workaround CentOS 8-stream iputils and systemd Bug
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2037807
+ if [[ $os_VENDOR == "CentOSStream" && $os_RELEASE -eq 8 ]]; then
+ sudo sysctl -w net.ipv4.ping_group_range='0 2147483647'
+ fi
}
function fixup_suse {
@@ -192,9 +137,48 @@
yum_install centos-release-openstack-victoria
}
+function fixup_ubuntu {
+ if ! is_ubuntu; then
+ return
+ fi
+
+ # Since pip10, pip will refuse to uninstall files from packages
+ # that were created with distutils (rather than more modern
+ # setuptools). This is because it technically doesn't have a
+ # manifest of what to remove. However, in most cases, simply
+ # overwriting works. So this hacks around those packages that
+ # have been dragged in by some other system dependency
+ sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
+}
+
+function fixup_openeuler {
+ if ! is_openeuler; then
+ return
+ fi
+
+ if is_arch "x86_64"; then
+ arch="x86_64"
+ elif is_arch "aarch64"; then
+ arch="aarch64"
+ fi
+
+ # Some packages' version in openEuler are too old, use the newer ones we
+ # provide in oepkg. (oepkg is an openEuler third-party yum repo which is
+ # endorsed by openEuler community)
+ (echo '[openstack-ci]'
+ echo 'name=openstack'
+ echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/'
+ echo 'enabled=1'
+ echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null
+
+ yum_install liberasurecode-devel
+}
+
function fixup_all {
- fixup_keystone
fixup_ubuntu
fixup_fedora
fixup_suse
+ fixup_openeuler
}
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 9afd2e5..e9c52ea 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -38,7 +38,7 @@
# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip
PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
-LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
+PIP_GET_PIP36_URL=${PIP_GET_PIP36_URL:-"https://bootstrap.pypa.io/pip/3.6/get-pip.py"}
GetDistro
echo "Distro: $DISTRO"
@@ -46,25 +46,32 @@
function get_versions {
# FIXME(dhellmann): Deal with multiple python versions here? This
# is just used for reporting, so maybe not?
- PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true)
+ PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || which pip3 2>/dev/null || true)
if [[ -n $PIP ]]; then
PIP_VERSION=$($PIP --version | awk '{ print $2}')
echo "pip: $PIP_VERSION"
else
echo "pip: Not Installed"
fi
- # Show python3 module version
- python${PYTHON3_VERSION} -m pip --version
}
function install_get_pip {
+ if [[ "$PYTHON3_VERSION" = "3.6" ]]; then
+ _pip_url=$PIP_GET_PIP36_URL
+ _local_pip="$FILES/$(basename $_pip_url)-py36"
+ else
+ _pip_url=$PIP_GET_PIP_URL
+ _local_pip="$FILES/$(basename $_pip_url)"
+ fi
+
+
# If get-pip.py isn't python, delete it. This was probably an
# outage on the server.
- if [[ -r $LOCAL_PIP ]]; then
- if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then
- echo "WARNING: Corrupt $LOCAL_PIP found removing"
- rm $LOCAL_PIP
+ if [[ -r $_local_pip ]]; then
+ if ! head -1 $_local_pip | grep -q '#!/usr/bin/env python'; then
+ echo "WARNING: Corrupt $_local_pip found removing"
+ rm $_local_pip
fi
fi
@@ -78,22 +85,20 @@
# Thus we use curl's "-z" feature to always check the modified
# since and only download if a new version is out -- but only if
# it seems we downloaded the file originally.
- if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
+ if [[ ! -r $_local_pip || -r $_local_pip.downloaded ]]; then
# only test freshness if LOCAL_PIP is actually there,
# otherwise we generate a scary warning.
local timecond=""
- if [[ -r $LOCAL_PIP ]]; then
- timecond="-z $LOCAL_PIP"
+ if [[ -r $_local_pip ]]; then
+ timecond="-z $_local_pip"
fi
curl -f --retry 6 --retry-delay 5 \
- $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+ $timecond -o $_local_pip $_pip_url || \
die $LINENO "Download of get-pip.py failed"
- touch $LOCAL_PIP.downloaded
+ touch $_local_pip.downloaded
fi
- # TODO: remove the trailing pip constraint when a proper fix
- # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
- sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
+ sudo -H -E python${PYTHON3_VERSION} $_local_pip
}
@@ -115,14 +120,6 @@
}
-# Setuptools 8 implements PEP 440, and 8.0.4 adds a warning triggered any time
-# pkg_resources inspects the list of installed Python packages if there are
-# non-compliant version numbers in the egg-info (for example, from distro
-# system packaged Python libraries). This is off by default after 8.2 but can
-# be enabled by uncommenting the lines below.
-#PYTHONWARNINGS=$PYTHONWARNINGS,always::RuntimeWarning:pkg_resources
-#export PYTHONWARNINGS
-
# Show starting versions
get_versions
@@ -130,24 +127,23 @@
configure_pypi_alternative_url
fi
-# Just use system pkgs on Focal
-if [[ "$DISTRO" == focal ]]; then
- exit 0
+if is_fedora && [[ ${DISTRO} == f* || ${DISTRO} == rhel9 ]]; then
+ # get-pip.py will not install over the python3-pip package in
+ # Fedora 34 any more.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1988935
+ # https://github.com/pypa/pip/issues/9904
+ # You can still install using get-pip.py if python3-pip is *not*
+ # installed; this *should* remain separate under /usr/local and not break
+ # if python3-pip is later installed.
+ # For general sanity, we just use the packaged pip. It should be
+ # recent enough anyway. This is included via rpms/general
+ : # Simply fall through
+elif is_ubuntu; then
+ : # pip on Ubuntu 20.04 is new enough, too
+else
+ install_get_pip
fi
-# Eradicate any and all system packages
-
-# Python in fedora/suse depends on the python-pip package so removing it
-# results in a nonfunctional system. pip on fedora installs to /usr so pip
-# can safely override the system pip for all versions of fedora
-if ! is_fedora && ! is_suse; then
- if is_package_installed python3-pip ; then
- uninstall_package python3-pip
- fi
-fi
-
-install_get_pip
-
set -x
# Note setuptools is part of requirements.txt and we want to make sure
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index e91464f..0212d00 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -27,7 +27,7 @@
}
CN=$1
-if [ -z "$CN" ]]; then
+if [ -z "$CN" ]; then
usage
fi
ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME}
@@ -52,5 +52,5 @@
make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME
# Create a cert bundle
-cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
-
+cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
+ $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 7be995e..74dcdb2 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -65,7 +65,7 @@
def _read_clouds(self):
try:
with open(self._clouds_path) as clouds_file:
- self._clouds = yaml.load(clouds_file)
+ self._clouds = yaml.safe_load(clouds_file)
except IOError:
# The user doesn't have a clouds.yaml file.
print("The user clouds.yaml file didn't exist.")
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
new file mode 100755
index 0000000..2596395
--- /dev/null
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+#
+#
+# NOTE(gmann): This script is used in 'devstack-tempest-ipv6' zuul job to verify that
+# services are deployed on IPv6 properly or not. This will capture if any devstack or devstack
+# plugins are missing the required setting to listen on IPv6 address. This is run as part of
+# run phase of zuul job and before test run. Child job of 'devstack-tempest-ipv6'
+# can expand the IPv6 verification specific to project by defining the new post-run script which
+# will run along with this base script.
+# If there are more common verifications for IPv6 then we can always extend this script.
+
+# Keep track of the DevStack directory
+TOP_DIR=$(cd $(dirname "$0")/../../devstack && pwd)
+source $TOP_DIR/stackrc
+source $TOP_DIR/openrc admin admin
+
+function verify_devstack_ipv6_setting {
+ local _service_host=''
+ _service_host=$(echo $SERVICE_HOST | tr -d [])
+ local _host_ipv6=''
+ _host_ipv6=$(echo $HOST_IPV6 | tr -d [])
+ local _service_listen_address=''
+ _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
+ local _service_local_host=''
+ _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
+ if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
+ echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
+ exit 1
+ fi
+ is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
+ if [[ "$is_service_host_ipv6" != "True" ]]; then
+ echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ exit 1
+ fi
+ is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
+ if [[ "$is_host_ipv6" != "True" ]]; then
+ echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ exit 1
+ fi
+ is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
+ if [[ "$is_service_listen_address" != "True" ]]; then
+ echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ exit 1
+ fi
+ is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
+ if [[ "$is_service_local_host" != "True" ]]; then
+ echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ exit 1
+ fi
+ echo "Devstack is properly configured with IPv6"
+ echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+}
+
+function sanity_check_system_ipv6_enabled {
+ system_ipv6_enabled=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_ipv6_enabled())')
+ if [[ $system_ipv6_enabled != "True" ]]; then
+ echo "IPv6 is disabled in system"
+ exit 1
+ fi
+ echo "IPv6 is enabled in system"
+}
+
+function verify_service_listen_address_is_ipv6 {
+ local endpoints_verified=False
+ local all_ipv6=True
+ endpoints=$(openstack endpoint list -f value -c URL)
+ for endpoint in ${endpoints}; do
+ local endpoint_address=''
+ endpoint_address=$(echo "$endpoint" | awk -F/ '{print $3}' | awk -F] '{print $1}')
+ endpoint_address=$(echo $endpoint_address | tr -d [])
+ local is_endpoint_ipv6=''
+ is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
+ if [[ "$is_endpoint_ipv6" != "True" ]]; then
+ all_ipv6=False
+ echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
+ continue
+ fi
+ endpoints_verified=True
+ done
+ if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
+ exit 1
+ fi
+ echo "All services deployed by devstack is on IPv6 endpoints"
+ echo $endpoints
+}
+
+#First thing to verify if system has IPv6 enabled or not
+sanity_check_system_ipv6_enabled
+#Verify whether devstack is configured properly with IPv6 setting
+verify_devstack_ipv6_setting
+#Get all registered endpoints by devstack in keystone and verify that each endpoint's address is IPv6.
+verify_service_listen_address_is_ipv6
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 22770f1..e292173 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -134,7 +134,7 @@
def ebtables_dump():
- tables = ['filter', 'nat', 'broute']
+ tables = ['filter', 'nat']
_header("EB Tables Dump")
if not _find_cmd('ebtables'):
return
diff --git a/tox.ini b/tox.ini
index ed28636..ec764ab 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-minversion = 1.6
+minversion = 3.18.0
skipsdist = True
envlist = bashate
@@ -13,7 +13,7 @@
# modified bashate tree
deps =
{env:BASHATE_INSTALL_PATH:bashate==2.0.0}
-whitelist_externals = bash
+allowlist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
-not \( -type d -name doc -prune \) \
@@ -34,8 +34,10 @@
-print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
-deps = -r{toxinidir}/doc/requirements.txt
-whitelist_externals = bash
+deps =
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/doc/requirements.txt
+allowlist_externals = bash
setenv =
TOP_DIR={toxinidir}
commands =
@@ -43,7 +45,7 @@
[testenv:pdf-docs]
deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
make
commands =
sphinx-build -W -b latex doc/source doc/build/pdf
diff --git a/unstack.sh b/unstack.sh
index d9dca7c..4b57b6e 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -176,10 +176,6 @@
# enabled backends. So if Cinder is enabled, and installed successfully we are
# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
if is_service_enabled cinder && is_package_installed lvm2; then
- # Using /bin/true here indicates a BUG - maybe the
- # DEFAULT_VOLUME_GROUP_NAME doesn't exist? We should
- # isolate this further down in lib/cinder cleanup.
- clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
clean_lvm_filter
fi