Merge "Add manila service to configuration"
diff --git a/.gitignore b/.gitignore
index 8fe56ad..ad153f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,3 +38,5 @@
userrc_early
AUTHORS
ChangeLog
+tools/dbcounter/build/
+tools/dbcounter/dbcounter.egg-info/
diff --git a/.zuul.yaml b/.zuul.yaml
index 37625f3..2fbfa04 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,18 +1,18 @@
- nodeset:
- name: openstack-single-node
+ name: openstack-single-node-jammy
nodes:
- name: controller
- label: ubuntu-xenial
+ label: ubuntu-jammy
groups:
- name: tempest
nodes:
- controller
- nodeset:
- name: openstack-single-node-jammy
+ name: openstack-single-node-noble
nodes:
- name: controller
- label: ubuntu-jammy
+ label: ubuntu-noble
groups:
- name: tempest
nodes:
@@ -39,26 +39,6 @@
- controller
- nodeset:
- name: openstack-single-node-xenial
- nodes:
- - name: controller
- label: ubuntu-xenial
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
- name: devstack-single-node-centos-7
- nodes:
- - name: controller
- label: centos-7
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
name: devstack-single-node-centos-9-stream
nodes:
- name: controller
@@ -79,15 +59,18 @@
- controller
- nodeset:
- name: devstack-single-node-fedora-latest
+ name: devstack-single-node-debian-bookworm
nodes:
- name: controller
- label: fedora-36
+ label: debian-bookworm
groups:
- name: tempest
nodes:
- controller
+# Note(sean-k-mooney): this is still used by horizon for
+# horizon-integration-tests, horizon-integration-pytest and
+# horizon-ui-pytest, remove when horizon is updated.
- nodeset:
name: devstack-single-node-debian-bullseye
nodes:
@@ -109,46 +92,6 @@
- controller
- nodeset:
- name: devstack-single-node-openeuler-22.03
- nodes:
- - name: controller
- label: openEuler-22-03-LTS
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
- name: openstack-two-node
- nodes:
- - name: controller
- label: ubuntu-xenial
- - name: compute1
- label: ubuntu-xenial
- groups:
- # Node where tests are executed and test results collected
- - name: tempest
- nodes:
- - controller
- # Nodes running the compute service
- - name: compute
- nodes:
- - controller
- - compute1
- # Nodes that are not the controller
- - name: subnode
- nodes:
- - compute1
- # Switch node for multinode networking setup
- - name: switch
- nodes:
- - controller
- # Peer nodes for multinode networking setup
- - name: peers
- nodes:
- - compute1
-
-- nodeset:
name: openstack-two-node-centos-9-stream
nodes:
- name: controller
@@ -209,6 +152,36 @@
- compute1
- nodeset:
+ name: openstack-two-node-noble
+ nodes:
+ - name: controller
+ label: ubuntu-noble
+ - name: compute1
+ label: ubuntu-noble
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
name: openstack-two-node-focal
nodes:
- name: controller
@@ -269,36 +242,6 @@
- compute1
- nodeset:
- name: openstack-two-node-xenial
- nodes:
- - name: controller
- label: ubuntu-xenial
- - name: compute1
- label: ubuntu-xenial
- groups:
- # Node where tests are executed and test results collected
- - name: tempest
- nodes:
- - controller
- # Nodes running the compute service
- - name: compute
- nodes:
- - controller
- - compute1
- # Nodes that are not the controller
- - name: subnode
- nodes:
- - compute1
- # Switch node for multinode networking setup
- - name: switch
- nodes:
- - controller
- # Peer nodes for multinode networking setup
- - name: peers
- nodes:
- - compute1
-
-- nodeset:
name: openstack-three-node-focal
nodes:
- name: controller
@@ -430,6 +373,7 @@
/var/log/mysql: logs
/var/log/libvirt: logs
/etc/libvirt: logs
+ /etc/lvm: logs
/etc/sudoers: logs
/etc/sudoers.d: logs
'{{ stage_dir }}/iptables.txt': logs
@@ -488,6 +432,8 @@
- ^releasenotes/.*$
# Translations
- ^.*/locale/.*po$
+ # pre-commit config
+      - ^\.pre-commit-config\.yaml$
- job:
name: devstack-minimal
@@ -495,7 +441,7 @@
description: |
Minimal devstack base job, intended for use by jobs that need
less than the normal minimum set of required-projects.
- nodeset: openstack-single-node-jammy
+ nodeset: openstack-single-node-noble
required-projects:
- opendev.org/openstack/requirements
vars:
@@ -512,6 +458,7 @@
file_tracker: true
mysql: true
rabbit: true
+ openstack-cli-server: true
group-vars:
subnode:
devstack_services:
@@ -519,6 +466,7 @@
dstat: false
memory_tracker: true
file_tracker: true
+ openstack-cli-server: true
devstack_localrc:
# Multinode specific settings
HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -564,9 +512,17 @@
- opendev.org/openstack/nova
- opendev.org/openstack/placement
- opendev.org/openstack/swift
+ - opendev.org/openstack/os-test-images
timeout: 7200
vars:
- configure_swap_size: 4096
+ # based on observation of the integrated gate
+ # tempest-integrated-compute was only using ~1.7GB of swap
+      # when zswap and the host tuning are enabled that increases
+ # slightly to ~2GB. we are setting the swap size to 8GB to
+ # be safe and account for more complex scenarios.
+ # we should revisit this value after some time to see if we
+ # can reduce it.
+ configure_swap_size: 8192
devstack_localrc:
# Common OpenStack services settings
SWIFT_REPLICAS: 1
@@ -575,6 +531,26 @@
DEBUG_LIBVIRT_COREDUMPS: true
NOVA_VNC_ENABLED: true
OVN_DBS_LOG_LEVEL: dbg
+ # tune the host to optimize memory usage and hide io latency
+        # these settings will configure the kernel to treat the host page
+ # cache and swap with equal priority, and prefer deferring writes
+ # changing the default swappiness, dirty_ratio and
+ # the vfs_cache_pressure
+ ENABLE_SYSCTL_MEM_TUNING: true
+ # the net tuning optimizes ipv4 tcp fast open and config the default
+        # qdisc policy to pfifo_fast which effectively disables all qos.
+ # this minimizes the cpu load of the host network stack
+ ENABLE_SYSCTL_NET_TUNING: true
+ # zswap allows the kernel to compress pages in memory before swapping
+ # them to disk. this can reduce the amount of swap used and improve
+ # performance. effectively this trades a small amount of cpu for an
+ # increase in swap performance by reducing the amount of data
+ # written to disk. the overall speedup is proportional to the
+ # compression ratio and the speed of the swap device.
+ # NOTE: this option is ignored when not using nova with the libvirt
+ # virt driver.
+ NOVA_LIBVIRT_TB_CACHE_SIZE: 128
+ ENABLE_ZSWAP: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -667,6 +643,26 @@
Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
NOVA_VNC_ENABLED: true
ENABLE_CHASSIS_AS_GW: false
+ # tune the host to optimize memory usage and hide io latency
+        # these settings will configure the kernel to treat the host page
+ # cache and swap with equal priority, and prefer deferring writes
+ # changing the default swappiness, dirty_ratio and
+ # the vfs_cache_pressure
+ ENABLE_SYSCTL_MEM_TUNING: true
+ # the net tuning optimizes ipv4 tcp fast open and config the default
+        # qdisc policy to pfifo_fast which effectively disables all qos.
+ # this minimizes the cpu load of the host network stack
+ ENABLE_SYSCTL_NET_TUNING: true
+ # zswap allows the kernel to compress pages in memory before swapping
+ # them to disk. this can reduce the amount of swap used and improve
+        # performance. effectively this trades a small amount of cpu for an
+ # increase in swap performance by reducing the amount of data
+        # written to disk. the overall speedup is proportional to the
+ # compression ratio and the speed of the swap device.
+ ENABLE_ZSWAP: true
+ # NOTE: this option is ignored when not using nova with the libvirt
+ # virt driver.
+ NOVA_LIBVIRT_TB_CACHE_SIZE: 128
- job:
name: devstack-ipv6
@@ -692,13 +688,10 @@
- job:
name: devstack-multinode
parent: devstack
- nodeset: openstack-two-node-jammy
+ nodeset: openstack-two-node-noble
description: |
Simple multinode test to verify multinode functionality on devstack side.
This is not meant to be used as a parent job.
- vars:
- devstack_localrc:
- MYSQL_REDUCE_MEMORY: true
# NOTE(ianw) Platform tests have traditionally been non-voting because
# we often have to rush things through devstack to stabilise the gate,
@@ -710,15 +703,13 @@
description: CentOS 9 Stream platform test
nodeset: devstack-single-node-centos-9-stream
timeout: 9000
- # TODO(kopecmartin) n-v until the following is resolved:
- # https://bugs.launchpad.net/neutron/+bug/1979047
voting: false
- job:
- name: devstack-platform-debian-bullseye
+ name: devstack-platform-debian-bookworm
parent: tempest-full-py3
- description: Debian Bullseye platform test
- nodeset: devstack-single-node-debian-bullseye
+ description: Debian Bookworm platform test
+ nodeset: devstack-single-node-debian-bookworm
timeout: 9000
vars:
configure_swap_size: 4096
@@ -729,89 +720,40 @@
description: Rocky Linux 9 Blue Onyx platform test
nodeset: devstack-single-node-rockylinux-9
timeout: 9000
+ # NOTE(danms): This has been failing lately with some repository metadata
+ # errors. We're marking this as non-voting until it appears to have
+ # stabilized:
+ # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0
+ voting: false
vars:
configure_swap_size: 4096
- job:
- name: devstack-platform-ubuntu-focal
+ name: devstack-platform-ubuntu-jammy
parent: tempest-full-py3
- description: Ubuntu 20.04 LTS (focal) platform test
- nodeset: openstack-single-node-focal
- timeout: 9000
-
-- job:
- name: devstack-platform-ubuntu-jammy-ovn-source
- parent: devstack-platform-ubuntu-jammy
- description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
- voting: false
- vars:
- devstack_localrc:
- OVN_BUILD_FROM_SOURCE: True
- OVN_BRANCH: "v21.06.0"
- OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
- OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
-
-- job:
- name: devstack-platform-ubuntu-jammy-ovs
- parent: tempest-full-py3
- description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+ description: Ubuntu 22.04 LTS (Jammy) platform test
nodeset: openstack-single-node-jammy
- voting: false
timeout: 9000
vars:
configure_swap_size: 8192
- devstack_localrc:
- Q_AGENT: openvswitch
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
- Q_ML2_TENANT_NETWORK_TYPE: vxlan
- devstack_services:
- # Disable OVN services
- ovn-northd: false
- ovn-controller: false
- ovs-vswitchd: false
- ovsdb-server: false
- # Disable Neutron ML2/OVN services
- q-ovn-metadata-agent: false
- # Enable Neutron ML2/OVS services
- q-agt: true
- q-dhcp: true
- q-l3: true
- q-meta: true
- q-metering: true
- group-vars:
- subnode:
- devstack_services:
- # Disable OVN services
- ovn-controller: false
- ovs-vswitchd: false
- ovsdb-server: false
- # Disable Neutron ML2/OVN services
- q-ovn-metadata-agent: false
- # Enable Neutron ML2/OVS services
- q-agt: true
- job:
- name: devstack-platform-openEuler-22.03-ovn-source
- parent: tempest-full-py3
- description: openEuler 22.03 LTS platform test (OVN)
- nodeset: devstack-single-node-openeuler-22.03
+ name: devstack-platform-ubuntu-noble-ovn-source
+ parent: devstack-platform-ubuntu-noble
+ description: Ubuntu 24.04 LTS (noble) platform test (OVN from source)
voting: false
- timeout: 9000
vars:
- configure_swap_size: 4096
devstack_localrc:
- # NOTE(wxy): OVN package is not supported by openEuler yet. Build it
- # from source instead.
OVN_BUILD_FROM_SOURCE: True
OVN_BRANCH: "v21.06.0"
OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
- job:
- name: devstack-platform-openEuler-22.03-ovs
+ name: devstack-platform-ubuntu-noble-ovs
parent: tempest-full-py3
- description: openEuler 22.03 LTS platform test (OVS)
- nodeset: devstack-single-node-openeuler-22.03
+ description: Ubuntu 24.04 LTS (noble) platform test (OVS)
+ nodeset: openstack-single-node-noble
voting: false
timeout: 9000
vars:
@@ -858,23 +800,6 @@
tls-proxy: false
- job:
- name: devstack-platform-fedora-latest
- parent: tempest-full-py3
- description: Fedora latest platform test
- nodeset: devstack-single-node-fedora-latest
- voting: false
-
-- job:
- name: devstack-platform-fedora-latest-virt-preview
- parent: tempest-full-py3
- description: Fedora latest platform test using the virt-preview repo.
- nodeset: devstack-single-node-fedora-latest
- voting: false
- vars:
- devstack_localrc:
- ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
-
-- job:
name: devstack-tox-base
parent: devstack
description: |
@@ -930,7 +855,7 @@
- job:
name: devstack-unit-tests
- nodeset: ubuntu-jammy
+ nodeset: ubuntu-noble
description: |
Runs unit tests on devstack project.
@@ -947,15 +872,12 @@
- devstack
- devstack-ipv6
- devstack-enforce-scope
- - devstack-platform-fedora-latest
- devstack-platform-centos-9-stream
- - devstack-platform-debian-bullseye
+ - devstack-platform-debian-bookworm
- devstack-platform-rocky-blue-onyx
- - devstack-platform-ubuntu-focal
- - devstack-platform-ubuntu-jammy-ovn-source
- - devstack-platform-ubuntu-jammy-ovs
- - devstack-platform-openEuler-22.03-ovn-source
- - devstack-platform-openEuler-22.03-ovs
+ - devstack-platform-ubuntu-noble-ovn-source
+ - devstack-platform-ubuntu-noble-ovs
+ - devstack-platform-ubuntu-jammy
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
@@ -969,10 +891,6 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-linuxbridge-tempest:
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- neutron-ovn-tempest-ovs-release:
voting: false
irrelevant-files:
@@ -999,11 +917,11 @@
jobs:
- devstack
- devstack-ipv6
- # TODO(kopecmartin) n-v until the following is resolved:
- # https://bugs.launchpad.net/neutron/+bug/1979047
- # - devstack-platform-centos-9-stream
- - devstack-platform-debian-bullseye
- - devstack-platform-ubuntu-focal
+ - devstack-platform-debian-bookworm
+ - devstack-platform-ubuntu-noble
+ # NOTE(danms): Disabled due to instability, see comment in the job
+ # definition above.
+ # - devstack-platform-rocky-blue-onyx
- devstack-enforce-scope
- devstack-multinode
- devstack-unit-tests
@@ -1012,10 +930,6 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-linuxbridge-tempest:
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- ironic-tempest-bios-ipmi-direct-tinyipa
- swift-dsvm-functional
- grenade:
@@ -1039,30 +953,16 @@
# pruned.
#
# * nova-next: maintained by nova for unreleased/undefaulted
- # things
- # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test
- # when neutron-api is served by uwsgi, it's in exprimental for testing.
- # the next cycle we can remove this job if things turn out to be
- # stable enough.
- # * neutron-functional-with-uwsgi: maintained by neutron for functional
- # test. Next cycle we can remove this one if things turn out to be
- # stable engouh with uwsgi.
- # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test.
- # Next cycle we can remove this if everything run out stable enough.
- # * nova-multi-cell: maintained by nova and currently non-voting in the
+ # things, this job is not experimental but often is used to test
+ # things that are not yet production ready or to test what will be
+ # the new default after a deprecation period has ended.
+ # * nova-multi-cell: maintained by nova and now is voting in the
# check queue for nova changes but relies on devstack configuration
- # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood
- # for Nova to allow early testing of the latest versions of Libvirt and
- # QEMU. Should only graduate out of experimental if it ever moves into
- # the check queue for Nova.
experimental:
jobs:
- nova-multi-cell
- nova-next
- - neutron-fullstack-with-uwsgi
- - neutron-functional-with-uwsgi
- - neutron-tempest-with-uwsgi
- devstack-plugin-ceph-tempest-py3:
irrelevant-files:
- ^.*\.rst$
@@ -1083,12 +983,15 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - devstack-platform-fedora-latest-virt-preview
- devstack-no-tls-proxy
periodic:
jobs:
- devstack-no-tls-proxy
periodic-weekly:
jobs:
- - devstack-platform-openEuler-22.03-ovn-source
- - devstack-platform-openEuler-22.03-ovs
+ - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bookworm
+ - devstack-platform-rocky-blue-onyx
+ - devstack-platform-ubuntu-noble-ovn-source
+ - devstack-platform-ubuntu-noble-ovs
+ - devstack-platform-ubuntu-jammy
diff --git a/README.rst b/README.rst
index f3a585a..86b85da 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@
Goals
=====
-* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora
+* To quickly build dev OpenStack environments in a clean Ubuntu or RockyLinux
environment
* To describe working configurations of OpenStack (which code branches
work together? what do config files look like for those branches?)
@@ -28,9 +28,9 @@
The DevStack master branch generally points to trunk versions of OpenStack
components. For older, stable versions, look for branches named
stable/[release] in the DevStack repo. For example, you can do the
-following to create a Pike OpenStack cloud::
+following to create a Zed OpenStack cloud::
- git checkout stable/pike
+ git checkout stable/zed
./stack.sh
You can also pick specific OpenStack project releases by setting the appropriate
@@ -55,7 +55,7 @@
endpoints, like so:
* Horizon: http://myhost/
-* Keystone: http://myhost/identity/v2.0/
+* Keystone: http://myhost/identity/v3/
We also provide an environment file that you can use to interact with your
cloud via CLI::
diff --git a/clean.sh b/clean.sh
index 6a31cc6..092f557 100755
--- a/clean.sh
+++ b/clean.sh
@@ -40,7 +40,7 @@
source $TOP_DIR/lib/tls
-source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/libraries
source $TOP_DIR/lib/lvm
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/keystone
diff --git a/doc/requirements.txt b/doc/requirements.txt
index ffce3ff..7980b93 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -4,8 +4,4 @@
docutils
sphinx>=2.0.0,!=2.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
-nwdiag
-blockdiag
-sphinxcontrib-blockdiag
-sphinxcontrib-nwdiag
zuul-sphinx>=0.2.0
diff --git a/doc/source/assets/images/neutron-network-1.png b/doc/source/assets/images/neutron-network-1.png
new file mode 100644
index 0000000..7730ca9
--- /dev/null
+++ b/doc/source/assets/images/neutron-network-1.png
Binary files differ
diff --git a/doc/source/assets/images/neutron-network-2.png b/doc/source/assets/images/neutron-network-2.png
new file mode 100644
index 0000000..9199351
--- /dev/null
+++ b/doc/source/assets/images/neutron-network-2.png
Binary files differ
diff --git a/doc/source/assets/images/neutron-network-3.png b/doc/source/assets/images/neutron-network-3.png
new file mode 100644
index 0000000..34f03ed
--- /dev/null
+++ b/doc/source/assets/images/neutron-network-3.png
Binary files differ
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2e17da1..bb03572 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -23,14 +23,14 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'sphinx.ext.autodoc',
- 'zuul_sphinx',
- 'openstackdocstheme',
- 'sphinxcontrib.blockdiag',
- 'sphinxcontrib.nwdiag' ]
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'zuul_sphinx',
+ 'openstackdocstheme',
+]
# openstackdocstheme options
-openstackdocs_repo_name = 'openstack-dev/devstack'
+openstackdocs_repo_name = 'openstack/devstack'
openstackdocs_pdf_link = True
openstackdocs_bug_project = 'devstack'
openstackdocs_bug_tag = ''
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index a83b2de..3cfba71 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -323,7 +323,7 @@
[[local|localrc]]
DEST=/opt/stack/
- LOGFILE=$LOGDIR/stack.sh.log
+ LOGFILE=$DEST/stack.sh.log
LOG_COLOR=False
Database Backend
@@ -351,30 +351,21 @@
disable_service rabbit
-
Apache Frontend
---------------
-The Apache web server can be enabled for wsgi services that support
-being deployed under HTTPD + mod_wsgi. By default, services that
-recommend running under HTTPD + mod_wsgi are deployed under Apache. To
-use an alternative deployment strategy (e.g. eventlet) for services
-that support an alternative to HTTPD + mod_wsgi set
-``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
-``local.conf``.
+The Apache web server is enabled for services that support via WSGI. Today this
+means HTTPD and uWSGI but historically this meant HTTPD + mod_wsgi. This
+historical legacy is captured by the naming of many variables, which include
+``MOD_WSGI`` rather than ``UWSGI``.
-Each service that can be run under HTTPD + mod_wsgi also has an
-override toggle available that can be set in your ``local.conf``.
-
-Keystone is run under Apache with ``mod_wsgi`` by default.
-
-Example (Keystone)::
-
- KEYSTONE_USE_MOD_WSGI="True"
-
-Example (Nova)::
-
- NOVA_USE_MOD_WSGI="True"
+Some services support alternative deployment strategies (e.g. eventlet). You
+can enable these ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
+``local.conf``. In addition, each service that can be run under HTTPD +
+mod_wsgi also has an override toggle available that can be set in your
+``local.conf``. These are, however, slowly being removed as services have
+adopted standardized deployment mechanisms and more generally moved away from
+eventlet.
Example (Swift)::
@@ -384,11 +375,6 @@
HEAT_USE_MOD_WSGI="True"
-Example (Cinder)::
-
- CINDER_USE_MOD_WSGI="True"
-
-
Libraries from Git
------------------
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 2c25a1c..fb36b3e 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -41,19 +41,8 @@
`local.conf` exhibited here assumes that 1500 is a reasonable MTU to
use on that network.
-.. nwdiag::
-
- nwdiag {
- inet [ shape = cloud ];
- router;
- inet -- router;
-
- network hardware_network {
- address = "172.18.161.0/24"
- router [ address = "172.18.161.1" ];
- devstack-1 [ address = "172.18.161.6" ];
- }
- }
+.. image:: /assets/images/neutron-network-1.png
+ :alt: Network configuration for a single DevStack node
DevStack Configuration
@@ -100,21 +89,8 @@
Physical Network Setup
~~~~~~~~~~~~~~~~~~~~~~
-.. nwdiag::
-
- nwdiag {
- inet [ shape = cloud ];
- router;
- inet -- router;
-
- network hardware_network {
- address = "172.18.161.0/24"
- router [ address = "172.18.161.1" ];
- devstack-1 [ address = "172.18.161.6" ];
- devstack-2 [ address = "172.18.161.7" ];
- }
- }
-
+.. image:: /assets/images/neutron-network-2.png
+ :alt: Network configuration for multiple DevStack nodes
After DevStack installs and configures Neutron, traffic from guest VMs
flows out of `devstack-2` (the compute node) and is encapsulated in a
@@ -222,8 +198,6 @@
used so that project network traffic, using the VXLAN tunneling
protocol, flows between each compute node where project instances run.
-
-
DevStack Compute Configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -268,30 +242,8 @@
Physical Network Setup
----------------------
-.. nwdiag::
-
- nwdiag {
- inet [ shape = cloud ];
- router;
- inet -- router;
-
- network provider_net {
- address = "203.0.113.0/24"
- router [ address = "203.0.113.1" ];
- controller;
- compute1;
- compute2;
- }
-
- network control_plane {
- router [ address = "10.0.0.1" ]
- address = "10.0.0.0/24"
- controller [ address = "10.0.0.2" ]
- compute1 [ address = "10.0.0.3" ]
- compute2 [ address = "10.0.0.4" ]
- }
- }
-
+.. image:: /assets/images/neutron-network-3.png
+ :alt: Network configuration for provider networks
On a compute node, the first interface, eth0 is used for the OpenStack
management (API, message bus, etc) as well as for ssh for an
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 5b42797..6b8aabf 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -122,7 +122,7 @@
.. code-block:: shell
$ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
- --image cirros-0.3.5-x86_64-disk --nic none --wait test-server
+ --image cirros-0.6.3-x86_64-disk --nic none --wait test-server
.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
required to use ``--nic=none``.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ccd0fef..70871ef 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -37,8 +37,8 @@
-------------
Start with a clean and minimal install of a Linux system. DevStack
-attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL/Rocky Linux 9 and openEuler.
+attempts to support the two latest LTS releases of Ubuntu,
+Rocky Linux 9 and openEuler.
If you do not have a preference, Ubuntu 22.04 (Jammy) is the
most tested, and will probably go the smoothest.
@@ -113,7 +113,7 @@
$ ./stack.sh
-This will take a 15 - 20 minutes, largely depending on the speed of
+This will take 15 - 30 minutes, largely depending on the speed of
your internet connection. Many git trees and packages will be
installed during this process.
@@ -133,6 +133,8 @@
You can ``source openrc`` in your shell, and then use the
``openstack`` command line tool to manage your devstack.
+You can :ref:`create a VM and SSH into it <ssh>`.
+
You can ``cd /opt/stack/tempest`` and run tempest tests that have
been configured to work with your devstack.
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index e65c7ef..05b4f34 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -68,7 +68,7 @@
.. warning::
This is not a recommended configuration. Because of interactions
- between ovs and bridging, if you reboot your box with active
+ between OVS and bridging, if you reboot your box with active
networking you may lose network connectivity to your system.
If you need your guests accessible on the network, but only have 1
@@ -114,3 +114,125 @@
``FIXED_RANGE_V6`` will just use the value of that directly.
``SUBNETPOOL_PREFIX_V6`` will just default to the value of
``IPV6_ADDRS_SAFE_TO_USE`` directly.
+
+.. _ssh:
+
+SSH access to instances
+=======================
+
+To validate connectivity, you can create an instance using the
+``$PRIVATE_NETWORK_NAME`` network (default: ``private``), create a floating IP
+using the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``), and attach
+this floating IP to the instance:
+
+.. code-block:: shell
+
+ openstack keypair create --public-key ~/.ssh/id_rsa.pub test-keypair
+ openstack server create --network private --key-name test-keypair ... test-server
+ fip_id=$(openstack floating ip create public -f value -c id)
+ openstack server add floating ip test-server ${fip_id}
+
+Once done, ensure you have enabled SSH and ICMP (ping) access for the security
+group used for the instance. You can either create a custom security group and
+specify it when creating the instance or add it after creation, or you can
+modify the ``default`` security group created by default for each project.
+Let's do the latter:
+
+.. code-block:: shell
+
+ openstack security group rule create --proto icmp --dst-port 0 default
+ openstack security group rule create --proto tcp --dst-port 22 default
+
+Finally, SSH into the instance. If you used the Cirros instance uploaded by
+default, then you can run the following:
+
+.. code-block:: shell
+
+ openstack server ssh test-server -- -l cirros
+
+This will connect using the ``cirros`` user and the keypair you configured when
+creating the instance.
+
+Remote SSH access to instances
+==============================
+
+You can also SSH to created instances on your DevStack host from other hosts.
+This can be helpful if you are e.g. deploying DevStack in a VM on an existing
+cloud and wish to do development on your local machine. There are a few ways to
+do this.
+
+.. rubric:: Configure instances to be locally accessible
+
+The most obvious way is to configure guests to be locally accessible, as
+described `above <Locally Accessible Guests>`__. This has the advantage of
+requiring no further effort on the client. However, it is more involved and
+requires either support from your cloud or some inadvisable workarounds.
+
+.. rubric:: Use your DevStack host as a jump host
+
+You can choose to use your DevStack host as a jump host. To SSH to a instance
+this way, pass the standard ``-J`` option to the ``openstack ssh`` / ``ssh``
+command. For example:
+
+.. code-block::
+
+ openstack server ssh test-server -- -l cirros -J username@devstack-host
+
+(where ``test-server`` is name of an existing instance, as described
+:ref:`previously <ssh>`, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+This can also be configured via your ``~/.ssh/config`` file, making it rather
+effortless. However, it only allows SSH access. If you want to access e.g. a
+web application on the instance, you will need to configure an SSH tunnel and
+forward select ports using the ``-L`` option. For example, to forward HTTP
+traffic:
+
+.. code-block::
+
+ openstack server ssh test-server -- -l cirros -L 8080:username@devstack-host:80
+
+(where ``test-server`` is name of an existing instance, as described
+:ref:`previously <ssh>`, and ``username`` and ``devstack-host`` are the
+username and hostname of your DevStack host).
+
+As you can imagine, this can quickly get out of hand, particularly for more
+complex guest applications with multiple ports.
+
+.. rubric:: Use a proxy or VPN tool
+
+You can use a proxy or VPN tool to enable tunneling for the floating IP
+address range of the ``$PUBLIC_NETWORK_NAME`` network (default: ``public``)
+defined by ``$FLOATING_RANGE`` (default: ``172.24.4.0/24``). There are many
+such tools available to do this. For example, we could use a useful utility
+called `shuttle`__. To enable tunneling using ``shuttle``, first ensure you
+have allowed SSH and HTTP(S) traffic to your DevStack host. Allowing HTTP(S)
+traffic is necessary so you can use the OpenStack APIs remotely. How you do
+this will depend on where your DevStack host is running. Once this is done,
+install ``sshuttle`` on your localhost:
+
+.. code-block:: bash
+
+ sudo apt-get install sshuttle || yum install sshuttle
+
+Finally, start ``sshuttle`` on your localhost using the floating IP address
+range. For example, assuming you are using the default value for
+``$FLOATING_RANGE``, you can do:
+
+.. code-block:: bash
+
+ sshuttle -r username@devstack-host 172.24.4.0/24
+
+(where ``username`` and ``devstack-host`` are the username and hostname of your
+DevStack host).
+
+You should now be able to create an instance and SSH into it:
+
+.. code-block:: bash
+
+ openstack server ssh test-server -- -l cirros
+
+(where ``test-server`` is name of an existing instance, as described
+:ref:`previously <ssh>`)
+
+.. __: https://github.com/sshuttle/sshuttle
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index a609333..4384081 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -23,13 +23,12 @@
release.*
- Ubuntu: current LTS release plus current development release
-- Fedora: current release plus previous release
-- RHEL/CentOS: current major release
+- RHEL/CentOS/RockyLinux: current major release
- Other OS platforms may continue to be included but the maintenance of
those platforms shall not be assumed simply due to their presence.
Having a listed point-of-contact for each additional OS will greatly
increase its chance of being well-maintained.
-- Patches for Ubuntu and/or Fedora will not be held up due to
+- Patches for Ubuntu and/or RockyLinux will not be held up due to
side-effects on other OS platforms.
Databases
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index b244ca5..2984a5c 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -28,18 +28,16 @@
openstack/barbican `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
openstack/blazar `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
openstack/ceilometer `https://opendev.org/openstack/ceilometer <https://opendev.org/openstack/ceilometer>`__
-openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
-openstack/cinderlib `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
openstack/cloudkitty `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
openstack/cyborg `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
openstack/designate `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
+openstack/designate-tempest-plugin `https://opendev.org/openstack/designate-tempest-plugin <https://opendev.org/openstack/designate-tempest-plugin>`__
openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 <https://opendev.org/openstack/devstack-plugin-amqp1>`__
openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph <https://opendev.org/openstack/devstack-plugin-ceph>`__
openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container <https://opendev.org/openstack/devstack-plugin-container>`__
openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
-openstack/ec2-api `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
openstack/freezer `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
openstack/freezer-api `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
openstack/freezer-tempest-plugin `https://opendev.org/openstack/freezer-tempest-plugin <https://opendev.org/openstack/freezer-tempest-plugin>`__
@@ -51,9 +49,7 @@
openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter <https://opendev.org/openstack/ironic-prometheus-exporter>`__
openstack/ironic-ui `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
openstack/keystone `https://opendev.org/openstack/keystone <https://opendev.org/openstack/keystone>`__
-openstack/kuryr-kubernetes `https://opendev.org/openstack/kuryr-kubernetes <https://opendev.org/openstack/kuryr-kubernetes>`__
openstack/kuryr-libnetwork `https://opendev.org/openstack/kuryr-libnetwork <https://opendev.org/openstack/kuryr-libnetwork>`__
-openstack/kuryr-tempest-plugin `https://opendev.org/openstack/kuryr-tempest-plugin <https://opendev.org/openstack/kuryr-tempest-plugin>`__
openstack/magnum `https://opendev.org/openstack/magnum <https://opendev.org/openstack/magnum>`__
openstack/magnum-ui `https://opendev.org/openstack/magnum-ui <https://opendev.org/openstack/magnum-ui>`__
openstack/manila `https://opendev.org/openstack/manila <https://opendev.org/openstack/manila>`__
@@ -64,14 +60,10 @@
openstack/monasca-api `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
openstack/monasca-events-api `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
openstack/monasca-tempest-plugin `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
-openstack/murano `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
-openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
-openstack/networking-odl `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
-openstack/networking-powervm `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
openstack/networking-sfc `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
openstack/neutron `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
@@ -80,21 +72,17 @@
openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
-openstack/nova-powervm `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
+openstack/nova `https://opendev.org/openstack/nova <https://opendev.org/openstack/nova>`__
openstack/octavia `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
openstack/openstacksdk `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
openstack/osprofiler `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
-openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/ovn-bgp-agent `https://opendev.org/openstack/ovn-bgp-agent <https://opendev.org/openstack/ovn-bgp-agent>`__
openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
openstack/rally-openstack `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
-openstack/sahara `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
-openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
-openstack/senlin `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
openstack/shade `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
openstack/skyline-apiserver `https://opendev.org/openstack/skyline-apiserver <https://opendev.org/openstack/skyline-apiserver>`__
-openstack/solum `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
openstack/storlets `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
openstack/tacker `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
openstack/tap-as-a-service `https://opendev.org/openstack/tap-as-a-service <https://opendev.org/openstack/tap-as-a-service>`__
@@ -187,6 +175,7 @@
x/valet `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
x/vmware-nsx `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
x/vmware-vspc `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+x/whitebox-neutron-tempest-plugin `https://opendev.org/x/whitebox-neutron-tempest-plugin <https://opendev.org/x/whitebox-neutron-tempest-plugin>`__
======================================== ===
diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst
new file mode 100644
index 0000000..65dd5b1
--- /dev/null
+++ b/doc/source/tempest.rst
@@ -0,0 +1,25 @@
+=======
+Tempest
+=======
+
+`Tempest`_ is the OpenStack Integration test suite. It is installed by default
+and is used to provide integration testing for many of the OpenStack services.
+Just like DevStack itself, it is possible to extend Tempest with plugins. In
+fact, many Tempest plugin packages also include a DevStack plugin to do things
+like pre-create required static resources.
+
+The `Tempest documentation <Tempest>`_ provides a thorough guide to using
+Tempest. However, if you simply wish to run the standard set of Tempest tests
+against an existing deployment, you can do the following:
+
+.. code-block:: shell
+
+ cd /opt/stack/tempest
+ /opt/stack/data/venv/bin/tempest run ...
+
+The above assumes you have installed DevStack in the default location
+(configured via the ``DEST`` configuration variable) and have enabled
+virtualenv-based installation in the standard location (configured via the
+``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively).
+
+.. _Tempest: https://docs.openstack.org/tempest/latest/
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index efcfc03..da7a7d2 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -39,4 +39,5 @@
CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined
</VirtualHost>
+%WSGIPYTHONHOME%
WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1a353e5..d99e8e6 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -23,6 +23,7 @@
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
%SSLLISTEN% %SSLKEYFILE%
+%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2
%SSLLISTEN%</VirtualHost>
Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public
diff --git a/files/apache-neutron.template b/files/apache-neutron.template
index c7796b9..358e87f 100644
--- a/files/apache-neutron.template
+++ b/files/apache-neutron.template
@@ -24,6 +24,7 @@
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
%SSLLISTEN% %SSLKEYFILE%
+%SSLLISTEN% SSLProtocol -all +TLSv1.3 +TLSv1.2
%SSLLISTEN%</VirtualHost>
Alias /networking %NEUTRON_BIN%/neutron-api
diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template
deleted file mode 100644
index 011abb9..0000000
--- a/files/apache-placement-api.template
+++ /dev/null
@@ -1,27 +0,0 @@
-# NOTE(sbauza): This virtualhost is only here because some directives can
-# only be set by a virtualhost or server context, so that's why the port is not bound.
-# TODO(sbauza): Find a better way to identify a free port that is not corresponding to an existing
-# vhost.
-<VirtualHost *:8780>
- WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup placement-api
- WSGIScriptAlias / %PUBLICWSGI%
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- <IfVersion >= 2.4>
- ErrorLogFormat "%M"
- </IfVersion>
- ErrorLog /var/log/%APACHE_NAME%/placement-api.log
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
-Alias /placement %PUBLICWSGI%
-<Location /placement>
- SetHandler wsgi-script
- Options +ExecCGI
- WSGIProcessGroup placement-api
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
-</Location>
diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf
deleted file mode 100644
index 66a3751..0000000
--- a/files/dnsmasq-for-baremetal-from-nova-network.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-enable-tftp
-tftp-root=/tftpboot
-dhcp-boot=pxelinux.0
diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack
new file mode 100755
index 0000000..47fbfc5
--- /dev/null
+++ b/files/openstack-cli-server/openstack
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import os.path
+import json
+
+server_address = "/tmp/openstack.sock"
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+try:
+ sock.connect(server_address)
+except socket.error as msg:
+ print(msg, file=sys.stderr)
+ sys.exit(1)
+
+
+def send(sock, doc):
+ jdoc = json.dumps(doc)
+ sock.send(b'%d\n' % len(jdoc))
+ sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+ length_str = b''
+
+ char = sock.recv(1)
+ if len(char) == 0:
+ print("Unexpected end of file", file=sys.stderr)
+ sys.exit(1)
+
+ while char != b'\n':
+ length_str += char
+ char = sock.recv(1)
+ if len(char) == 0:
+ print("Unexpected end of file", file=sys.stderr)
+ sys.exit(1)
+
+ total = int(length_str)
+
+ # use a memoryview to receive the data chunk by chunk efficiently
+ jdoc = memoryview(bytearray(total))
+ next_offset = 0
+ while total - next_offset > 0:
+ recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+ next_offset += recv_size
+ try:
+ doc = json.loads(jdoc.tobytes())
+ except (TypeError, ValueError) as e:
+ raise Exception('Data received was not in JSON format')
+ return doc
+
+try:
+ env = {}
+ passenv = ["CINDER_VERSION",
+ "OS_AUTH_URL",
+ "OS_NO_CACHE",
+ "OS_PASSWORD",
+ "OS_PROJECT_NAME",
+ "OS_REGION_NAME",
+ "OS_TENANT_NAME",
+ "OS_USERNAME",
+ "OS_VOLUME_API_VERSION",
+ "OS_CLOUD"]
+ for name in passenv:
+ if name in os.environ:
+ env[name] = os.environ[name]
+
+ cmd = {
+ "app": os.path.basename(sys.argv[0]),
+ "env": env,
+ "argv": sys.argv[1:]
+ }
+ try:
+ image_idx = sys.argv.index('image')
+ create_idx = sys.argv.index('create')
+ missing_file = image_idx < create_idx and \
+ not any(x.startswith('--file') for x in sys.argv)
+ except ValueError:
+ missing_file = False
+
+ if missing_file:
+ # This means we were called with an image create command, but were
+ # not provided a --file option. That likely means we're being passed
+ # the image data to stdin, which won't work because we do not proxy
+ # stdin to the server. So, we just reject the operation and ask the
+ # caller to provide the file with --file instead.
+ # We've already connected to the server, we need to send it some dummy
+ # data so it doesn't wait forever.
+ send(sock, {})
+ print('Image create without --file is not allowed in server mode',
+ file=sys.stderr)
+ sys.exit(1)
+ else:
+ send(sock, cmd)
+
+ doc = recv(sock)
+ if doc["stdout"] != b'':
+ print(doc["stdout"], end='')
+ if doc["stderr"] != b'':
+ print(doc["stderr"], file=sys.stderr)
+ sys.exit(doc["status"])
+finally:
+ sock.close()
diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server
new file mode 100755
index 0000000..f3d2747
--- /dev/null
+++ b/files/openstack-cli-server/openstack-cli-server
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import json
+
+from openstackclient import shell as osc_shell
+from io import StringIO
+
+server_address = "/tmp/openstack.sock"
+
+try:
+ os.unlink(server_address)
+except OSError:
+ if os.path.exists(server_address):
+ raise
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+print('starting up on %s' % server_address, file=sys.stderr)
+sock.bind(server_address)
+
+# Listen for incoming connections
+sock.listen(1)
+
+def send(sock, doc):
+ jdoc = json.dumps(doc)
+ sock.send(b'%d\n' % len(jdoc))
+ sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+ length_str = b''
+ char = sock.recv(1)
+ while char != b'\n':
+ length_str += char
+ char = sock.recv(1)
+
+ total = int(length_str)
+
+ # use a memoryview to receive the data chunk by chunk efficiently
+ jdoc = memoryview(bytearray(total))
+ next_offset = 0
+ while total - next_offset > 0:
+ recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+ next_offset += recv_size
+ try:
+ doc = json.loads(jdoc.tobytes())
+ except (TypeError, ValueError) as e:
+ raise Exception('Data received was not in JSON format')
+ return doc
+
+while True:
+ csock, client_address = sock.accept()
+ try:
+ doc = recv(csock)
+
+ print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr)
+ oldenv = {}
+ for name in doc["env"].keys():
+ oldenv[name] = os.environ.get(name, None)
+ os.environ[name] = doc["env"][name]
+
+ try:
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ my_stdout = sys.stdout = StringIO()
+ my_stderr = sys.stderr = StringIO()
+
+ class Exit(BaseException):
+ def __init__(self, status):
+ self.status = status
+
+ def noexit(stat):
+ raise Exit(stat)
+
+ sys.exit = noexit
+
+ if doc["app"] == "openstack":
+ sh = osc_shell.OpenStackShell()
+ ret = sh.run(doc["argv"])
+ else:
+ print("Unknown application %s" % doc["app"], file=sys.stderr)
+ ret = 1
+ except Exit as e:
+ ret = e.status
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+ for name in oldenv.keys():
+ if oldenv[name] is None:
+ del os.environ[name]
+ else:
+ os.environ[name] = oldenv[name]
+
+ send(csock, {
+ "stdout": my_stdout.getvalue(),
+ "stderr": my_stderr.getvalue(),
+ "status": ret,
+ })
+
+ except BaseException as e:
+ print(e, file=sys.stderr)
+ finally:
+ csock.close()
diff --git a/files/rpms/general b/files/rpms/general
index b6866de..8a5755c 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -6,9 +6,11 @@
gcc-c++
gettext # used for compiling message catalogs
git-core
+glibc-langpack-en # dist:rhel9
graphviz # needed only for docs
httpd
httpd-devel
+iptables-nft # dist:rhel9
iptables-services
java-1.8.0-openjdk-headless
libffi-devel
diff --git a/functions b/functions
index 7ada0fe..42d08d7 100644
--- a/functions
+++ b/functions
@@ -118,7 +118,7 @@
useimport="--import"
fi
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
+ openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}")
}
# Retrieve an image from a URL and upload into Glance.
@@ -133,17 +133,28 @@
local image image_fname image_name
+ local max_attempts=5
+
# Create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
image_fname=`basename "$image_url"`
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+akistyle), then extracts it.
if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
- wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
- if [[ $? -ne 0 ]]; then
- echo "Not found: $image_url"
- return
- fi
+ for attempt in `seq $max_attempts`; do
+ local rc=0
+ wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$?
+ if [[ $rc -ne 0 ]]; then
+ if [[ "$attempt" -eq "$max_attempts" ]]; then
+ echo "Not found: $image_url"
+ return
+ fi
+                    echo "Download failed, retrying in $attempt seconds, attempt: $attempt"
+ sleep $attempt
+ else
+ break
+ fi
+ done
fi
image="$FILES/${image_fname}"
else
@@ -414,10 +425,10 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id)
+ kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id)
+ ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id)
fi
_upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
fi
@@ -683,6 +694,8 @@
iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
+ # Enable or disable color for oslo.log
+ iniset $conf_file DEFAULT log_color $LOG_COLOR
}
function setup_systemd_logging {
@@ -704,6 +717,9 @@
iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
+
+ # Enable or disable color for oslo.log
+ iniset $conf_file DEFAULT log_color $LOG_COLOR
}
function setup_standard_logging_identity {
diff --git a/functions-common b/functions-common
index fa7e4f2..e265256 100644
--- a/functions-common
+++ b/functions-common
@@ -236,6 +236,27 @@
$xtrace
}
+# bool_to_int <True|False>
+#
+# Convert True|False to int 1 or 0
+# This function can be used to convert the output of trueorfalse
+# to an int following C conventions, where false is 0 and true is 1.
+function bool_to_int {
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ if [ -z $1 ]; then
+ die $LINENO "Bool value required"
+ fi
+ if [[ $1 == "True" ]] ; then
+ echo '1'
+ else
+ echo '0'
+ fi
+ $xtrace
+}
+
+
function isset {
[[ -v "$1" ]]
}
@@ -380,9 +401,9 @@
# such as "install_package" further abstract things in better ways.
#
# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
-# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora)
+# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora)
# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
-# ``os_CODENAME`` - vendor's codename for release: ``xenial``
+# ``os_CODENAME`` - vendor's codename for release: ``jammy``
declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
@@ -412,9 +433,9 @@
# - os_VENDOR
# - os_PACKAGE
function GetOSVersion {
- # CentOS Stream 9 does not provide lsb_release
+ # CentOS Stream 9 and RHEL 9 do not provide lsb_release
source /etc/os-release
- if [[ "${ID}${VERSION}" == "centos9" ]]; then
+ if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then
os_RELEASE=${VERSION_ID}
os_CODENAME="n/a"
os_VENDOR=$(echo $NAME | tr -d '[:space:]')
@@ -520,6 +541,7 @@
[ "$os_VENDOR" = "openEuler" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "RedHatEnterprise" ] || \
+ [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \
[ "$os_VENDOR" = "Rocky" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
[ "$os_VENDOR" = "AlmaLinux" ] || \
@@ -609,8 +631,10 @@
echo "the project to the \$PROJECTS variable in the job definition."
die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
fi
- # '--branch' can also take tags
- git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
+ git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest
+ cd $git_dest
+ git_timed fetch $git_clone_flags origin $git_ref
+ git_timed checkout FETCH_HEAD
elif [[ "$RECLONE" = "True" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
cd $git_dest
@@ -747,7 +771,7 @@
if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
host_ip=""
# Find the interface used for the default route
- host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
+ host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)}
local host_ips
host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
local ip
@@ -1111,6 +1135,12 @@
return 1
}
+function is_ironic_sharded {
+ # todo(JayF): Support >1 shard with multiple n-cpu instances for each
+ is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0
+ return 1
+}
+
# Package Functions
# =================
@@ -1519,6 +1549,7 @@
mkdir -p $SYSTEMD_DIR
iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
+ iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\""
iniset -sudo $unitfile "Service" "User" "$user"
iniset -sudo $unitfile "Service" "ExecStart" "$command"
iniset -sudo $unitfile "Service" "KillMode" "process"
@@ -1546,6 +1577,7 @@
mkdir -p $SYSTEMD_DIR
iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
+ iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\""
iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service"
iniset -sudo $unitfile "Service" "User" "$user"
iniset -sudo $unitfile "Service" "ExecStart" "$command"
@@ -1611,6 +1643,9 @@
fi
local env_vars="$5"
if [[ "$command" =~ "uwsgi" ]] ; then
+ if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ cmd="$cmd --venv $DEVSTACK_VENV"
+ fi
write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
else
write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
@@ -2403,6 +2438,11 @@
_TIME_TOTAL[$name]=$(($total + $elapsed_time))
}
+function install_openstack_cli_server {
+ export PATH=$TOP_DIR/files/openstack-cli-server:$PATH
+ run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server"
+}
+
function oscwrap {
local xtrace
xtrace=$(set +o | grep xtrace)
diff --git a/inc/python b/inc/python
index a24f4e9..bd58905 100644
--- a/inc/python
+++ b/inc/python
@@ -32,6 +32,23 @@
# Python Functions
# ================
+# Setup the global devstack virtualenvs and the associated environment
+# updates.
+function setup_devstack_virtualenv {
+ # We run devstack out of a global virtualenv.
+ if [[ ! -d $DEVSTACK_VENV ]] ; then
+ # Using system site packages to enable nova to use libguestfs.
+ # This package is currently installed via the distro and not
+ # available on pypi.
+ $PYTHON -m venv --system-site-packages "${DEVSTACK_VENV}"
+ pip_install -U pip setuptools[core]
+ fi
+ if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then
+ export PATH="$DEVSTACK_VENV/bin:$PATH"
+ export PYTHON="$DEVSTACK_VENV/bin/python3"
+ fi
+}
+
# Get the path to the pip command.
# get_pip_command
function get_pip_command {
@@ -60,8 +77,11 @@
fi
$xtrace
- local PYTHON_PATH=/usr/local/bin
- echo $PYTHON_PATH
+ if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ echo "$DEVSTACK_VENV/bin"
+ else
+ echo "/usr/local/bin"
+ fi
}
# Wrapper for ``pip install`` that only installs versions of libraries
@@ -166,15 +186,17 @@
if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
+ elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then
+ # We have to check that the DEVSTACK_VENV exists because early
+        # devstack bootstrapping needs to operate in a system context
+        # to bootstrap pip. Once pip is bootstrapped we create the
+ # global venv and can start to use it.
+ local cmd_pip=$DEVSTACK_VENV/bin/pip
+ local sudo_pip="env"
+ echo "Using python $PYTHON3_VERSION to install $package_dir"
else
local cmd_pip="python$PYTHON3_VERSION -m pip"
- # See
- # https://github.com/pypa/setuptools/issues/2232
- # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html
- # this makes setuptools >=50 use the platform distutils.
- # We only want to do this on global pip installs, not if
- # installing in a virtualenv
- local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib "
+ local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
echo "Using python $PYTHON3_VERSION to install $package_dir"
fi
@@ -377,6 +399,9 @@
# source we are about to do.
local name
name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+ if [ -z $name ]; then
+ name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml)
+ fi
$REQUIREMENTS_DIR/.venv/bin/edit-constraints \
$REQUIREMENTS_DIR/upper-constraints.txt -- $name
fi
@@ -439,8 +464,11 @@
pip_install $flags "$project_dir$extras"
# ensure that further actions can do things like setup.py sdist
- if [[ "$flags" == "-e" ]]; then
- safe_chown -R $STACK_USER $1/*.egg-info
+ if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then
+        # egg-info is not created when the project has a pyproject.toml
+ if [ -d $1/*.egg-info ]; then
+ safe_chown -R $STACK_USER $1/*.egg-info
+ fi
fi
}
diff --git a/inc/rootwrap b/inc/rootwrap
index 2a6e4b6..4c65440 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -60,6 +60,11 @@
sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf
sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
+ # Rely on $PATH set by devstack to determine what is safe to execute
+ # by rootwrap rather than use explicit whitelist of paths in
+ # rootwrap.conf
+ sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf
+
# Set up the rootwrap sudoers
local tempfile
tempfile=$(mktemp)
diff --git a/lib/apache b/lib/apache
index 4d68b49..1c034d3 100644
--- a/lib/apache
+++ b/lib/apache
@@ -137,6 +137,8 @@
elif is_fedora; then
sudo rm -f /etc/httpd/conf.d/000-*
install_package httpd python3-mod_wsgi
+        # rpm distros don't enable httpd by default so enable it to support reboots.
+ sudo systemctl enable httpd
# For consistency with Ubuntu, switch to the worker mpm, as
# the default is event
sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
@@ -235,13 +237,17 @@
restart_service $APACHE_NAME
}
+# write_uwsgi_config() - Create a new uWSGI config file
function write_uwsgi_config {
- local file=$1
+ local conf=$1
local wsgi=$2
local url=$3
local http=$4
- local name=""
- name=$(basename $wsgi)
+ local name=$5
+
+ if [ -z "$name" ]; then
+ name=$(basename $wsgi)
+ fi
# create a home for the sockets; note don't use /tmp -- apache has
# a private view of it on some platforms.
@@ -256,39 +262,49 @@
local socket="$socket_dir/${name}.socket"
# always cleanup given that we are using iniset here
- rm -rf $file
- iniset "$file" uwsgi wsgi-file "$wsgi"
- iniset "$file" uwsgi processes $API_WORKERS
+ rm -rf $conf
+ # Set either the module path or wsgi script path depending on what we've
+ # been given. Note that the regex isn't exhaustive - neither Python modules
+ # nor Python variables can start with a number - but it's "good enough"
+ if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+ iniset "$conf" uwsgi module "$wsgi"
+ else
+ deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+ iniset "$conf" uwsgi wsgi-file "$wsgi"
+ fi
+ iniset "$conf" uwsgi processes $API_WORKERS
# This is running standalone
- iniset "$file" uwsgi master true
+ iniset "$conf" uwsgi master true
# Set die-on-term & exit-on-reload so that uwsgi shuts down
- iniset "$file" uwsgi die-on-term true
- iniset "$file" uwsgi exit-on-reload false
+ iniset "$conf" uwsgi die-on-term true
+ iniset "$conf" uwsgi exit-on-reload false
# Set worker-reload-mercy so that worker will not exit till the time
# configured after graceful shutdown
- iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
- iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,python3
+ iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+ iniset "$conf" uwsgi enable-threads true
+ iniset "$conf" uwsgi plugins http,python3
# uwsgi recommends this to prevent thundering herd on accept.
- iniset "$file" uwsgi thunder-lock true
+ iniset "$conf" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
- iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+ iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
# Override the default size for headers from the 4k default.
- iniset "$file" uwsgi buffer-size 65535
+ iniset "$conf" uwsgi buffer-size 65535
# Make sure the client doesn't try to re-use the connection.
- iniset "$file" uwsgi add-header "Connection: close"
+ iniset "$conf" uwsgi add-header "Connection: close"
# This ensures that file descriptors aren't shared between processes.
- iniset "$file" uwsgi lazy-apps true
+ iniset "$conf" uwsgi lazy-apps true
+ # Starting time of the WSGI server
+ iniset "$conf" uwsgi start-time %t
# If we said bind directly to http, then do that and don't start the apache proxy
if [[ -n "$http" ]]; then
- iniset "$file" uwsgi http $http
+ iniset "$conf" uwsgi http $http
else
local apache_conf=""
apache_conf=$(apache_site_config_for $name)
- iniset "$file" uwsgi socket "$socket"
- iniset "$file" uwsgi chmod-socket 666
- echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf
+ iniset "$conf" uwsgi socket "$socket"
+ iniset "$conf" uwsgi chmod-socket 666
+ echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
fi
@@ -301,47 +317,60 @@
# but that involves having apache buffer the request before sending it to
# uwsgi.
function write_local_uwsgi_http_config {
- local file=$1
+ local conf=$1
local wsgi=$2
local url=$3
- name=$(basename $wsgi)
+ local name=$4
+
+ if [ -z "$name" ]; then
+ name=$(basename $wsgi)
+ fi
# create a home for the sockets; note don't use /tmp -- apache has
# a private view of it on some platforms.
# always cleanup given that we are using iniset here
- rm -rf $file
- iniset "$file" uwsgi wsgi-file "$wsgi"
+ rm -rf $conf
+ # Set either the module path or wsgi script path depending on what we've
+ # been given
+ if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+ iniset "$conf" uwsgi module "$wsgi"
+ else
+ deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+ iniset "$conf" uwsgi wsgi-file "$wsgi"
+ fi
port=$(get_random_port)
- iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
- iniset "$file" uwsgi processes $API_WORKERS
+ iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
+ iniset "$conf" uwsgi processes $API_WORKERS
# This is running standalone
- iniset "$file" uwsgi master true
+ iniset "$conf" uwsgi master true
# Set die-on-term & exit-on-reload so that uwsgi shuts down
- iniset "$file" uwsgi die-on-term true
- iniset "$file" uwsgi exit-on-reload false
- iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins http,python3
- # uwsgi recommends this to prevent thundering herd on accept.
- iniset "$file" uwsgi thunder-lock true
- # Set hook to trigger graceful shutdown on SIGTERM
- iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+ iniset "$conf" uwsgi die-on-term true
+ iniset "$conf" uwsgi exit-on-reload false
# Set worker-reload-mercy so that worker will not exit till the time
# configured after graceful shutdown
- iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+ iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+ iniset "$conf" uwsgi enable-threads true
+ iniset "$conf" uwsgi plugins http,python3
+ # uwsgi recommends this to prevent thundering herd on accept.
+ iniset "$conf" uwsgi thunder-lock true
+ # Set hook to trigger graceful shutdown on SIGTERM
+ iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
# Override the default size for headers from the 4k default.
- iniset "$file" uwsgi buffer-size 65535
+ iniset "$conf" uwsgi buffer-size 65535
# Make sure the client doesn't try to re-use the connection.
- iniset "$file" uwsgi add-header "Connection: close"
+ iniset "$conf" uwsgi add-header "Connection: close"
# This ensures that file descriptors aren't shared between processes.
- iniset "$file" uwsgi lazy-apps true
- iniset "$file" uwsgi chmod-socket 666
- iniset "$file" uwsgi http-raw-body true
- iniset "$file" uwsgi http-chunked-input true
- iniset "$file" uwsgi http-auto-chunked true
- iniset "$file" uwsgi http-keepalive false
+ iniset "$conf" uwsgi lazy-apps true
+ iniset "$conf" uwsgi chmod-socket 666
+ iniset "$conf" uwsgi http-raw-body true
+ iniset "$conf" uwsgi http-chunked-input true
+ iniset "$conf" uwsgi http-auto-chunked true
+ iniset "$conf" uwsgi http-keepalive false
# Increase socket timeout for slow chunked uploads
- iniset "$file" uwsgi socket-timeout 30
+ iniset "$conf" uwsgi socket-timeout 30
+ # Starting time of the WSGI server
+ iniset "$conf" uwsgi start-time %t
enable_apache_mod proxy
enable_apache_mod proxy_http
@@ -349,7 +378,7 @@
apache_conf=$(apache_site_config_for $name)
echo "KeepAlive Off" | sudo tee $apache_conf
echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
- echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf
+ echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
}
@@ -368,18 +397,24 @@
echo "KeepAlive Off" | sudo tee $apache_conf
echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
- echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf
+ echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
}
function remove_uwsgi_config {
- local file=$1
+ local conf=$1
local wsgi=$2
local name=""
+ # TODO(stephenfin): Remove this call when everyone is using module path
+ # configuration instead of file path configuration
name=$(basename $wsgi)
- rm -rf $file
+ if [[ "$wsgi" = /* ]]; then
+ deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead"
+ fi
+
+ rm -rf $conf
disable_apache_site $name
}
diff --git a/lib/cinder b/lib/cinder
index e37eff4..b557d4b 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -62,7 +62,7 @@
CINDER_CONF_DIR=/etc/cinder
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
-CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi
+CINDER_UWSGI=cinder.wsgi.api:application
CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini
CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
@@ -76,6 +76,11 @@
CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
+# We do not need to report service status every 10s for devstack-like
+# deployments. In the gate this generates extra work for the services and the
+# database which are already taxed.
+CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120}
+
# What type of LVM device should Cinder use for LVM backend
# Defaults to auto, which will do thin provisioning if it's a fresh
# volume group, otherwise it will do thick. The other valid choices are
@@ -83,6 +88,10 @@
# thin provisioning.
CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto}
+# ``CINDER_USE_SERVICE_TOKEN`` is a mode where service token is passed along with
+# user token while communicating to external REST APIs like Glance.
+CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN)
+
# Default backends
# The backend format is type:name where type is one of the supported backend
# types (lvm, nfs, etc) and name is the identifier used in the Cinder
@@ -151,10 +160,6 @@
# Supported backup drivers are in lib/cinder_backups
CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift}
-# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi
-# reference should be cleaned up to more accurately refer to uwsgi.
-CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True}
-
# Source the enabled backends
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
@@ -178,6 +183,12 @@
# Environment variables to configure the image-volume cache
CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
+# Environment variables to configure the optimized volume upload
+CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False}
+
+# Environment variables to configure the internal tenant during optimized volume upload
+CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False}
+
# For limits, if left unset, it will use cinder defaults of 0 for unlimited
CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}
@@ -187,6 +198,11 @@
# enable the cache for all cinder backends.
CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
+# Configure which cinder backends will have optimized volume upload. This takes
+# the same form as the CINDER_ENABLED_BACKENDS config option. By default it
+# will enable optimized upload for all cinder backends.
+CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS}
+
# Flag to set the oslo_policy.enforce_scope. This is used to switch
# the Volume API policies to start checking the scope of token. by default,
# this flag is False.
@@ -270,7 +286,7 @@
fi
stop_process "c-api"
- remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
+ remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi"
}
# configure_cinder() - Set config files, create data dirs, etc
@@ -325,6 +341,9 @@
# details and example failures.
iniset $CINDER_CONF DEFAULT rpc_response_timeout 120
+ iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL
+ iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6))
+
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
local enabled_backends=""
local default_name=""
@@ -345,6 +364,14 @@
iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
fi
configure_cinder_image_volume_cache
+
+ # The upload optimization uses Cinder's clone volume functionality to
+ # clone the Image-Volume from source volume hence can only be
+ # performed when glance is using cinder as its backend.
+ if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+ # Configure optimized volume upload
+ configure_cinder_volume_upload
+ fi
fi
if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
@@ -362,14 +389,8 @@
if is_service_enabled tls-proxy; then
if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
# Set the service port for a proxy to take the original
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
- iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True
- else
- iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
- iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
- iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
- fi
+ iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
+ iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True
fi
fi
@@ -380,10 +401,10 @@
iniset_rpc_backend cinder $CINDER_CONF
# Format logging
- setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI
+ setup_logging $CINDER_CONF
if is_service_enabled c-api; then
- write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
+ write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume" "" "cinder-api"
fi
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
@@ -406,7 +427,9 @@
if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL"
elif is_service_enabled etcd3; then
- iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
+ # NOTE(jan.gutter): api_version can revert to default once tooz is
+ # updated with the etcd v3.4 defaults
+ iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3"
fi
if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
@@ -416,6 +439,10 @@
iniset $CINDER_CONF oslo_policy enforce_scope false
iniset $CINDER_CONF oslo_policy enforce_new_defaults false
fi
+
+ if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then
+ init_cinder_service_user_conf
+ fi
}
# create_cinder_accounts() - Set up common required cinder accounts
@@ -439,32 +466,15 @@
create_service_user "cinder" $extra_role
+ local cinder_api_url
+ cinder_api_url="$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume"
+
# block-storage is the official service type
get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
- if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
- get_or_create_endpoint \
- "block-storage" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
-
- get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
- get_or_create_endpoint \
- "volumev3" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
- else
- get_or_create_endpoint \
- "block-storage" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
-
- get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
- get_or_create_endpoint \
- "volumev3" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
- fi
-
+ get_or_create_endpoint \
+ "block-storage" \
+ "$REGION_NAME" \
+ "$cinder_api_url/v3"
configure_cinder_internal_tenant
fi
}
@@ -585,10 +595,6 @@
local service_port=$CINDER_SERVICE_PORT
local service_protocol=$CINDER_SERVICE_PROTOCOL
local cinder_url
- if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
- service_port=$CINDER_SERVICE_PORT_INT
- service_protocol="http"
- fi
if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
if is_service_enabled c-vol; then
# Delete any old stack.conf
@@ -605,17 +611,8 @@
fi
if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
- run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
- cinder_url=$service_protocol://$SERVICE_HOST:$service_port
- # Start proxy if tls enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
- fi
- else
- run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
- cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
- fi
+ run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
+ cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
fi
echo "Waiting for Cinder API to start..."
@@ -719,6 +716,24 @@
done
}
+function configure_cinder_volume_upload {
+ # Expect CINDER_UPLOAD_OPTIMIZED_BACKENDS to be a list of backends
+ # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will
+ # be the backend specific configuration stanza in cinder.conf.
+ local be be_name
+ for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do
+ be_name=${be##*:}
+
+ iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED
+ iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT
+ done
+}
+
+function init_cinder_service_user_conf {
+ configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user
+ iniset $CINDER_CONF service_user send_service_user_token True
+ iniset $CINDER_CONF service_user auth_strategy keystone
+}
# Restore xtrace
$_XTRACE_CINDER
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 27d1ec6..629014c 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -20,7 +20,7 @@
MYSQL_SERVICE_NAME=mysql
if is_fedora && ! is_oraclelinux; then
MYSQL_SERVICE_NAME=mariadb
- elif [[ "$DISTRO" == "bullseye" ]]; then
+ elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then
MYSQL_SERVICE_NAME=mariadb
fi
fi
@@ -104,10 +104,10 @@
# Set the root password - only works the first time. For Ubuntu, we already
# did that with debconf before installing the package, but we still try,
# because the package might have been installed already. We don't do this
- # for Ubuntu 22.04 (jammy) because the authorization model change in
+ # for Ubuntu 22.04+ because the authorization model change in
# version 10.4 of mariadb. See
# https://mariadb.org/authentication-in-mariadb-10-4/
- if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+ if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
sudo mysqladmin -u root password $DATABASE_PASSWORD || true
fi
@@ -122,17 +122,13 @@
# In mariadb e.g. on Ubuntu socket plugin is used for authentication
# as root so it works only as sudo. To restore old "mysql like" behaviour,
# we need to change auth plugin for root user
- if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
- if [[ "$DISTRO" == "jammy" ]]; then
- # For Ubuntu 22.04 (jammy) we follow the model outlined in
- # https://mariadb.org/authentication-in-mariadb-10-4/
- sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');"
- else
- sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
- sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
- fi
+ # TODO(frickler): simplify this logic
+ if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ # For Ubuntu 22.04+ we follow the model outlined in
+ # https://mariadb.org/authentication-in-mariadb-10-4/
+ sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');"
fi
- if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+ if ! (is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
# Create DB user if it does not already exist
sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
# Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index b21418b..2aa38cc 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -46,6 +46,10 @@
createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db
}
+function _exit_pg_init {
+ sudo cat /var/lib/pgsql/initdb_postgresql.log
+}
+
function configure_database_postgresql {
local pg_conf pg_dir pg_hba check_role version
echo_summary "Configuring and starting PostgreSQL"
@@ -53,7 +57,9 @@
pg_hba=/var/lib/pgsql/data/pg_hba.conf
pg_conf=/var/lib/pgsql/data/postgresql.conf
if ! sudo [ -e $pg_hba ]; then
+ trap _exit_pg_init EXIT
sudo postgresql-setup initdb
+ trap - EXIT
fi
elif is_ubuntu; then
version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2`
diff --git a/lib/etcd3 b/lib/etcd3
index 4f3a7a4..0d22de8 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -51,7 +51,7 @@
fi
cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
- cmd+=" --debug"
+ cmd+=" --log-level=debug"
fi
local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
diff --git a/lib/glance b/lib/glance
index 5aeae16..5c3643d 100644
--- a/lib/glance
+++ b/lib/glance
@@ -41,12 +41,21 @@
GLANCE_BIN_DIR=$(get_python_exec_prefix)
fi
+# S3 for Glance
+GLANCE_USE_S3=$(trueorfalse False GLANCE_USE_S3)
+GLANCE_S3_DEFAULT_BACKEND=${GLANCE_S3_DEFAULT_BACKEND:-s3_fast}
+GLANCE_S3_BUCKET_ON_PUT=$(trueorfalse True GLANCE_S3_BUCKET_ON_PUT)
+GLANCE_S3_BUCKET_NAME=${GLANCE_S3_BUCKET_NAME:-images}
+
# Cinder for Glance
USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE)
# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values
# from CINDER_ENABLED_BACKENDS
GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1}
GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance
+if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance
+fi
# When Cinder is used as a glance store, you can optionally configure cinder to
# optimize bootable volume creation by allowing volumes to be cloned directly
# in the backend instead of transferring data via Glance. To use this feature,
@@ -72,6 +81,7 @@
GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db}
# Full Glance functionality requires running in standalone mode. If we are
# not in uwsgi mode, then we are standalone, otherwise allow separate control.
@@ -95,10 +105,13 @@
GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS)
# Flag to set the oslo_policy.enforce_scope. This is used to switch
-# the Image API policies to start checking the scope of token. By Default,
-# this flag is False.
+# This is used to disable the Image API policies scope and new defaults.
+# By Default, it is True.
# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
-GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE)
+GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE)
+
+# Flag to disable image format inspection on upload
+GLANCE_ENFORCE_IMAGE_FORMAT=$(trueorfalse True GLANCE_ENFORCE_IMAGE_FORMAT)
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
@@ -164,6 +177,35 @@
# Cleanup reserved stores directories
sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
fi
+ remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api"
+}
+
+# Set multiple s3 store related config options
+#
+function configure_multiple_s3_stores {
+ enabled_backends="${GLANCE_S3_DEFAULT_BACKEND}:s3"
+
+ iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends}
+ iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_S3_DEFAULT_BACKEND
+}
+
+# Set common S3 store options to given config section
+#
+# Arguments:
+# config_section
+#
+function set_common_s3_store_params {
+ local config_section="$1"
+ openstack ec2 credential create
+ iniset $GLANCE_API_CONF $config_section s3_store_host "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT"
+ iniset $GLANCE_API_CONF $config_section s3_store_access_key "$(openstack ec2 credential list -c Access -f value)"
+ iniset $GLANCE_API_CONF $config_section s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)"
+ iniset $GLANCE_API_CONF $config_section s3_store_create_bucket_on_put $GLANCE_S3_BUCKET_ON_PUT
+ iniset $GLANCE_API_CONF $config_section s3_store_bucket $GLANCE_S3_BUCKET_NAME
+ iniset $GLANCE_API_CONF $config_section s3_store_bucket_url_format "path"
+ if is_service_enabled tls-proxy; then
+ iniset $GLANCE_API_CONF $config_section s3_store_cacert $SSL_BUNDLE_FILE
+ fi
}
# Set multiple cinder store related config options for each of the cinder store
@@ -250,7 +292,6 @@
local be
if [[ "$glance_enable_multiple_stores" == "False" ]]; then
- # Configure traditional glance_store
if [[ "$use_cinder_for_glance" == "True" ]]; then
# set common glance_store parameters
iniset $GLANCE_API_CONF glance_store stores "cinder,file,http"
@@ -273,7 +314,7 @@
if [[ "$use_cinder_for_glance" == "True" ]]; then
# Configure multiple cinder stores for glance
configure_multiple_cinder_stores
- else
+ elif ! is_service_enabled s-proxy && [[ "$GLANCE_USE_S3" == "False" ]]; then
# Configure multiple file stores for glance
configure_multiple_file_stores
fi
@@ -326,6 +367,7 @@
iniset $GLANCE_API_CONF database connection $dburl
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+ iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR
iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
@@ -337,6 +379,7 @@
# Only use these if you know what you are doing! See OSSN-0065
iniset $GLANCE_API_CONF DEFAULT show_image_direct_url $GLANCE_SHOW_DIRECT_URL
iniset $GLANCE_API_CONF DEFAULT show_multiple_locations $GLANCE_SHOW_MULTIPLE_LOCATIONS
+ iniset $GLANCE_API_CONF image_format require_image_format_match $GLANCE_ENFORCE_IMAGE_FORMAT
# Configure glance_store
configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES
@@ -350,8 +393,15 @@
# No multiple stores for swift yet
if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then
- # Store the images in swift if enabled.
- if is_service_enabled s-proxy; then
+ # Return if s3api is enabled for glance
+ if [[ "$GLANCE_USE_S3" == "True" ]]; then
+ if is_service_enabled s3api; then
+ # set common glance_store parameters
+ iniset $GLANCE_API_CONF glance_store stores "s3,file,http"
+ iniset $GLANCE_API_CONF glance_store default_store s3
+ fi
+ elif is_service_enabled s-proxy; then
+ # Store the images in swift if enabled.
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
@@ -369,6 +419,12 @@
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
fi
+ else
+ if [[ "$GLANCE_USE_S3" == "True" ]]; then
+ if is_service_enabled s3api; then
+ configure_multiple_s3_stores
+ fi
+ fi
fi
# We need to tell glance what it's public endpoint is so that the version
@@ -389,6 +445,7 @@
iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+ iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
@@ -426,6 +483,7 @@
iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+ iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL
fi
if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
@@ -472,6 +530,13 @@
configure_glance_quotas
fi
+ if is_service_enabled s3api && [[ "$GLANCE_USE_S3" == "True" ]]; then
+ if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then
+ set_common_s3_store_params glance_store
+ else
+ set_common_s3_store_params $GLANCE_S3_DEFAULT_BACKEND
+ fi
+ fi
fi
}
@@ -540,7 +605,7 @@
# start_glance_remote_clone() - Clone the regular glance api worker
function start_glance_remote_clone {
local glance_remote_conf_dir glance_remote_port remote_data
- local glance_remote_uwsgi
+ local glance_remote_uwsgi venv
glance_remote_conf_dir="$(glance_remote_conf "")"
glance_remote_port=$(get_random_port)
@@ -578,12 +643,16 @@
# We need to create the systemd service for the clone, but then
# change it to include an Environment line to point the WSGI app
# at the alternate config directory.
+ if [[ "$GLOBAL_VENV" == True ]]; then
+ venv="--venv $DEVSTACK_VENV"
+ fi
write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \
--procname-prefix \
glance-api-remote \
- --ini $glance_remote_uwsgi" \
+ --ini $glance_remote_uwsgi \
+ $venv" \
"" "$STACK_USER"
- iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
+ iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
"Service" "Environment" \
"OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir"
diff --git a/lib/horizon b/lib/horizon
index f76f9e5..7c0d443 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -109,12 +109,21 @@
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True"
fi
+ if is_service_enabled c-bak; then
+ _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True"
+ fi
+
# Create an empty directory that apache uses as docroot
sudo mkdir -p $HORIZON_DIR/.blackhole
local horizon_conf
horizon_conf=$(apache_site_config_for horizon)
+ local wsgi_venv_config=""
+ if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV"
+ fi
+
# Configure apache to run horizon
# Set up the django horizon application to serve via apache/wsgi
sudo sh -c "sed -e \"
@@ -124,6 +133,7 @@
s,%APACHE_NAME%,$APACHE_NAME,g;
s,%DEST%,$DEST,g;
s,%WEBROOT%,$HORIZON_APACHE_ROOT,g;
+ s,%WSGIPYTHONHOME%,$wsgi_venv_config,g;
\" $FILES/apache-horizon.template >$horizon_conf"
if is_ubuntu; then
@@ -163,6 +173,10 @@
# Apache installation, because we mark it NOPRIME
install_apache_wsgi
+ # Install the memcache library so that horizon can use memcached as its
+ # cache backend
+ pip_install_gr pymemcache
+
git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
}
diff --git a/lib/host b/lib/host
new file mode 100644
index 0000000..a812c39
--- /dev/null
+++ b/lib/host
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Kernel Samepage Merging (KSM)
+# -----------------------------
+
+# Processes that mark their memory as mergeable can share identical memory
+# pages if KSM is enabled. This is particularly useful for nova + libvirt
+# backends but any other setup that marks its memory as mergeable can take
+# advantage. The drawback is there is higher cpu load; however, we tend to
+# be memory bound not cpu bound so enable KSM by default but allow people
+# to opt out if the CPU time is more important to them.
+ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
+ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED)
+function configure_ksm {
+ if [[ $ENABLE_KSMTUNED == "True" ]] ; then
+ install_package "ksmtuned"
+ fi
+ if [[ -f /sys/kernel/mm/ksm/run ]] ; then
+ echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run
+ fi
+}
+
+# Compressed swap (ZSWAP)
+#------------------------
+
+# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html
+# Zswap is a lightweight compressed cache for swap pages.
+# It takes pages that are in the process of being swapped out and attempts
+# to compress them into a dynamically allocated RAM-based memory pool.
+# zswap basically trades CPU cycles for potentially reduced swap I/O.
+# This trade-off can also result in a significant performance improvement
+# if reads from the compressed cache are faster than reads from a swap device.
+
+ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP)
+# lz4 is very fast although it does not have the best compression
+# zstd has much better compression but more latency
+ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"}
+ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"}
+function configure_zswap {
+ if [[ $ENABLE_ZSWAP == "True" ]] ; then
+ # Centos 9 stream seems to only support enabling but not run time
+        # tuning so don't try to choose a better default on centos
+ if is_ubuntu; then
+ echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor
+ echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool
+ fi
+ echo 1 | sudo tee /sys/module/zswap/parameters/enabled
+        # print current zswap kernel config
+ sudo grep -R . /sys/module/zswap/parameters || /bin/true
+ fi
+}
+
+ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING)
+function configure_sysctl_mem_parmaters {
+ if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then
+ # defer write when memory is available
+ sudo sysctl -w vm.dirty_ratio=60
+ sudo sysctl -w vm.dirty_background_ratio=10
+ sudo sysctl -w vm.vfs_cache_pressure=50
+ # assume swap is compressed so on new kernels
+ # give it equal priority as page cache which is
+ # uncompressed. on kernels < 5.8 the max is 100
+ # not 200 so it will strongly prefer swapping.
+ sudo sysctl -w vm.swappiness=100
+ sudo grep -R . /proc/sys/vm/ || /bin/true
+ fi
+}
+
+function configure_host_mem {
+ configure_zswap
+ configure_ksm
+ configure_sysctl_mem_parmaters
+}
+
+ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING)
+function configure_sysctl_net_parmaters {
+ if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then
+ # detect dead TCP connections after 120 seconds
+ sudo sysctl -w net.ipv4.tcp_keepalive_time=60
+ sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10
+ sudo sysctl -w net.ipv4.tcp_keepalive_probes=6
+        # reduce network latency for new connections
+ sudo sysctl -w net.ipv4.tcp_fastopen=3
+ # print tcp options
+ sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true
+ # disable qos by default
+ sudo sysctl -w net.core.default_qdisc=pfifo_fast
+ fi
+}
+
+function configure_host_net {
+ configure_sysctl_net_parmaters
+}
+
+function tune_host {
+ configure_host_mem
+ configure_host_net
+}
diff --git a/lib/infra b/lib/infra
index b983f2b..f4760c3 100644
--- a/lib/infra
+++ b/lib/infra
@@ -31,7 +31,7 @@
local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
[ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
# We don't care about testing git pbr in the requirements venv.
- PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
+ PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools[core]
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
# Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped
diff --git a/lib/keystone b/lib/keystone
index 6cb4aac..76e2598 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -49,16 +49,7 @@
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
KEYSTONE_PUBLIC_UWSGI_CONF=$KEYSTONE_CONF_DIR/keystone-uwsgi-public.ini
-KEYSTONE_PUBLIC_UWSGI=$KEYSTONE_BIN_DIR/keystone-wsgi-public
-
-# KEYSTONE_DEPLOY defines how keystone is deployed, allowed values:
-# - mod_wsgi : Run keystone under Apache HTTPd mod_wsgi
-# - uwsgi : Run keystone under uwsgi
-if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- KEYSTONE_DEPLOY=uwsgi
-else
- KEYSTONE_DEPLOY=mod_wsgi
-fi
+KEYSTONE_PUBLIC_UWSGI=keystone.wsgi.api:application
# Select the Identity backend driver
KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql}
@@ -144,15 +135,9 @@
# cleanup_keystone() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_keystone {
- if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
- # These files will be created if we are running WSGI_MODE="mod_wsgi"
- disable_apache_site keystone
- sudo rm -f $(apache_site_config_for keystone)
- else
- stop_process "keystone"
- remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- sudo rm -f $(apache_site_config_for keystone-wsgi-public)
- fi
+ stop_process "keystone"
+ remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public"
+ sudo rm -f $(apache_site_config_for keystone-wsgi-public)
}
# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
@@ -241,12 +226,7 @@
iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
- iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s"
- _config_keystone_apache_wsgi
- else # uwsgi
- write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity"
- fi
+ write_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI" "/identity" "" "keystone-api"
iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
@@ -543,10 +523,6 @@
if is_service_enabled ldap; then
setup_develop $KEYSTONE_DIR ldap
fi
-
- if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
- install_apache_wsgi
- fi
}
# start_keystone() - Start running processes
@@ -559,12 +535,7 @@
auth_protocol="http"
fi
- if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
- enable_apache_site keystone
- restart_apache_server
- else # uwsgi
- run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
- fi
+ run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
echo "Waiting for keystone to start..."
# Check that the keystone service is running. Even if the tls tunnel
@@ -589,12 +560,7 @@
# stop_keystone() - Stop running processes
function stop_keystone {
- if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
- disable_apache_site keystone
- restart_apache_server
- else
- stop_process keystone
- fi
+ stop_process keystone
}
# bootstrap_keystone() - Initialize user, role and project
diff --git a/lib/libraries b/lib/libraries
index 9ea3230..9d5d655 100755
--- a/lib/libraries
+++ b/lib/libraries
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# lib/oslo
+# lib/libraries
#
# Functions to install libraries from git
#
diff --git a/lib/lvm b/lib/lvm
index 57d2cd4..b7e84d9 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -137,15 +137,17 @@
# Start with a clean volume group
_create_lvm_volume_group $vg $size
- # Remove iscsi targets
- if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
- sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
- elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
- sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
- elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
- # If we don't disconnect everything vgremove will block
- sudo nvme disconnect-all
- sudo nvmetcli clear
+ if is_service_enabled cinder; then
+ # Remove iscsi targets
+ if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
+ sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
+ elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
+ sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
+ elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+ # If we don't disconnect everything vgremove will block
+ sudo nvme disconnect-all
+ sudo nvmetcli clear
+ fi
fi
_clean_lvm_volume_group $vg
}
@@ -198,7 +200,7 @@
filter_string=$filter_string$filter_suffix
clean_lvm_filter
- sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf
+ sudo sed -i "/# global_filter = \[.*\]/a\ $filter_string" /etc/lvm/lvm.conf
echo_summary "set lvm.conf device global_filter to: $filter_string"
}
diff --git a/lib/neutron b/lib/neutron
index 368a1b9..bcef8a5 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -88,12 +88,14 @@
# enough
NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
+NEUTRON_UWSGI=neutron.wsgi.api:application
NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+# of the new RBAC policies and scopes. Set it to False if you do not
+# want to run Neutron with new RBAC.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE)
# Agent binaries. Note, binary paths for other agents are set in per-service
# scripts in lib/neutron_plugins/services/
@@ -157,6 +159,14 @@
NEUTRON_ENDPOINT_SERVICE_NAME="networking"
fi
+# Source install libraries
+ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git}
+ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic}
+ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main}
+SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git}
+SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy}
+SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main}
+
# List of config file names in addition to the main plugin config file
# To add additional plugin config files, use ``neutron_server_config_add``
# utility function. For example:
@@ -303,6 +313,11 @@
Q_USE_SECGROUP=False
fi
+# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings
+# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3
+# which initialize PUBLIC_BRIDGE.
+OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE}
+
# Save trace setting
_XTRACE_NEUTRON=$(set +o | grep xtrace)
set +o xtrace
@@ -355,6 +370,24 @@
echo "$opts"
}
+function _enable_ovn_maintenance {
+ if [[ $Q_AGENT == "ovn" ]]; then
+ enable_service neutron-ovn-maintenance-worker
+ fi
+}
+
+function _run_ovn_maintenance {
+ if [[ $Q_AGENT == "ovn" ]]; then
+ run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options"
+ fi
+}
+
+function _stop_ovn_maintenance {
+ if [[ $Q_AGENT == "ovn" ]]; then
+ stop_process neutron-ovn-maintenance-worker
+ fi
+}
+
# For services and agents that require it, dynamically construct a list of
# --config-file arguments that are passed to the binary.
function determine_config_files {
@@ -434,7 +467,7 @@
iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
+ write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api"
fi
}
@@ -453,7 +486,7 @@
local conf=${1:-$NOVA_CONF}
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf neutron username "$Q_ADMIN_USERNAME"
+ iniset $conf neutron username nova
iniset $conf neutron password "$SERVICE_PASSWORD"
iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
@@ -519,6 +552,17 @@
setup_dev_lib "neutron-lib"
fi
+ # Install SQLAlchemy and alembic from git when these are required
+ # see https://bugs.launchpad.net/neutron/+bug/2042941
+ if use_library_from_git "sqlalchemy"; then
+ git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH
+ setup_develop $SQLALCHEMY_DIR
+ fi
+ if use_library_from_git "alembic"; then
+ git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH
+ setup_develop $ALEMBIC_DIR
+ fi
+
git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
setup_develop $NEUTRON_DIR
@@ -570,8 +614,15 @@
# Start running OVN processes
function start_ovn_services {
if [[ $Q_AGENT == "ovn" ]]; then
- init_ovn
- start_ovn
+ if [ "$VIRT_DRIVER" != 'ironic' ]; then
+ # NOTE(TheJulia): Ironic's devstack plugin needs to perform
+ # additional networking configuration to setup a working test
+ # environment with test virtual machines to emulate baremetal,
+ # which requires OVN to be up and running earlier to complete
+ # that base configuration.
+ init_ovn
+ start_ovn
+ fi
if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
@@ -596,13 +647,28 @@
service_port=$Q_PORT_INT
service_protocol="http"
fi
+
# Start the Neutron service
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ # The default value of "rpc_workers" is None (not defined). If
+ # "rpc_workers" is explicitly set to 0, the RPC workers process
+ # should not be executed.
+ local rpc_workers
+ rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers)
+
enable_service neutron-api
run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
neutron_url=$Q_PROTOCOL://$Q_HOST/
- enable_service neutron-rpc-server
- run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+ if [ "$rpc_workers" != "0" ]; then
+ enable_service neutron-rpc-server
+ fi
+ enable_service neutron-periodic-workers
+ _enable_ovn_maintenance
+ if [ "$rpc_workers" != "0" ]; then
+ run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+ fi
+ run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options"
+ _run_ovn_maintenance
else
run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
neutron_url=$service_protocol://$Q_HOST:$service_port/
@@ -674,7 +740,9 @@
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
stop_process neutron-rpc-server
+ stop_process neutron-periodic-workers
stop_process neutron-api
+ _stop_ovn_maintenance
else
stop_process q-svc
fi
@@ -791,7 +859,9 @@
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
stop_process neutron-api
stop_process neutron-rpc-server
- remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+ stop_process neutron-periodic-workers
+ _stop_ovn_maintenance
+ remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api"
sudo rm -f $(apache_site_config_for neutron-api)
fi
@@ -990,7 +1060,7 @@
Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
- if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
_replace_api_paste_composite
fi
@@ -1075,7 +1145,10 @@
sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
fi
sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
- sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
+ # Rely on $PATH set by devstack to determine what is safe to execute
+ # by rootwrap rather than use explicit whitelist of paths in
+ # rootwrap.conf
+ sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE
# Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
@@ -1112,24 +1185,6 @@
# Functions for Neutron Exercises
#--------------------------------
-function delete_probe {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
- neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
-function _get_net_id {
- openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
-}
-
-function _get_probe_cmd_prefix {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
- echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
-}
-
# ssh check
function _ssh_check_neutron {
local from_net=$1
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 3526ccd..be3a9e7 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -91,9 +91,14 @@
# http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
+# OVN metadata agent configuration
OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+# OVN agent configuration
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini
+OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-}
+
# If True (default) the node will be considered a gateway node.
ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW)
OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
@@ -132,6 +137,7 @@
NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix)
NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent"
+NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent"
STACK_GROUP="$( id --group --name "$STACK_USER" )"
@@ -288,7 +294,7 @@
function create_public_bridge {
# Create the public bridge that OVN will use
sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15
- sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE
+ sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS}
_configure_public_network_connectivity
}
@@ -334,8 +340,24 @@
./boot.sh
fi
+ # NOTE(mnaser): OVN requires that you build using the OVS from the
+ # submodule.
+ #
+ # https://github.com/ovn-org/ovn/blob/3fb397b63663297acbcbf794e1233951222ae5af/Documentation/intro/install/general.rst#bootstrapping
+ # https://github.com/ovn-org/ovn/issues/128
+ git submodule update --init
+ pushd ovs
+ if [ ! -f configure ] ; then
+ ./boot.sh
+ fi
if [ ! -f config.status ] || [ configure -nt config.status ] ; then
- ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir
+ ./configure
+ fi
+ make -j$(($(nproc) + 1))
+ popd
+
+ if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+ ./configure $prefix $localstatedir
fi
make -j$(($(nproc) + 1))
sudo make install
@@ -487,6 +509,8 @@
if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
+ elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
else
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
fi
@@ -508,6 +532,8 @@
if is_service_enabled n-api-meta ; then
if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
iniset $NOVA_CONF neutron service_metadata_proxy True
+ elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
+ iniset $NOVA_CONF neutron service_metadata_proxy True
fi
fi
}
@@ -539,29 +565,42 @@
fi
# Metadata
- if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
+ local sample_file=""
+ local config_file=""
+ if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then
+ sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample
+ config_file=$OVN_AGENT_CONF
+ elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
+ sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample
+ config_file=$OVN_META_CONF
+ fi
+ if [ -n "$config_file" ]; then
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
(cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
- cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF
- configure_root_helper_options $OVN_META_CONF
+ cp $sample_file $config_file
+ configure_root_helper_options $config_file
- iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
- iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
- iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron
- iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
- iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
+ iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST
+ iniset $config_file DEFAULT metadata_workers $API_WORKERS
+ iniset $config_file DEFAULT state_path $DATA_DIR/neutron
+ iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
+ iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE
if is_service_enabled tls-proxy; then
- iniset $OVN_META_CONF ovn \
+ iniset $config_file ovn \
ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem
- iniset $OVN_META_CONF ovn \
+ iniset $config_file ovn \
ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt
- iniset $OVN_META_CONF ovn \
+ iniset $config_file ovn \
ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key
fi
+ if [[ $config_file == $OVN_AGENT_CONF ]]; then
+ iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS
+ iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE
+ fi
fi
}
@@ -684,6 +723,9 @@
if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then
_start_process "devstack@q-ovn-metadata-agent.service"
fi
+ if is_service_enabled q-ovn-agent neutron-ovn-agent ; then
+ _start_process "devstack@q-ovn-agent.service"
+ fi
}
# start_ovn() - Start running processes, including screen
@@ -750,6 +792,12 @@
setup_logging $OVN_META_CONF
fi
+ if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+ run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF"
+ # Format logging
+ setup_logging $OVN_AGENT_CONF
+ fi
+
_start_ovn_services
}
@@ -774,6 +822,12 @@
sudo pkill -9 -f "[h]aproxy" || :
_stop_process "devstack@q-ovn-metadata-agent.service"
fi
+ if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+ # pkill takes care not to kill itself, but it may kill its parent
+ # sudo unless we use the "ps | grep [f]oo" trick
+ sudo pkill -9 -f "[h]aproxy" || :
+ _stop_process "devstack@q-ovn-agent.service"
+ fi
if is_service_enabled ovn-controller-vtep ; then
_stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
fi
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 2bf884a..c6d4663 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -47,7 +47,8 @@
# used for the network. In case of ofagent, you should add the
# corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS.
# For openvswitch agent, you should add the corresponding entry to
-# your OVS_BRIDGE_MAPPINGS.
+# your OVS_BRIDGE_MAPPINGS and for OVN add the corresponding entry
+# to your OVN_BRIDGE_MAPPINGS.
#
# eg. (ofagent)
# Q_USE_PROVIDERNET_FOR_PUBLIC=True
@@ -60,6 +61,11 @@
# PUBLIC_PHYSICAL_NETWORK=public
# OVS_BRIDGE_MAPPINGS=public:br-ex
#
+# eg. (ovn agent)
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVN_BRIDGE_MAPPINGS=public:br-ex
+#
# The provider-network-type defaults to flat, however, the values
# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
# be set to specify the parameters for an alternate network type.
diff --git a/lib/nova b/lib/nova
index f5f002d..95ed4d0 100644
--- a/lib/nova
+++ b/lib/nova
@@ -53,11 +53,19 @@
NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
NOVA_API_DB=${NOVA_API_DB:-nova_api}
-NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
-NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
+NOVA_UWSGI=nova.wsgi.osapi_compute:application
+NOVA_METADATA_UWSGI=nova.wsgi.metadata:application
NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini
NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini
+# Allow forcing the stable compute uuid to something specific. This would be
+# done by deployment tools that pre-allocate the UUIDs, but it is also handy
+# for developers that need to re-stack a compute-only deployment multiple
+# times. Since the DB is non-local and not erased on an unstack, making it
+# stay the same each time is what developers want. Set to a uuid here or
+# leave it blank for default allocate-on-start behavior.
+NOVA_CPU_UUID=""
+
# The total number of cells we expect. Must be greater than one and doesn't
# count cell0.
NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1}
@@ -67,13 +75,10 @@
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
-# Toggle for deploying Nova-API under a wsgi server. We default to
-# true to use UWSGI, but allow False so that fall back to the
-# eventlet server can happen for grenade runs.
-# NOTE(cdent): We can adjust to remove the eventlet-base api service
-# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to
-# mean "use uwsgi" because we'll be always using uwsgi.
-NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True}
+# We do not need to report service status every 10s for devstack-like
+# deployments. In the gate this generates extra work for the services and the
+# database which are already taxed.
+NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120}
if is_service_enabled tls-proxy; then
NOVA_SERVICE_PROTOCOL="https"
@@ -98,10 +103,10 @@
NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
-# This is used to switch the compute API policies enable the scope and new defaults.
-# By Default, these flag are False.
+# This is used to disable the compute API policies scope and new defaults.
+# By Default, it is True.
# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
-NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE)
+NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE)
if [[ $SERVICE_IP_VERSION == 6 ]]; then
NOVA_MY_IP="$HOST_IPV6"
@@ -115,7 +120,7 @@
# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with
# the default filters.
-NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
QEMU_CONF=/etc/libvirt/qemu.conf
@@ -160,6 +165,9 @@
# Whether to use Keystone unified limits instead of legacy quota limits.
NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS)
+# TB Cache Size in MiB for qemu guests
+NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0}
+
# Functions
# ---------
@@ -235,8 +243,8 @@
stop_process "n-api"
stop_process "n-api-meta"
- remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
- remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+ remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api"
+ remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata"
if [[ "$NOVA_BACKEND" == "LVM" ]]; then
clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
@@ -377,11 +385,7 @@
create_service_user "nova" "admin"
local nova_api_url
- if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
- nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT"
- else
- nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
- fi
+ nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
get_or_create_service "nova_legacy" "compute_legacy" "Nova Compute Service (Legacy 2.0)"
get_or_create_endpoint \
@@ -448,6 +452,9 @@
iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
+ iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL
+ iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6))
+
if is_fedora; then
# nova defaults to /usr/local/bin, but fedora pip like to
# install things in /usr/bin
@@ -494,11 +501,6 @@
iniset $NOVA_CONF oslo_policy enforce_new_defaults False
iniset $NOVA_CONF oslo_policy enforce_scope False
fi
- if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
- # Set the service port for a proxy to take the original
- iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
- iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
- fi
configure_keystone_authtoken_middleware $NOVA_CONF nova
fi
@@ -537,11 +539,11 @@
iniset $NOVA_CONF upgrade_levels compute "auto"
if is_service_enabled n-api; then
- write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+ write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api"
fi
if is_service_enabled n-api-meta; then
- write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+ write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata"
fi
if is_service_enabled ceilometer; then
@@ -625,7 +627,7 @@
local conf=${1:-$NOVA_CONF}
iniset $conf placement auth_type "password"
iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf placement username placement
+ iniset $conf placement username nova
iniset $conf placement password "$SERVICE_PASSWORD"
iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
iniset $conf placement project_name "$SERVICE_TENANT_NAME"
@@ -995,17 +997,8 @@
local old_path=$PATH
export PATH=$NOVA_BIN_DIR:$PATH
- if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
- run_process n-api "$NOVA_BIN_DIR/nova-api"
- nova_url=$service_protocol://$SERVICE_HOST:$service_port
- # Start proxy if tsl enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
- fi
- else
- run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
- nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
- fi
+ run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
+ nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
echo "Waiting for nova-api to start..."
if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then
@@ -1052,6 +1045,10 @@
# by the compute process.
configure_console_compute
+ # Set rebuild timeout longer for BFV instances because we likely have
+ # slower disk than expected. Default is 20s/GB
+ iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180
+
# Configure the OVSDB connection for os-vif
if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
@@ -1062,7 +1059,14 @@
iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True
fi
+ if [[ "$NOVA_CPU_UUID" ]]; then
+ echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id
+ fi
+
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+ if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then
+ iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE}
+ fi
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# ``sg`` is used in run_process to execute nova-compute as a member of the
# **$LIBVIRT_GROUP** group.
@@ -1100,11 +1104,7 @@
local compute_cell_conf=$NOVA_CONF
run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
- if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
- run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
- else
- run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
- fi
+ run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
export PATH=$old_path
}
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 7992306..ba2e98e 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -69,7 +69,7 @@
$REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python
if is_ubuntu; then
- install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt
+ install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump
if is_arch "aarch64"; then
install_package qemu-efi
fi
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index f058e9b..9a39c79 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -53,6 +53,10 @@
iniset $NOVA_CONF ironic project_domain_id default
iniset $NOVA_CONF ironic project_name demo
fi
+ if is_ironic_sharded; then
+ iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME
+ fi
+
iniset $NOVA_CONF ironic user_domain_id default
iniset $NOVA_CONF ironic region_name $REGION_NAME
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 87c3d3a..4b44c1f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -56,6 +56,10 @@
# arm64-specific configuration
if is_arch "aarch64"; then
iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
+ # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is
+ # set to `host-passthrough`, or `nova-compute` refuses to
+ # start.
+ inidelete $NOVA_CONF libvirt cpu_model
fi
if isset ENABLE_FILE_INJECTION; then
diff --git a/lib/oslo b/lib/oslo
deleted file mode 100644
index 3ae64c8..0000000
--- a/lib/oslo
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-#
-# lib/oslo
-#
-# Functions to install **Oslo** libraries from git
-#
-# We need this to handle the fact that projects would like to use
-# pre-released versions of oslo libraries.
-#
-# Included for compatibility with grenade, remove in Queens
-source $TOP_DIR/lib/libraries
diff --git a/lib/placement b/lib/placement
index c6bf99f..03aaa03 100644
--- a/lib/placement
+++ b/lib/placement
@@ -37,7 +37,7 @@
else
PLACEMENT_BIN_DIR=$(get_python_exec_prefix)
fi
-PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api
+PLACEMENT_UWSGI=placement.wsgi.api:application
PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini
if is_service_enabled tls-proxy; then
@@ -68,33 +68,7 @@
# runs that a clean run would need to clean up
function cleanup_placement {
sudo rm -f $(apache_site_config_for placement-api)
- remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
-}
-
-# _config_placement_apache_wsgi() - Set WSGI config files
-function _config_placement_apache_wsgi {
- local placement_api_apache_conf
- local venv_path=""
- local placement_bin_dir=""
- placement_bin_dir=$(get_python_exec_prefix)
- placement_api_apache_conf=$(apache_site_config_for placement-api)
-
- if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages"
- placement_bin_dir=${PROJECT_VENV["placement"]}/bin
- fi
-
- sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
- sudo sed -e "
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g;
- s|%SSLENGINE%|$placement_ssl|g;
- s|%SSLCERTFILE%|$placement_certfile|g;
- s|%SSLKEYFILE%|$placement_keyfile|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- s|%APIWORKERS%|$API_WORKERS|g
- " -i $placement_api_apache_conf
+ remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api"
}
# create_placement_conf() - Write config
@@ -112,11 +86,7 @@
sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR
create_placement_conf
- if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement"
- else
- _config_placement_apache_wsgi
- fi
+ write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement" "" "placement-api"
if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True
iniset $PLACEMENT_CONF oslo_policy enforce_scope True
@@ -147,7 +117,6 @@
# install_placement() - Collect source and prepare
function install_placement {
- install_apache_wsgi
# Install the openstackclient placement client plugin for CLI
pip_install_gr osc-placement
git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH
@@ -156,12 +125,7 @@
# start_placement_api() - Start the API processes ahead of other things
function start_placement_api {
- if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
- else
- enable_apache_site placement-api
- restart_apache_server
- fi
+ run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
echo "Waiting for placement-api to start..."
if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then
@@ -175,12 +139,7 @@
# stop_placement() - Disable the api service and stop it.
function stop_placement {
- if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- stop_process "placement-api"
- else
- disable_apache_site placement-api
- restart_apache_server
- fi
+ stop_process "placement-api"
}
# Restore xtrace
diff --git a/lib/swift b/lib/swift
index 1ebf073..3659624 100644
--- a/lib/swift
+++ b/lib/swift
@@ -844,14 +844,14 @@
function swift_configure_tempurls {
# note we are using swift credentials!
- openstack --os-cloud "" \
- --os-region-name $REGION_NAME \
- --os-auth-url $KEYSTONE_SERVICE_URI \
- --os-username=swift \
- --os-password=$SERVICE_PASSWORD \
- --os-user-domain-name=$SERVICE_DOMAIN_NAME \
- --os-project-name=$SERVICE_PROJECT_NAME \
- --os-project-domain-name=$SERVICE_DOMAIN_NAME \
+ openstack --os-cloud="" \
+ --os-region-name="$REGION_NAME" \
+ --os-auth-url="$KEYSTONE_SERVICE_URI" \
+ --os-username="swift" \
+ --os-password="$SERVICE_PASSWORD" \
+ --os-user-domain-name="$SERVICE_DOMAIN_NAME" \
+ --os-project-name="$SERVICE_PROJECT_NAME" \
+ --os-project-domain-name="$SERVICE_DOMAIN_NAME" \
object store account \
set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
}
diff --git a/lib/tempest b/lib/tempest
index 7da9f17..eeeef67 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -18,7 +18,7 @@
# - ``PUBLIC_NETWORK_NAME``
# - ``VIRT_DRIVER``
# - ``LIBVIRT_TYPE``
-# - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone
+# - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone
#
# Optional Dependencies:
#
@@ -29,6 +29,7 @@
# - ``DEFAULT_INSTANCE_USER``
# - ``DEFAULT_INSTANCE_ALT_USER``
# - ``CINDER_ENABLED_BACKENDS``
+# - ``CINDER_BACKUP_DRIVER``
# - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
#
# ``stack.sh`` calls the entry points in this order:
@@ -101,6 +102,9 @@
# it will run tempest with
TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
+TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192}
+TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256}
+
# Functions
# ---------
@@ -149,11 +153,10 @@
# ramdisk and kernel images. Takes 3 arguments, an array and two
# variables. The array will contain the list of active image UUIDs;
# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
-# set as the value of *both* other parameters.
+# set as the value of the img_id ($2) parameter.
function get_active_images {
declare -n img_array=$1
declare -n img_id=$2
- declare -n img_id_alt=$3
# start with a fresh array in case we are called multiple times
img_array=()
@@ -161,7 +164,6 @@
while read -r IMAGE_NAME IMAGE_UUID; do
if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
img_id="$IMAGE_UUID"
- img_id_alt="$IMAGE_UUID"
fi
img_array+=($IMAGE_UUID)
done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
@@ -170,13 +172,12 @@
function poll_glance_images {
declare -n image_array=$1
declare -n image_id=$2
- declare -n image_id_alt=$3
local -i poll_count
poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
while (( poll_count-- > 0 )) ; do
sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
- get_active_images image_array image_id image_id_alt
+ get_active_images image_array image_id
if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
return
fi
@@ -228,7 +229,7 @@
declare -a images
if is_service_enabled glance; then
- get_active_images images image_uuid image_uuid_alt
+ get_active_images images image_uuid
if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
# Glance image import is asynchronous and may be configured
@@ -236,7 +237,7 @@
# it's possible that this code is being executed before the
# import has completed and there may be no active images yet.
if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
- poll_glance_images images image_uuid image_uuid_alt
+ poll_glance_images images image_uuid
if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
exit 1
@@ -252,13 +253,22 @@
1)
if [ -z "$image_uuid" ]; then
image_uuid=${images[0]}
- image_uuid_alt=${images[0]}
fi
+ image_uuid_alt=$image_uuid
;;
*)
if [ -z "$image_uuid" ]; then
image_uuid=${images[0]}
- image_uuid_alt=${images[1]}
+ if [ -z "$image_uuid_alt" ]; then
+ image_uuid_alt=${images[1]}
+ fi
+ elif [ -z "$image_uuid_alt" ]; then
+ for image in ${images[@]}; do
+ if [[ "$image" != "$image_uuid" ]]; then
+ image_uuid_alt=$image
+ break
+ fi
+ done
fi
;;
esac
@@ -288,13 +298,15 @@
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
# Determine the flavor disk size based on the image size.
disk=$(image_size_in_gib $image_uuid)
- openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
+ ram=${TEMPEST_FLAVOR_RAM}
+ openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
fi
flavor_ref=42
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
# Determine the alt flavor disk size based on the alt image size.
disk=$(image_size_in_gib $image_uuid_alt)
- openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
+ ram=${TEMPEST_FLAVOR_ALT_RAM}
+ openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
fi
flavor_ref_alt=84
else
@@ -345,6 +357,20 @@
fi
fi
+ if is_service_enabled glance; then
+ git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH
+ pushd $OSTESTIMAGES_DIR
+ tox -egenerate
+ popd
+ iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml
+ local image_conversion
+ image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format)
+ if [[ -n "$image_conversion" ]]; then
+ iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True
+ fi
+ iniset $TEMPEST_CONFIG image-feature-enabled image_format_enforcement $GLANCE_ENFORCE_IMAGE_FORMAT
+ fi
+
iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE
ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
@@ -374,7 +400,6 @@
iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT
# Identity
- iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/"
iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3"
iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION
@@ -385,19 +410,7 @@
iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name
iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name
fi
- if [ "$ENABLE_IDENTITY_V2" == "True" ]; then
- # Run Identity API v2 tests ONLY if needed
- iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True
- else
- # Skip Identity API v2 tests by default
- iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
- fi
iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
- if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
- # we're going to disable v2 admin unless we're using v2 by default.
- iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
- fi
-
if is_service_enabled tls-proxy; then
iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE
fi
@@ -517,8 +530,19 @@
# Scenario
SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
+ SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros}
iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE
+ # Since version 0.6.0 cirros uses the dhcpcd dhcp client by default; however, cirros prior to
+ # version 0.6.0 used udhcpc (the only client available at that time), which is also tempest's default.
+ if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then
+ # the image is a cirros image
+ # use dhcpcd client when version greater or equal 0.6.0
+ if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then
+ iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd
+ fi
+ fi
+
# If using provider networking, use the physical network for validation rather than private
TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
if is_provider_network; then
@@ -554,6 +578,9 @@
TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
fi
iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
+ if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then
+ iniset $TEMPEST_CONFIG volume backup_driver swift
+ fi
local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
if [ "$tempest_volume_min_microversion" == "None" ]; then
@@ -681,8 +708,6 @@
# test can be run with scoped token.
if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $TEMPEST_CONFIG enforce_scope keystone true
- iniset $TEMPEST_CONFIG auth admin_system 'all'
- iniset $TEMPEST_CONFIG auth admin_project_name ''
fi
if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
diff --git a/lib/tls b/lib/tls
index e0c7500..0a598e1 100644
--- a/lib/tls
+++ b/lib/tls
@@ -364,8 +364,11 @@
function fix_system_ca_bundle_path {
if is_service_enabled tls-proxy; then
local capath
- capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
-
+ if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+ else
+ capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+ fi
if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
if is_fedora; then
sudo rm -f $capath
@@ -527,6 +530,7 @@
<VirtualHost $f_host:$f_port>
SSLEngine On
SSLCertificateFile $DEVSTACK_CERT
+ SSLProtocol -all +TLSv1.3 +TLSv1.2
# Disable KeepAlive to fix bug #1630664 a.k.a the
# ('Connection aborted.', BadStatusLine("''",)) error
@@ -540,9 +544,11 @@
# Avoid races (at the cost of performance) to re-use a pooled connection
# where the connection is closed (bug 1807518).
+ # Set acquire=1 to disable waiting for connection pool members so that
+ # we can determine when apache is overloaded (returns 503).
SetEnv proxy-initial-not-pooled
<Location />
- ProxyPass http://$b_host:$b_port/ retry=0 nocanon
+ ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1
ProxyPassReverse http://$b_host:$b_port/
</Location>
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
diff --git a/openrc b/openrc
index 6d488bb..5ec7634 100644
--- a/openrc
+++ b/openrc
@@ -7,9 +7,6 @@
# Set OS_USERNAME to override the default user name 'demo'
# Set ADMIN_PASSWORD to set the password for 'admin' and 'demo'
-# NOTE: support for the old NOVA_* novaclient environment variables has
-# been removed.
-
if [[ -n "$1" ]]; then
OS_USERNAME=$1
fi
@@ -35,26 +32,11 @@
# Get some necessary configuration
source $RC_DIR/lib/tls
-# The OpenStack ecosystem has standardized the term **project** as the
-# entity that owns resources. In some places **tenant** remains
-# referenced, but in all cases this just means **project**. We will
-# warn if we need to turn on legacy **tenant** support to have a
-# working environment.
+# Minimal configuration
+export OS_AUTH_TYPE=password
export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo}
-
-echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools."
-export OS_TENANT_NAME=$OS_PROJECT_NAME
-
-# In addition to the owning entity (project), nova stores the entity performing
-# the action as the **user**.
export OS_USERNAME=${OS_USERNAME:-demo}
-
-# With Keystone you pass the keystone password instead of an api key.
-# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs
-# or NOVA_PASSWORD.
export OS_PASSWORD=${ADMIN_PASSWORD:-secret}
-
-# Region
export OS_REGION_NAME=${REGION_NAME:-RegionOne}
# Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION
@@ -73,30 +55,14 @@
GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
fi
-# Identity API version
-export OS_IDENTITY_API_VERSION=3
-
-# Ask keystoneauth1 to use keystone
-export OS_AUTH_TYPE=password
-
-# Authenticating against an OpenStack cloud using Keystone returns a **Token**
-# and **Service Catalog**. The catalog contains the endpoints for all services
-# the user/project has access to - including nova, glance, keystone, swift, ...
-# We currently recommend using the version 3 *identity api*.
-#
-
# If you don't have a working .stackenv, this is the backup position
KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP}
export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI}
-# Currently, in order to use openstackclient with Identity API v3,
-# we need to set the domain which the user and project belong to.
-if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then
- export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"}
- export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"}
-fi
+export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"}
+export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"}
# Set OS_CACERT to a default CA certificate chain if it exists.
if [[ ! -v OS_CACERT ]] ; then
diff --git a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml b/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml
deleted file mode 100644
index f815e14..0000000
--- a/releasenotes/notes/Fix-dbcounter-NotImplementedError-on-SQLAlchemy-2-21bb6dcdf3ce4225.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
- - |
- Fixes a NotImplementedError when using the dbcounter SQLAlchemy plugin on
- SQLAlchemy 2.x.
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
index f9bb0f7..51a11b6 100644
--- a/roles/capture-performance-data/tasks/main.yaml
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -3,7 +3,9 @@
executable: /bin/bash
cmd: |
source {{ devstack_conf_dir }}/stackrc
- python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+ source {{ devstack_conf_dir }}/inc/python
+ setup_devstack_virtualenv
+ $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \
--db-user="$DATABASE_USER" \
--db-pass="$DATABASE_PASSWORD" \
--db-host="$DATABASE_HOST" \
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index 294c29c..cb7c6e3 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -43,9 +43,9 @@
base_branch={{ devstack_sources_branch }}
if git branch -a | grep "$base_branch" > /dev/null ; then
git checkout $base_branch
- elif [[ "$base_branch" == stable/* ]]; then
+ elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then
# Look for an eol tag for the stable branch.
- eol_tag=${base_branch#stable/}-eol
+ eol_tag="${base_branch#*/}-eol"
if git tag -l |grep $eol_tag >/dev/null; then
git checkout $eol_tag
git reset --hard $eol_tag
diff --git a/samples/local.sh b/samples/local.sh
index a1c5c81..7e6ae70 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -31,7 +31,7 @@
# ``demo``)
# Get OpenStack user auth
- source $TOP_DIR/openrc
+ export OS_CLOUD=devstack
# Add first keypair found in localhost:$HOME/.ssh
for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
diff --git a/stack.sh b/stack.sh
index be3c9dd..bfa0573 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,5 +1,6 @@
#!/usr/bin/env bash
+
# ``stack.sh`` is an opinionated OpenStack developer installation. It
# installs and configures various combinations of **Cinder**, **Glance**,
# **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift**
@@ -12,7 +13,7 @@
# a multi-node developer install.
# To keep this script simple we assume you are running on a recent **Ubuntu**
-# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL**
+# (Bionic or newer) or **CentOS/RHEL/RockyLinux**
# (7 or newer) machine. (It may work on other platforms but support for those
# platforms is left to those who added them to DevStack.) It should work in
# a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -229,7 +230,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|jammy|f36|rhel8|rhel9|openEuler-22.03"
+SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -301,17 +302,18 @@
}
function _install_rdo {
- if [[ $DISTRO == "rhel8" ]]; then
+ if [[ $DISTRO == "rhel9" ]]; then
+ rdo_release=${TARGET_BRANCH#*/}
if [[ "$TARGET_BRANCH" == "master" ]]; then
- # rdo-release.el8.rpm points to latest RDO release, use that for master
- sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ # adding delorean-deps repo to provide current master rpms
+ sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo
else
- # For stable branches use corresponding release rpm
- rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
- sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+ if sudo dnf provides centos-release-openstack-${rdo_release} >/dev/null 2>&1; then
+ sudo dnf -y install centos-release-openstack-${rdo_release}
+ else
+ sudo wget https://trunk.rdoproject.org/centos9-${rdo_release}/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo
+ fi
fi
- elif [[ $DISTRO == "rhel9" ]]; then
- sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo
fi
sudo dnf -y update
}
@@ -334,7 +336,9 @@
# Destination path for devstack logs
if [[ -n ${LOGDIR:-} ]]; then
- mkdir -p $LOGDIR
+ sudo mkdir -p $LOGDIR
+ safe_chown -R $STACK_USER $LOGDIR
+ safe_chmod 0755 $LOGDIR
fi
# Destination path for service data
@@ -391,7 +395,10 @@
# Patch: https://github.com/rpm-software-management/dnf/pull/1448
echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
elif [[ $DISTRO == "rhel9" ]]; then
+ # for CentOS Stream 9 repository
sudo dnf config-manager --set-enabled crb
+ # for RHEL 9 repository
+ sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms
# rabbitmq and other packages are provided by RDO repositories.
_install_rdo
@@ -408,8 +415,12 @@
# 1. the hostname package is not installed by default
# 2. Some necessary packages are in openstack repo, for example liberasurecode-devel
# 3. python3-pip can be uninstalled by `get_pip.py` automaticly.
- install_package hostname openstack-release-wallaby
+ # 4. Ensure wget installation before use
+ install_package hostname openstack-release-wallaby wget
uninstall_package python3-pip
+
+ # Add yum repository for libvirt7.X
+ sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo
fi
# Ensure python is installed
@@ -594,6 +605,12 @@
source $TOP_DIR/lib/database
source $TOP_DIR/lib/rpc_backend
+# load host tuning functions and defaults
+source $TOP_DIR/lib/host
+# tune host memory early to ensure zswap/ksm are configured before
+# doing memory intensive operation like cloning repos or unpacking packages.
+tune_host
+
# Configure Projects
# ==================
@@ -806,6 +823,20 @@
source $TOP_DIR/tools/fixup_stuff.sh
fixup_all
+if [[ "$GLOBAL_VENV" == "True" ]] ; then
+ # TODO(frickler): find a better solution for this
+ sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin
+ sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin
+
+ setup_devstack_virtualenv
+fi
+
# Install subunit for the subunit output stream
pip_install -U os-testr
@@ -985,6 +1016,9 @@
setup_dev_lib "python-openstackclient"
else
pip_install_gr python-openstackclient
+ if is_service_enabled openstack-cli-server; then
+ install_openstack_cli_server
+ fi
fi
# Installs alias for osc so that we can collect timing for all
@@ -1049,22 +1083,6 @@
# Save configuration values
save_stackenv $LINENO
-# Kernel Samepage Merging (KSM)
-# -----------------------------
-
-# Processes that mark their memory as mergeable can share identical memory
-# pages if KSM is enabled. This is particularly useful for nova + libvirt
-# backends but any other setup that marks its memory as mergeable can take
-# advantage. The drawback is there is higher cpu load; however, we tend to
-# be memory bound not cpu bound so enable KSM by default but allow people
-# to opt out if the CPU time is more important to them.
-
-if [[ $ENABLE_KSM == "True" ]] ; then
- if [[ -f /sys/kernel/mm/ksm/run ]] ; then
- sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run"
- fi
-fi
-
# Start Services
# ==============
diff --git a/stackrc b/stackrc
index b7ce238..c05d4e2 100644
--- a/stackrc
+++ b/stackrc
@@ -85,7 +85,7 @@
# Global toggle for enabling services under mod_wsgi. If this is set to
# ``True`` all services that use HTTPD + mod_wsgi as the preferred method of
# deployment, will be deployed under Apache. If this is set to ``False`` all
-# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``)
+# services will rely on the local toggle variable.
ENABLE_HTTPD_MOD_WSGI_SERVICES=True
# Set the default Nova APIs to enable
@@ -121,24 +121,11 @@
SYSTEMCTL="sudo systemctl"
fi
-
-# Whether or not to enable Kernel Samepage Merging (KSM) if available.
-# This allows programs that mark their memory as mergeable to share
-# memory pages if they are identical. This is particularly useful with
-# libvirt backends. This reduces memory usage at the cost of CPU overhead
-# to scan memory. We default to enabling it because we tend to be more
-# memory constrained than CPU bound.
-ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
-
# Passwords generated by interactive devstack runs
if [[ -r $RC_DIR/.localrc.password ]]; then
source $RC_DIR/.localrc.password
fi
-# Control whether Python 3 should be used at all.
-# TODO(frickler): Drop this when all consumers are fixed
-export USE_PYTHON3=True
-
# Adding the specific version of Python 3 to this variable will install
# the app using that version of the interpreter instead of just 3.
_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
@@ -175,14 +162,27 @@
export PS4='+ $(short_source): '
fi
-# Configure Identity API version
-# TODO(frickler): Drop this when plugins no longer need it
-IDENTITY_API_VERSION=3
-
# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides
# each services ${SERVICE}_ENFORCE_SCOPE variables
ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
+# Devstack supports the use of a global virtualenv. These variables enable
+# and disable this functionality as well as set the path to the virtualenv.
+# Note that the DATA_DIR is selected because grenade testing uses a shared
+# DATA_DIR but different DEST dirs and we don't want two sets of venvs,
+# instead we want one global set.
+DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv}
+
+# NOTE(kopecmartin): remove this once this is fixed
+# https://bugs.launchpad.net/devstack/+bug/2031639
+# This couldn't go to fixup_stuff as that's called after projects
+# (e.g. certain paths) are set taking GLOBAL_VENV into account
+if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then
+ GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV)
+else
+ GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV)
+fi
+
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
# an entry in the array will be installed into the named venv.
@@ -190,8 +190,9 @@
USE_VENV=$(trueorfalse False USE_VENV)
# Add packages that need to be installed into a venv but are not in any
-# requirmenets files here, in a comma-separated list
-ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""}
+# requirements files here, in a comma-separated list.
+# Currently only used when USE_VENV is true (individual project venvs)
+ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}
# This can be used to turn database query logging on and off
# (currently only implemented for MySQL backend)
@@ -204,7 +205,7 @@
# This can be used to reduce the amount of memory mysqld uses while running.
# These are unscientifically determined, and could reduce performance or
# cause other issues.
-MYSQL_REDUCE_MEMORY=$(trueorfalse False MYSQL_REDUCE_MEMORY)
+MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY)
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
@@ -222,6 +223,9 @@
# proxy uwsgi in front of it, or "mod_wsgi", which runs in
# apache. mod_wsgi is deprecated, don't use it.
WSGI_MODE=${WSGI_MODE:-"uwsgi"}
+if [[ "$WSGI_MODE" != "uwsgi" ]]; then
+ die $LINENO "$WSGI_MODE is no longer a supported WSGI mode. Only uwsgi is valid."
+fi
# Repositories
# ------------
@@ -248,7 +252,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="2023.2"
+DEVSTACK_SERIES="2025.1"
##############
#
@@ -303,6 +307,9 @@
TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master}
+OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git}
+OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+OSTESTIMAGES_DIR=${DEST}/os-test-images
##############
#
@@ -571,28 +578,6 @@
GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
GITDIR["os-ken"]=$DEST/os-ken
-##################
-#
-# TripleO / Heat Agent Components
-#
-##################
-
-# run-parts script required by os-refresh-config
-DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
-DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
-
-# os-apply-config configuration template tool
-OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
-OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-collect-config configuration agent
-OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
-OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-refresh-config configuration run-parts tool
-ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
-ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
#################
#
@@ -662,20 +647,19 @@
# If the file ends in .tar.gz, uncompress the tarball and and select the first
# .img file inside it as the image. If present, use "*-vmlinuz*" as the kernel
# and "*-initrd*" as the ramdisk
-# example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
+# example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz
# * disk image (*.img,*.img.gz)
# if file ends in .img, then it will be uploaded and registered as a to
# glance as a disk image. If it ends in .gz, it is uncompressed first.
# example:
-# http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
-# http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
+# https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img
+# https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
# * OpenVZ image:
# OpenVZ uses its own format of image, and does not support UEC style images
-#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
-#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
+#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
-CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.6.3"}
CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -692,11 +676,11 @@
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz}
- IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
+ IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
*) # otherwise, use the qcow image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
- IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
+ IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
esac
;;
vsphere)
@@ -707,7 +691,7 @@
# Use the same as the default for libvirt
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
- IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
+ IMAGE_URLS+="https://github.com/cirros-dev/cirros/releases/download/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
esac
DOWNLOAD_DEFAULT_IMAGES=False
fi
@@ -721,11 +705,11 @@
EXTRA_CACHE_URLS=""
# etcd3 defaults
-ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
-ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
-ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
-ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
-# etcd v3.2.x doesn't have anything for s390x
+ETCD_VERSION=${ETCD_VERSION:-v3.4.27}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"}
+# etcd v3.2.x and later don't have anything for s390x
ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
# Make sure etcd3 downloads the correct architecture
if is_arch "x86_64"; then
@@ -805,7 +789,7 @@
SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
# Service graceful shutdown timeout
-WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
+WORKER_TIMEOUT=${WORKER_TIMEOUT:-80}
# Common Configuration
# --------------------
diff --git a/tools/build_venv.sh b/tools/build_venv.sh
index cfa39a8..a439163 100755
--- a/tools/build_venv.sh
+++ b/tools/build_venv.sh
@@ -38,7 +38,7 @@
fi
# Build new venv
-virtualenv $VENV_DEST
+python$PYTHON3_VERSION -m venv --system-site-packages $VENV_DEST
# Install modern pip
PIP_VIRTUAL_ENV=$VENV_DEST pip_install -U pip
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
index 0ed7bb8..86e5529 100644
--- a/tools/dbcounter/dbcounter.py
+++ b/tools/dbcounter/dbcounter.py
@@ -96,20 +96,18 @@
This reads "hists" from from a queue fed by _log_event() and
writes (db,op)+=count stats to the database after ten seconds
of no activity to avoid triggering a write for every SELECT
- call. Write no less often than every thirty seconds and/or 100
- pending hits to avoid being starved by constant activity.
+ call. Write no less often than every sixty seconds to avoid being
+ starved by constant activity.
"""
LOG.debug('[%i] Writer thread running' % os.getpid())
while True:
to_write = {}
- total = 0
last = time.time()
- while time.time() - last < 30 and total < 100:
+ while time.time() - last < 60:
try:
item = self.queue.get(timeout=10)
to_write.setdefault(item, 0)
to_write[item] += 1
- total += 1
except queue.Empty:
break
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 0ec426b..faea44f 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -36,6 +36,12 @@
# Disable selinux to avoid configuring to allow Apache access
# to Horizon files (LP#1175444)
if selinuxenabled; then
+    # persist selinux config across reboots
+ cat << EOF | sudo tee /etc/selinux/config
+SELINUX=permissive
+SELINUXTYPE=targeted
+EOF
+ # then disable at runtime
sudo setenforce 0
fi
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 1cacd06..bc28515 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -73,8 +73,11 @@
s = requests.Session()
# sometimes gitea gives us a 500 error; retry sanely
# https://stackoverflow.com/a/35636367
+# We need to disable raise_on_status because if any repo ends up with a 500,
+# then the propose-updates job which runs this script will fail.
retries = Retry(total=3, backoff_factor=1,
- status_forcelist=[ 500 ])
+ status_forcelist=[ 500 ],
+ raise_on_status=False)
s.mount('https://', HTTPAdapter(max_retries=retries))
found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index f2d57c8..bb470b2 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -79,6 +79,8 @@
fi
fi
+# TODO(clarkb) remove these once we are switched to global venv by default
+export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
# Mark end of run
# ---------------
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 6c36534..2f404c2 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,12 @@
set -o errexit
-PYTHON=${PYTHON:-python3}
+# TODO(frickler): make this use stackrc variables
+if [ -x /opt/stack/data/venv/bin/python ]; then
+ PYTHON=/opt/stack/data/venv/bin/python
+else
+ PYTHON=${PYTHON:-python3}
+fi
# time to sleep between checks
SLEEP_TIME=20
diff --git a/tools/outfilter.py b/tools/outfilter.py
index e910f79..55f9ee1 100644
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -76,7 +76,8 @@
# with zuulv3 native jobs and ansible capture it may become
# clearer what to do
if HAS_DATE.search(line) is None:
- now = datetime.datetime.utcnow()
+ now = datetime.datetime.now(datetime.timezone.utc).replace(
+ tzinfo=None)
ts_line = ("%s | %s" % (
now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
line))
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
index 73fe3f3..ab8e8df 100755
--- a/tools/ping_neutron.sh
+++ b/tools/ping_neutron.sh
@@ -30,7 +30,8 @@
This provides a wrapper to ping neutron guests that are on isolated
tenant networks that the caller can't normally reach. It does so by
-creating a network namespace probe.
+using either the DHCP or Metadata network namespace to support both
+ML2/OVS and OVN.
It takes arguments like ping, except the first arg must be the network
name.
@@ -44,6 +45,12 @@
exit 1
}
+# BUG: with duplicate network names, this fails pretty hard since it
+# will just pick the first match.
+function _get_net_id {
+ openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | head -n 1 | awk '{print $2}'
+}
+
NET_NAME=$1
if [[ -z "$NET_NAME" ]]; then
@@ -53,12 +60,11 @@
REMAINING_ARGS="${@:2}"
-# BUG: with duplicate network names, this fails pretty hard.
-NET_ID=$(openstack network show -f value -c id "$NET_NAME")
-PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
+NET_ID=`_get_net_id $NET_NAME`
+NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1)
# This runs a command inside the specific netns
-NET_NS_CMD="ip netns exec qprobe-$PROBE_ID"
+NET_NS_CMD="ip netns exec $NET_NS"
PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS"
echo "Running $PING_CMD"
diff --git a/tools/worlddump.py b/tools/worlddump.py
index e292173..edbfa26 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -19,7 +19,6 @@
import argparse
import datetime
-from distutils import spawn
import fnmatch
import io
import os
@@ -52,7 +51,7 @@
def filename(dirname, name=""):
- now = datetime.datetime.utcnow()
+ now = datetime.datetime.now(datetime.timezone.utc)
fmt = "worlddump-%Y-%m-%d-%H%M%S"
if name:
fmt += "-" + name
@@ -76,7 +75,7 @@
def _find_cmd(cmd):
- if not spawn.find_executable(cmd):
+ if not shutil.which(cmd):
print("*** %s not found: skipping" % cmd)
return False
return True
diff --git a/tox.ini b/tox.ini
index ec764ab..26cd68c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@
# against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
# modified bashate tree
deps =
- {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
+ {env:BASHATE_INSTALL_PATH:bashate}
allowlist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
diff --git a/unstack.sh b/unstack.sh
index 33b069b..1b2d8dd 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -168,6 +168,10 @@
cleanup_etcd3
fi
+if is_service_enabled openstack-cli-server; then
+ stop_service devstack@openstack-cli-server
+fi
+
stop_dstat
# NOTE: Cinder automatically installs the lvm2 package, independently of the