Merge "Added recursive for deletion of $OVN_RUNDIR"
diff --git a/.zuul.yaml b/.zuul.yaml
index 0dda262..30e5397 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,11 +1,3 @@
-- pragma:
- # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to
- # be using devstack
- # TODO(gtema): delete this once r1 branch is merged into master
- implied-branches:
- - master
- - feature/r1
-
- nodeset:
name: openstack-single-node
nodes:
@@ -17,6 +9,16 @@
- controller
- nodeset:
+ name: openstack-single-node-jammy
+ nodes:
+ - name: controller
+ label: ubuntu-jammy
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-focal
nodes:
- name: controller
@@ -57,16 +59,6 @@
- controller
- nodeset:
- name: devstack-single-node-centos-8-stream
- nodes:
- - name: controller
- label: centos-8-stream
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
name: devstack-single-node-centos-9-stream
nodes:
- name: controller
@@ -90,7 +82,7 @@
name: devstack-single-node-fedora-latest
nodes:
- name: controller
- label: fedora-35
+ label: fedora-36
groups:
- name: tempest
nodes:
@@ -107,10 +99,20 @@
- controller
- nodeset:
- name: devstack-single-node-openeuler-20.03-sp2
+ name: devstack-single-node-rockylinux-9
nodes:
- name: controller
- label: openEuler-20-03-LTS-SP2
+ label: rockylinux-9
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-openeuler-22.03
+ nodes:
+ - name: controller
+ label: openEuler-22-03-LTS
groups:
- name: tempest
nodes:
@@ -147,12 +149,42 @@
- compute1
- nodeset:
- name: openstack-two-node-centos-8-stream
+ name: openstack-two-node-centos-9-stream
nodes:
- name: controller
- label: centos-8-stream
+ label: centos-9-stream
- name: compute1
- label: centos-8-stream
+ label: centos-9-stream
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
+ name: openstack-two-node-jammy
+ nodes:
+ - name: controller
+ label: ubuntu-jammy
+ - name: compute1
+ label: ubuntu-jammy
groups:
# Node where tests are executed and test results collected
- name: tempest
@@ -353,7 +385,6 @@
required-projects:
- opendev.org/openstack/devstack
roles:
- - zuul: opendev.org/openstack/devstack-gate
- zuul: opendev.org/openstack/openstack-zuul-jobs
vars:
devstack_localrc:
@@ -387,8 +418,10 @@
'{{ devstack_log_dir }}/devstacklog.txt.summary': logs
'{{ devstack_log_dir }}/tcpdump.pcap': logs
'{{ devstack_log_dir }}/worlddump-latest.txt': logs
+ '{{ devstack_log_dir }}/qemu.coredump': logs
'{{ devstack_full_log}}': logs
'{{ stage_dir }}/verify_tempest_conf.log': logs
+ '{{ stage_dir }}/performance.json': logs
'{{ stage_dir }}/apache': logs
'{{ stage_dir }}/apache_config': logs
'{{ stage_dir }}/etc': logs
@@ -407,6 +440,7 @@
'{{ stage_dir }}/rpm-qa.txt': logs
'{{ stage_dir }}/core': logs
'{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/services.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
/etc/ceph: logs
@@ -461,7 +495,7 @@
description: |
Minimal devstack base job, intended for use by jobs that need
less than the normal minimum set of required-projects.
- nodeset: openstack-single-node-focal
+ nodeset: openstack-single-node-jammy
required-projects:
- opendev.org/openstack/requirements
vars:
@@ -475,6 +509,7 @@
dstat: false
etcd3: true
memory_tracker: true
+ file_tracker: true
mysql: true
rabbit: true
group-vars:
@@ -483,6 +518,7 @@
# Shared services
dstat: false
memory_tracker: true
+ file_tracker: true
devstack_localrc:
# Multinode specific settings
HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -530,6 +566,7 @@
- opendev.org/openstack/swift
timeout: 7200
vars:
+ configure_swap_size: 4096
devstack_localrc:
# Common OpenStack services settings
SWIFT_REPLICAS: 1
@@ -550,6 +587,7 @@
dstat: false
etcd3: true
memory_tracker: true
+ file_tracker: true
mysql: true
rabbit: true
tls-proxy: true
@@ -599,6 +637,7 @@
# Shared services
dstat: false
memory_tracker: true
+ file_tracker: true
tls-proxy: true
# Nova services
n-cpu: true
@@ -633,11 +672,13 @@
name: devstack-ipv6
parent: devstack
description: |
- Devstack single node job for integration gate with IPv6.
+ Devstack single node job for integration gate with IPv6,
+ all services and tunnels using IPv6 addresses.
vars:
devstack_localrc:
SERVICE_IP_VERSION: 6
SERVICE_HOST: ""
+ TUNNEL_IP_VERSION: 6
- job:
name: devstack-enforce-scope
@@ -646,15 +687,12 @@
This job runs the devstack with scope checks enabled.
vars:
devstack_localrc:
- # Keep enabeling the services here to run with system scope
- CINDER_ENFORCE_SCOPE: true
- GLANCE_ENFORCE_SCOPE: true
- NEUTRON_ENFORCE_SCOPE: true
+ ENFORCE_SCOPE: true
- job:
name: devstack-multinode
parent: devstack
- nodeset: openstack-two-node-focal
+ nodeset: openstack-two-node-jammy
description: |
Simple multinode test to verify multinode functionality on devstack side.
This is not meant to be used as a parent job.
@@ -664,35 +702,62 @@
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
- name: devstack-platform-centos-8-stream
- parent: tempest-full-py3
- description: CentOS 8 Stream platform test
- nodeset: devstack-single-node-centos-8-stream
- voting: false
- timeout: 9000
- vars:
- configure_swap_size: 4096
-
-- job:
name: devstack-platform-centos-9-stream
parent: tempest-full-py3
description: CentOS 9 Stream platform test
nodeset: devstack-single-node-centos-9-stream
timeout: 9000
- vars:
- configure_swap_size: 4096
+ # TODO(kopecmartin) n-v until the following is resolved:
+ # https://bugs.launchpad.net/neutron/+bug/1979047
+ voting: false
- job:
name: devstack-platform-debian-bullseye
parent: tempest-full-py3
description: Debian Bullseye platform test
nodeset: devstack-single-node-debian-bullseye
- voting: false
timeout: 9000
vars:
configure_swap_size: 4096
- # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS
- # for the time being.
+
+- job:
+ name: devstack-platform-rocky-blue-onyx
+ parent: tempest-full-py3
+ description: Rocky Linux 9 Blue Onyx platform test
+ nodeset: devstack-single-node-rockylinux-9
+ timeout: 9000
+ voting: false
+ vars:
+ configure_swap_size: 4096
+
+- job:
+ name: devstack-platform-ubuntu-focal
+ parent: tempest-full-py3
+ description: Ubuntu 20.04 LTS (focal) platform test
+ nodeset: openstack-single-node-focal
+ timeout: 9000
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovn-source
+ parent: devstack-platform-ubuntu-jammy
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
+ voting: false
+ vars:
+ devstack_localrc:
+ OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+ name: devstack-platform-ubuntu-jammy-ovs
+ parent: tempest-full-py3
+ description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+ nodeset: openstack-single-node-jammy
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 8192
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
@@ -724,10 +789,10 @@
q-agt: true
- job:
- name: devstack-platform-openEuler-20.03-SP2
+ name: devstack-platform-openEuler-22.03-ovn-source
parent: tempest-full-py3
- description: openEuler 20.03 SP2 platform test
- nodeset: devstack-single-node-openeuler-20.03-sp2
+ description: openEuler 22.03 LTS platform test (OVN)
+ nodeset: devstack-single-node-openeuler-22.03
voting: false
timeout: 9000
vars:
@@ -736,6 +801,48 @@
# NOTE(wxy): OVN package is not supported by openEuler yet. Build it
# from source instead.
OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+ name: devstack-platform-openEuler-22.03-ovs
+ parent: tempest-full-py3
+ description: openEuler 22.03 LTS platform test (OVS)
+ nodeset: devstack-single-node-openeuler-22.03
+ voting: false
+ timeout: 9000
+ vars:
+ configure_swap_size: 8192
+ devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ devstack_services:
+ # Disable OVN services
+ ovn-northd: false
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ group-vars:
+ subnode:
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ # Disable Neutron ML2/OVN services
+ q-ovn-metadata-agent: false
+ # Enable Neutron ML2/OVS services
+ q-agt: true
- job:
name: devstack-no-tls-proxy
@@ -754,12 +861,6 @@
description: Fedora latest platform test
nodeset: devstack-single-node-fedora-latest
voting: false
- vars:
- configure_swap_size: 4096
- # Python 3.10 dependency issues; see
- # https://bugs.launchpad.net/horizon/+bug/1960204
- devstack_services:
- horizon: false
- job:
name: devstack-platform-fedora-latest-virt-preview
@@ -768,7 +869,6 @@
nodeset: devstack-single-node-fedora-latest
voting: false
vars:
- configure_swap_size: 4096
devstack_localrc:
ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
@@ -828,7 +928,7 @@
- job:
name: devstack-unit-tests
- nodeset: ubuntu-focal
+ nodeset: ubuntu-jammy
description: |
Runs unit tests on devstack project.
@@ -846,9 +946,14 @@
- devstack-ipv6
- devstack-enforce-scope
- devstack-platform-fedora-latest
- - devstack-platform-centos-8-stream
- devstack-platform-centos-9-stream
- devstack-platform-debian-bullseye
+ - devstack-platform-rocky-blue-onyx
+ - devstack-platform-ubuntu-focal
+ - devstack-platform-ubuntu-jammy-ovn-source
+ - devstack-platform-ubuntu-jammy-ovs
+ - devstack-platform-openEuler-22.03-ovn-source
+ - devstack-platform-openEuler-22.03-ovs
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
@@ -892,7 +997,11 @@
jobs:
- devstack
- devstack-ipv6
- - devstack-platform-centos-9-stream
+ # TODO(kopecmartin) n-v until the following is resolved:
+ # https://bugs.launchpad.net/neutron/+bug/1979047
+ # - devstack-platform-centos-9-stream
+ - devstack-platform-debian-bullseye
+ - devstack-platform-ubuntu-focal
- devstack-enforce-scope
- devstack-multinode
- devstack-unit-tests
@@ -947,7 +1056,6 @@
experimental:
jobs:
- - devstack-platform-openEuler-20.03-SP2
- nova-multi-cell
- nova-next
- neutron-fullstack-with-uwsgi
@@ -978,3 +1086,7 @@
periodic:
jobs:
- devstack-no-tls-proxy
+ periodic-weekly:
+ jobs:
+ - devstack-platform-openEuler-22.03-ovn-source
+ - devstack-platform-openEuler-22.03-ovs
diff --git a/clean.sh b/clean.sh
index 870dfd4..6a31cc6 100755
--- a/clean.sh
+++ b/clean.sh
@@ -50,7 +50,6 @@
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
set -o xtrace
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index dd8f21f..a83b2de 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -181,6 +181,9 @@
If the ``*_PASSWORD`` variables are not set here you will be prompted to
enter values for them by ``stack.sh``.
+.. warning:: Only use alphanumeric characters in your passwords, as some
+ services fail to work when using special characters.
+
The network ranges must not overlap with any networks in use on the
host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly
used for both the local networking and Nova's fixed and floating ranges.
@@ -279,7 +282,7 @@
::
- LOGDAYS=1
+ LOGDAYS=2
Some coloring is used during the DevStack runs to make it easier to
see what is going on. This can be disabled with::
@@ -521,8 +524,8 @@
can be configured with any valid IPv6 prefix. The default values make
use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
-Service Version
-~~~~~~~~~~~~~~~
+Service IP Version
+~~~~~~~~~~~~~~~~~~
DevStack can enable service operation over either IPv4 or IPv6 by
setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or
@@ -542,6 +545,27 @@
HOST_IPV6=${some_local_ipv6_address}
+Tunnel IP Version
+~~~~~~~~~~~~~~~~~
+
+DevStack can enable tunnel operation over either IPv4 or IPv6 by
+setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or
+``TUNNEL_IP_VERSION=6`` respectively.
+
+When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints,
+for example, ``HOST_IP``.
+
+When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints,
+for example, ``HOST_IPV6``.
+
+The default value for this setting is ``4``. Dual-mode operation, for
+example ``4+6``, is not supported, as this value must match the address
+family of the local tunnel endpoint IP(v6) address.
+
+The value of ``TUNNEL_IP_VERSION`` has a direct relationship to the
+setting of ``TUNNEL_ENDPOINT_IP``, which will default to ``HOST_IP``
+when set to ``4``, and ``HOST_IPV6`` when set to ``6``.
+
Multi-node setup
~~~~~~~~~~~~~~~~
@@ -615,7 +639,7 @@
::
$ cd /opt/stack/tempest
- $ tox -efull tempest.scenario.test_network_basic_ops
+ $ tox -e smoke
By default tempest is downloaded and the config file is generated, but the
tempest package is not installed in the system's global site-packages (the
@@ -648,6 +672,35 @@
or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value. (The default for
each is 10.)
+DevStack's Cinder LVM configuration module currently supports both iSCSI and
+NVMe connections, and we can choose which one to use with options
+``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``,
+and ``CINDER_TARGET_PORT``.
+
+Defaults use iSCSI with the LIO target manager::
+
+ CINDER_TARGET_HELPER="lioadm"
+ CINDER_TARGET_PROTOCOL="iscsi"
+ CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:"
+ CINDER_TARGET_PORT=3260
+
+Additionally there are 3 supported transport protocols for NVMe,
+``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``, and when the ``nvmet`` target
+is selected the protocol, prefix, and port defaults will change to more
+sensible defaults for NVMe::
+
+ CINDER_TARGET_HELPER="nvmet"
+ CINDER_TARGET_PROTOCOL="nvmet_rdma"
+ CINDER_TARGET_PREFIX="nvme-subsystem-1"
+ CINDER_TARGET_PORT=4420
+
+When selecting the RDMA transport protocol DevStack will create on Cinder nodes
+a Software RoCE device on top of the ``HOST_IP_IFACE`` and if it is not defined
+then on top of the interface with IP address ``HOST_IP`` or ``HOST_IPV6``.
+
+This Soft-RoCE device will always be created on the Nova compute side since we
+cannot tell beforehand whether there will be an RDMA connection or not.
+
Keystone
~~~~~~~~
@@ -698,7 +751,7 @@
::
- openstack --os-cloud devstack-system-admin registered limit update \
+ openstack --os-cloud devstack-system-admin registered limit set \
--service glance --default-limit 5000 --region RegionOne image_size_total
.. _arch-configuration:
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 4de238f..8b5a85b 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -42,8 +42,9 @@
~~~~~~~~~~~~~~~~~~~~~~~~~
All changes proposed to the Devstack require two ``Code-Review +2`` votes from
Devstack core reviewers before one of the core reviewers can approve the patch
-by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate
-which can be approved by single core reviewers.
+by giving ``Workflow +1`` vote. There are two exceptions: patches to
+unblock the gate and patches that do not relate to Devstack's core logic,
+for example old job cleanups, can be approved by a single core reviewer.
Project Team Lead Duties
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
index fd0d9cd..3ca0ad9 100644
--- a/doc/source/debugging.rst
+++ b/doc/source/debugging.rst
@@ -20,6 +20,12 @@
falling (i.e. processes are consuming memory). It also provides
output showing locked (unswappable) memory.
+file_tracker
+------------
+
+The ``file_tracker`` service periodically monitors the number of
+open files in the system.
+
tcpdump
-------
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index e7ec629..e7b46b6 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -20,7 +20,7 @@
guides/neutron
guides/devstack-with-nested-kvm
guides/nova
- guides/devstack-with-lbaas-v2
+ guides/devstack-with-octavia
guides/devstack-with-ldap
All-In-One Single VM
@@ -69,10 +69,10 @@
Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
-Configure Load-Balancer Version 2
------------------------------------
+Configure Octavia
+-----------------
-Guide on :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+Guide on :doc:`Configure Octavia <guides/devstack-with-octavia>`.
Deploying DevStack with LDAP
----------------------------
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
deleted file mode 100644
index 5d96ca7..0000000
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ /dev/null
@@ -1,145 +0,0 @@
-Devstack with Octavia Load Balancing
-====================================
-
-Starting with the OpenStack Pike release, Octavia is now a standalone service
-providing load balancing services for OpenStack.
-
-This guide will show you how to create a devstack with `Octavia API`_ enabled.
-
-.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
-
-Phase 1: Create DevStack + 2 nova instances
---------------------------------------------
-
-First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find
-useful.
-
-Install devstack
-
-::
-
- git clone https://opendev.org/openstack/devstack
- cd devstack/tools
- sudo ./create-stack-user.sh
- cd ../..
- sudo mv devstack /opt/stack
- sudo chown -R stack.stack /opt/stack/devstack
-
-This will clone the current devstack code locally, then setup the "stack"
-account that devstack services will run under. Finally, it will move devstack
-into its default location in /opt/stack/devstack.
-
-Edit your ``/opt/stack/devstack/local.conf`` to look like
-
-::
-
- [[local|localrc]]
- enable_plugin octavia https://opendev.org/openstack/octavia
- # If you are enabling horizon, include the octavia dashboard
- # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git
- # If you are enabling barbican for TLS offload in Octavia, include it here.
- # enable_plugin barbican https://opendev.org/openstack/barbican
-
- # ===== BEGIN localrc =====
- DATABASE_PASSWORD=password
- ADMIN_PASSWORD=password
- SERVICE_PASSWORD=password
- SERVICE_TOKEN=password
- RABBIT_PASSWORD=password
- # Enable Logging
- LOGFILE=$DEST/logs/stack.sh.log
- VERBOSE=True
- LOG_COLOR=True
- # Pre-requisite
- ENABLED_SERVICES=rabbit,mysql,key
- # Horizon - enable for the OpenStack web GUI
- # ENABLED_SERVICES+=,horizon
- # Nova
- ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
- ENABLED_SERVICES+=,placement-api,placement-client
- # Glance
- ENABLED_SERVICES+=,g-api
- # Neutron
- ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
- ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
- # Cinder
- ENABLED_SERVICES+=,c-api,c-vol,c-sch
- # Tempest
- ENABLED_SERVICES+=,tempest
- # Barbican - Optionally used for TLS offload in Octavia
- # ENABLED_SERVICES+=,barbican
- # ===== END localrc =====
-
-Run stack.sh and do some sanity checks
-
-::
-
- sudo su - stack
- cd /opt/stack/devstack
- ./stack.sh
- . ./openrc
-
- openstack network list # should show public and private networks
-
-Create two nova instances that we can use as test http servers:
-
-::
-
- #create nova instances on private network
- openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
- openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
- openstack server list # should show the nova instances just created
-
- #add secgroup rules to allow ssh etc..
- openstack security group rule create default --protocol icmp
- openstack security group rule create default --protocol tcp --dst-port 22:22
- openstack security group rule create default --protocol tcp --dst-port 80:80
-
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
-
-::
-
- MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
- while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
-Phase 2: Create your load balancer
-----------------------------------
-
-Make sure you have the 'openstack loadbalancer' commands:
-
-::
-
- pip install python-octaviaclient
-
-Create your load balancer:
-
-::
-
- openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
- openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
- openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
- openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
- openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
- openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
- openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
- openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
- openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
- openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
- openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
-
-Please note: The <web server # address> fields are the IP addresses of the nova
-servers created in Phase 1.
-Also note, using the API directly you can do all of the above commands in one
-API call.
-
-Phase 3: Test your load balancer
---------------------------------
-
-::
-
- openstack loadbalancer show lb1 # Note the vip_address
- curl http://<vip_address>
- curl http://<vip_address>
-
-This should show the "Welcome to <IP>" message from each member server.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 3732f06..ba483e9 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -1,3 +1,5 @@
+.. _kvm_nested_virt:
+
=======================================================
Configure DevStack with KVM-based Nested Virtualization
=======================================================
diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst
new file mode 100644
index 0000000..55939f0
--- /dev/null
+++ b/doc/source/guides/devstack-with-octavia.rst
@@ -0,0 +1,144 @@
+Devstack with Octavia Load Balancing
+====================================
+
+Starting with the OpenStack Pike release, Octavia is now a standalone service
+providing load balancing services for OpenStack.
+
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+Phase 1: Create DevStack + 2 nova instances
+--------------------------------------------
+
+First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space,
+make sure it is updated. Install git and any other developer tools you find
+useful.
+
+Install devstack::
+
+ git clone https://opendev.org/openstack/devstack
+ cd devstack/tools
+ sudo ./create-stack-user.sh
+ cd ../..
+ sudo mv devstack /opt/stack
+    sudo chown -R stack:stack /opt/stack/devstack
+
+This will clone the current devstack code locally, then setup the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location in /opt/stack/devstack.
+
+Edit your ``/opt/stack/devstack/local.conf`` to look like::
+
+ [[local|localrc]]
+ # ===== BEGIN localrc =====
+ DATABASE_PASSWORD=password
+ ADMIN_PASSWORD=password
+ SERVICE_PASSWORD=password
+ SERVICE_TOKEN=password
+ RABBIT_PASSWORD=password
+ GIT_BASE=https://opendev.org
+ # Optional settings:
+ # OCTAVIA_AMP_BASE_OS=centos
+ # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream
+ # OCTAVIA_AMP_IMAGE_SIZE=3
+ # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY
+ # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True
+ # LIBS_FROM_GIT+=octavia-lib,
+ # Enable Logging
+ LOGFILE=$DEST/logs/stack.sh.log
+ VERBOSE=True
+ LOG_COLOR=True
+ enable_service rabbit
+ enable_plugin neutron $GIT_BASE/openstack/neutron
+ # Octavia supports using QoS policies on the VIP port:
+ enable_service q-qos
+ enable_service placement-api placement-client
+ # Octavia services
+ enable_plugin octavia $GIT_BASE/openstack/octavia master
+ enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard
+ enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider
+ enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin
+ enable_service octavia o-api o-cw o-hm o-hk o-da
+ # If you are enabling barbican for TLS offload in Octavia, include it here.
+ # enable_plugin barbican $GIT_BASE/openstack/barbican
+ # enable_service barbican
+ # Cinder (optional)
+ disable_service c-api c-vol c-sch
+ # Tempest
+ enable_service tempest
+ # ===== END localrc =====
+
+.. note::
+ For best performance it is highly recommended to use KVM
+ virtualization instead of QEMU.
+ Also make sure nested virtualization is enabled as documented in
+ :ref:`the respective guide <kvm_nested_virt>`.
+ By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your
+ ``local.conf`` you enable the guest VMs to make use of all features your
+ host's CPU provides.
+
+Run stack.sh and do some sanity checks::
+
+ sudo su - stack
+ cd /opt/stack/devstack
+ ./stack.sh
+ . ./openrc
+
+ openstack network list # should show public and private networks
+
+Create two nova instances that we can use as test http servers::
+
+ # create nova instances on private network
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+ openstack server list # should show the nova instances just created
+
+ # add secgroup rules to allow ssh etc..
+ openstack security group rule create default --protocol icmp
+ openstack security group rule create default --protocol tcp --dst-port 22:22
+ openstack security group rule create default --protocol tcp --dst-port 80:80
+
+Set up a simple web server on each of these instances. One possibility is to use
+the `Golang test server`_ that is used by the Octavia project for CI testing
+as well.
+Copy the binary to your instances and start it as shown below
+(username 'cirros', password 'gocubsgo')::
+
+ INST_IP=<instance IP>
+ scp -O test_server.bin cirros@${INST_IP}:
+ ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP}
+
+When started this way the test server will respond to HTTP requests with
+its own IP.
+
+Phase 2: Create your load balancer
+----------------------------------
+
+Create your load balancer::
+
+ openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet
+ openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1
+ openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+ openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+ openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+ openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: The <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note, using the API directly you can do all of the above commands in one
+API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+ openstack loadbalancer show lb1 # Note the vip_address
+ curl http://<vip_address>
+ curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
+
+
+.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 81c5945..658422b 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -75,13 +75,21 @@
useradd -s /bin/bash -d /opt/stack -m stack
+Ensure that the home directory for the ``stack`` user has executable
+permission for all, as RHEL-based distros create it with ``700`` and
+Ubuntu 21.04+ with ``750``, which can cause issues during deployment.
+
+::
+
+ chmod +x /opt/stack
+
This user will be making many changes to your system during installation
and operation so it needs to have sudo privileges to root without a
password:
::
- echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
From here on use the ``stack`` user. **Logout** and **login** as the
``stack`` user.
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index a0e97ed..a4385b5 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -49,13 +49,21 @@
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
+Ensure that the home directory for the ``stack`` user has executable
+permission for all, as RHEL-based distros create it with ``700`` and
+Ubuntu 21.04+ with ``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+ $ sudo chmod +x /opt/stack
+
Since this user will be making many changes to your system, it will need
to have sudo privileges:
.. code-block:: console
$ apt-get install sudo -y || yum install -y sudo
- $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+ $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
.. note:: On some systems you may need to use ``sudo visudo``.
@@ -98,6 +106,9 @@
- Set the service password. This is used by the OpenStack services
(Nova, Glance, etc) to authenticate with Keystone.
+.. warning:: Only use alphanumeric characters in your passwords, as some
+ services fail to work when using special characters.
+
``local.conf`` should look something like this:
.. code-block:: ini
diff --git a/doc/source/index.rst b/doc/source/index.rst
index feb50ce..1e932f8 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,9 +38,10 @@
Start with a clean and minimal install of a Linux system. DevStack
attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler.
+latest/current Fedora version, CentOS/RHEL/Rocky Linux 9, OpenSUSE and
+openEuler.
-If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
+If you do not have a preference, Ubuntu 22.04 (Jammy) is the
most tested, and will probably go the smoothest.
Add Stack User (optional)
@@ -57,6 +58,14 @@
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
+Ensure the home directory for the ``stack`` user has executable permission for all,
+as RHEL-based distros create it with ``700`` and Ubuntu 21.04+ with ``750``,
+which can cause issues during deployment.
+
+.. code-block:: console
+
+ $ sudo chmod +x /opt/stack
+
Since this user will be making many changes to your system, it should
have sudo privileges:
@@ -93,7 +102,10 @@
This is the minimum required config to get started with DevStack.
.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
- under the *samples* directory in the devstack repository.
+ under the *samples* directory in the devstack repository.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+ services fail to work when using special characters.
Start the install
-----------------
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 7d70d74..62dd15b 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -238,7 +238,7 @@
locations in the top-level of the plugin repository:
- ``./devstack/files/debs/$plugin_name`` - Packages to install when running
- on Ubuntu, Debian or Linux Mint.
+ on Ubuntu or Debian.
- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
on Red Hat, Fedora, or CentOS.
diff --git a/files/debs/nova b/files/debs/nova
index 0194f00..5c00ad7 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -1,7 +1,5 @@
conntrack
curl
-dnsmasq-base
-dnsmasq-utils # for dhcp_release
ebtables
genisoimage # required for config_drive
iptables
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 1cc2f62..082b9ac 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,8 +1,6 @@
cdrkit-cdrtools-compat # dist:sle12
conntrack-tools
curl
-dnsmasq
-dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
ebtables
iptables
iputils
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 93b5746..19f158f 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
ceph # NOPRIME
-redhat-lsb-core # not:rhel9,openEuler-20.03
+redhat-lsb-core # not:rhel9,openEuler-22.03
xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 163a7c8..b6866de 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -16,7 +16,7 @@
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
-make # dist:openEuler-20.03
+mod_ssl # required for tls-proxy on centos 9 stream computes
net-tools
openssh-server
openssl
@@ -26,10 +26,9 @@
postgresql-devel # psycopg2
psmisc
python3-devel
-python3-pip
+python3-pip # not:openEuler-22.03
python3-systemd
-redhat-rpm-config # not:openEuler-20.03 missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # dist:openEuler-20.03
+redhat-rpm-config # not:openEuler-22.03 missing dep for gcc hardening flags, see rhbz#1217376
tar
tcpdump
unzip
diff --git a/files/rpms/nova b/files/rpms/nova
index 9e8621c..e0f13b8 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,12 +1,10 @@
conntrack-tools
curl
-dnsmasq # for q-dhcp
-dnsmasq-utils # for dhcp_release
ebtables
genisoimage # not:rhel9 required for config_drive
iptables
iputils
-kernel-modules # not:openEuler-20.03
+kernel-modules # not:openEuler-22.03
kpartx
parted
polkit
diff --git a/files/rpms/swift b/files/rpms/swift
index a838d78..49a1833 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,7 +1,7 @@
curl
-liberasurecode-devel # not:openEuler-20.03
+liberasurecode-devel
memcached
rsync-daemon
sqlite
xfsprogs
-xinetd # not:f35,rhel9
+xinetd # not:f36,rhel9
diff --git a/functions b/functions
index ccca5cd..7ada0fe 100644
--- a/functions
+++ b/functions
@@ -414,10 +414,10 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" -f value -c id)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" -f value -c id)
fi
_upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
fi
diff --git a/functions-common b/functions-common
index b2cf9d9..4eed5d8 100644
--- a/functions-common
+++ b/functions-common
@@ -49,7 +49,7 @@
STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
KEYSTONE_SERVICE_URI \
LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
- HOST_IPV6 SERVICE_IP_VERSION"
+ HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION"
# Saves significant environment variables to .stackenv for later use
@@ -418,6 +418,9 @@
os_RELEASE=${VERSION_ID}
os_CODENAME="n/a"
os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+ elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then
+ os_VENDOR="Rocky"
+ os_RELEASE=${VERSION_ID}
else
_ensure_lsb_release
@@ -426,7 +429,7 @@
os_VENDOR=$(lsb_release -i -s)
fi
- if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
+ if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then
os_PACKAGE="deb"
else
os_PACKAGE="rpm"
@@ -444,9 +447,8 @@
function GetDistro {
GetOSVersion
- if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \
- "$os_VENDOR" =~ (LinuxMint) ]]; then
- # 'Everyone' refers to Ubuntu / Debian / Mint releases by
+ if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+ # 'Everyone' refers to Ubuntu / Debian releases by
# the code name adjective
DISTRO=$os_CODENAME
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
@@ -467,13 +469,12 @@
"$os_VENDOR" =~ (AlmaLinux) || \
"$os_VENDOR" =~ (Scientific) || \
"$os_VENDOR" =~ (OracleServer) || \
+ "$os_VENDOR" =~ (Rocky) || \
"$os_VENDOR" =~ (Virtuozzo) ]]; then
# Drop the . release as we assume it's compatible
# XXX re-evaluate when we get RHEL10
DISTRO="rhel${os_RELEASE::1}"
elif [[ "$os_VENDOR" =~ (openEuler) ]]; then
- # The DISTRO here is `openEuler-20.03`. While, actually only openEuler
- # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs.
DISTRO="openEuler-$os_RELEASE"
else
# We can't make a good choice here. Setting a sensible DISTRO
@@ -518,7 +519,7 @@
# Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS, etc).
+# (Fedora, RHEL, CentOS, Rocky, etc).
# is_fedora
function is_fedora {
if [[ -z "$os_VENDOR" ]]; then
@@ -529,6 +530,7 @@
[ "$os_VENDOR" = "openEuler" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "RedHatEnterprise" ] || \
+ [ "$os_VENDOR" = "Rocky" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
[ "$os_VENDOR" = "AlmaLinux" ] || \
[ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
@@ -576,6 +578,8 @@
[ "$os_PACKAGE" = "deb" ]
}
+# Determine if current distribution is an openEuler distribution
+# is_openeuler
function is_openeuler {
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
@@ -657,7 +661,7 @@
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
- find $git_dest -name '*.pyc' -delete
+ sudo find $git_dest -name '*.pyc' -delete
# handle git_ref accordingly to type (tag, branch)
if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
@@ -673,6 +677,18 @@
fi
fi
+ # NOTE(ianw) 2022-04-13 : commit [1] has broken many assumptions
+ # about how we clone and work with repos. Mark them safe globally
+ # as a work-around.
+ #
+ # NOTE(danms): On bionic (and likely others) git-config may write
+ # ~stackuser/.gitconfig if not run with sudo -H. Using --system
+ # writes these changes to /etc/gitconfig which is more
+ # discoverable anyway.
+ #
+ # [1] https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9
+ sudo git config --system --add safe.directory ${git_dest}
+
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
@@ -875,14 +891,9 @@
# Usage: get_or_create_domain <name> <description>
function get_or_create_domain {
local domain_id
- # Gets domain id
domain_id=$(
- # Gets domain id
- openstack --os-cloud devstack-system-admin domain show $1 \
- -f value -c id 2>/dev/null ||
- # Creates new domain
openstack --os-cloud devstack-system-admin domain create $1 \
- --description "$2" \
+ --description "$2" --or-show \
-f value -c id
)
echo $domain_id
@@ -971,29 +982,22 @@
# Usage: get_or_add_user_project_role <role> <user> <project> [<user_domain> <project_domain>]
function get_or_add_user_project_role {
local user_role_id
+ local domain_args
domain_args=$(_get_domain_args $4 $5)
- # Gets user role id
+ # Note this is idempotent so we are safe across multiple
+ # duplicate calls.
+ openstack --os-cloud devstack-system-admin role add $1 \
+ --user $2 \
+ --project $3 \
+ $domain_args
user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--project $3 \
$domain_args \
- | grep '^|\s[a-f0-9]\+' | get_field 1)
- if [[ -z "$user_role_id" ]]; then
- # Adds role to user and get it
- openstack --os-cloud devstack-system-admin role add $1 \
- --user $2 \
- --project $3 \
- $domain_args
- user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
- --role $1 \
- --user $2 \
- --project $3 \
- $domain_args \
- | grep '^|\s[a-f0-9]\+' | get_field 1)
- fi
+ -c Role -f value)
echo $user_role_id
}
@@ -1001,23 +1005,18 @@
# Usage: get_or_add_user_domain_role <role> <user> <domain>
function get_or_add_user_domain_role {
local user_role_id
- # Gets user role id
+
+ # Note this is idempotent so we are safe across multiple
+ # duplicate calls.
+ openstack --os-cloud devstack-system-admin role add $1 \
+ --user $2 \
+ --domain $3
user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--user $2 \
--domain $3 \
- | grep '^|\s[a-f0-9]\+' | get_field 1)
- if [[ -z "$user_role_id" ]]; then
- # Adds role to user and get it
- openstack --os-cloud devstack-system-admin role add $1 \
- --user $2 \
- --domain $3
- user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
- --role $1 \
- --user $2 \
- --domain $3 \
- | grep '^|\s[a-f0-9]\+' | get_field 1)
- fi
+ -c Role -f value)
+
echo $user_role_id
}
@@ -1056,23 +1055,18 @@
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
local group_role_id
- # Gets group role id
+
+ # Note this is idempotent so we are safe across multiple
+ # duplicate calls.
+ openstack role add $1 \
+ --group $2 \
+ --project $3
group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
--role $1 \
--group $2 \
--project $3 \
- -f value)
- if [[ -z "$group_role_id" ]]; then
- # Adds role to group and get it
- openstack --os-cloud devstack-system-admin role add $1 \
- --group $2 \
- --project $3
- group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
- --role $1 \
- --group $2 \
- --project $3 \
- -f value)
- fi
+ -f value -c Role)
+
echo $group_role_id
}
@@ -1154,7 +1148,7 @@
}
function is_ironic_enforce_scope {
- is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+ is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0
return 1
}
@@ -1563,6 +1557,7 @@
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local extra=""
if [[ -n "$group" ]]; then
extra="Group=$group"
@@ -1576,6 +1571,9 @@
iniset -sudo $unitfile "Service" "KillMode" "process"
iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
+ if [[ -n "$env_vars" ]] ; then
+ iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+ fi
if [[ -n "$group" ]]; then
iniset -sudo $unitfile "Service" "Group" "$group"
fi
@@ -1590,6 +1588,7 @@
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local unitfile="$SYSTEMD_DIR/$service"
mkdir -p $SYSTEMD_DIR
@@ -1604,6 +1603,9 @@
iniset -sudo $unitfile "Service" "NotifyAccess" "all"
iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
+ if [[ -n "$env_vars" ]] ; then
+ iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+ fi
if [[ -n "$group" ]]; then
iniset -sudo $unitfile "Service" "Group" "$group"
fi
@@ -1651,10 +1653,14 @@
local systemd_service="devstack@$service.service"
local group=$3
local user=${4:-$STACK_USER}
+ if [[ -z "$user" ]]; then
+ user=$STACK_USER
+ fi
+ local env_vars="$5"
if [[ "$command" =~ "uwsgi" ]] ; then
- write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user"
+ write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
else
- write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+ write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
fi
$SYSTEMCTL enable $systemd_service
@@ -1675,18 +1681,20 @@
# If the command includes shell metachatacters (;<>*) it must be run using a shell
# If an optional group is provided sg will be used to run the
# command as that group.
-# run_process service "command-line" [group] [user]
+# run_process service "command-line" [group] [user] [env_vars]
+# env_vars must be a space-separated list of variable assignments, e.g. "A=1 B=2"
function run_process {
local service=$1
local command="$2"
local group=$3
local user=$4
+ local env_vars="$5"
local name=$service
time_start "run_process"
if is_service_enabled $service; then
- _run_under_systemd "$name" "$command" "$group" "$user"
+ _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars"
fi
time_stop "run_process"
}
diff --git a/inc/ini-config b/inc/ini-config
index 7993682..f65e42d 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -189,6 +189,9 @@
local option=$3
local value=$4
+ # Escape the ampersand character (&)
+ value=$(echo $value | sed -e 's/&/\\&/g')
+
if [[ -z $section || -z $option ]]; then
$xtrace
return
diff --git a/inc/python b/inc/python
index 9382d35..3eb3efe 100644
--- a/inc/python
+++ b/inc/python
@@ -186,15 +186,11 @@
$xtrace
- # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
- # the same behaviour of setuptools before version 25.0.0.
- # related issue: https://github.com/pypa/pip/issues/3874
$sudo_pip \
http_proxy="${http_proxy:-}" \
https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
- SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
$cmd_pip $upgrade \
$@
result=$?
diff --git a/lib/apache b/lib/apache
index 02827d1..dd8c9a0 100644
--- a/lib/apache
+++ b/lib/apache
@@ -95,7 +95,7 @@
# didn't fix Python 3.10 compatibility before release. Should be
# fixed in uwsgi 4.9.0; can remove this when packages available
# or we drop this release
- elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then
+ elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then
# Note httpd comes with mod_proxy_uwsgi and it is loaded by
# default; the mod_proxy_uwsgi package actually conflicts now.
# See:
diff --git a/lib/cinder b/lib/cinder
index b029fa0..2424f92 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -43,6 +43,13 @@
GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
CINDER_DIR=$DEST/cinder
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+ CINDER_MY_IP="$HOST_IPV6"
+else
+ CINDER_MY_IP="$HOST_IP"
+fi
+
+
# Cinder virtual environment
if [[ ${USE_VENV} = True ]]; then
PROJECT_VENV["cinder"]=${CINDER_DIR}.venv
@@ -88,13 +95,32 @@
CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
-# Default to lioadm
-CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+
+if [[ -n "$CINDER_ISCSI_HELPER" ]]; then
+ if [[ -z "$CINDER_TARGET_HELPER" ]]; then
+ deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead'
+ CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER"
+ else
+ deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER'
+ fi
+fi
+CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm}
+
+if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then
+ CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'}
+ CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'}
+ CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420}
+else
+ CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'}
+ CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'}
+ CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260}
+fi
+
# EL and SUSE should only use lioadm
if is_fedora || is_suse; then
- if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
- die "lioadm is the only valid Cinder target_helper config on this platform"
+ if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then
+ die "lioadm and nvmet are the only valid Cinder target_helper config on this platform"
fi
fi
@@ -187,7 +213,7 @@
function cleanup_cinder {
# ensure the volume group is cleared up because fails might
# leave dead volumes in the group
- if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+ if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
local targets
targets=$(sudo tgtadm --op show --mode target)
if [ $? -ne 0 ]; then
@@ -215,8 +241,14 @@
else
stop_service tgtd
fi
- else
+ elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
+ elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+ # If we don't disconnect everything vgremove will block
+ sudo nvme disconnect-all
+ sudo nvmetcli clear
+ else
+ die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER"
fi
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -267,7 +299,7 @@
iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER"
iniset $CINDER_CONF database connection `database_connection_url cinder`
iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
@@ -275,11 +307,7 @@
iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
- if [[ $SERVICE_IP_VERSION == 6 ]]; then
- iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6"
- else
- iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
- fi
+ iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP"
iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
@@ -380,24 +408,35 @@
iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
- if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $CINDER_CONF oslo_policy enforce_scope true
iniset $CINDER_CONF oslo_policy enforce_new_defaults true
+ else
+ iniset $CINDER_CONF oslo_policy enforce_scope false
+ iniset $CINDER_CONF oslo_policy enforce_new_defaults false
fi
}
# create_cinder_accounts() - Set up common required cinder accounts
-# Tenant User Roles
+# Project User Roles
# ------------------------------------------------------------------
-# service cinder admin # if enabled
+# SERVICE_PROJECT_NAME cinder service
+# SERVICE_PROJECT_NAME cinder creator (if Barbican is enabled)
# Migrated from keystone_data.sh
function create_cinder_accounts {
# Cinder
if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- create_service_user "cinder"
+ local extra_role=""
+
+ # cinder needs the "creator" role in order to interact with barbican
+ if is_service_enabled barbican; then
+ extra_role=$(get_or_create_role "creator")
+ fi
+
+ create_service_user "cinder" $extra_role
# block-storage is the official service type
get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
@@ -465,9 +504,9 @@
function install_cinder {
git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
setup_develop $CINDER_DIR
- if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
+ if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then
install_package tgt
- elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
+ elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then
if is_ubuntu; then
# TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
sudo mkdir -p /etc/target
@@ -476,6 +515,43 @@
else
install_package targetcli
fi
+ elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then
+ install_package nvme-cli
+
+ # TODO: Remove manual installation of the dependency when the
+ # requirement is added to nvmetcli:
+ # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html
+ if is_ubuntu; then
+ install_package python3-configshell-fb
+ else
+ install_package python3-configshell
+ fi
+ # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3
+ pip_install git+git://git.infradead.org/users/hch/nvmetcli.git
+
+ sudo modprobe nvmet
+ sudo modprobe nvme-fabrics
+
+ if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+ install_package rdma-core
+ sudo modprobe nvme-rdma
+
+ # Create the Soft-RoCE device over the networking interface
+ local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`}
+ if [[ -z "$iface" ]]; then
+ die $LINENO "Cannot find interface to bind Soft-RoCE"
+ fi
+
+ if ! sudo rdma link | grep $iface ; then
+ sudo rdma link add rxe_$iface type rxe netdev $iface
+ fi
+
+ elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+ sudo modprobe nvme-tcp
+
+ else # 'nvmet_fc'
+ sudo modprobe nvme-fc
+ fi
fi
}
@@ -512,7 +588,7 @@
service_port=$CINDER_SERVICE_PORT_INT
service_protocol="http"
fi
- if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+ if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
if is_service_enabled c-vol; then
# Delete any old stack.conf
sudo rm -f /etc/tgt/conf.d/stack.conf
@@ -552,8 +628,13 @@
fi
run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
- run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
- run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+ # Tune glibc for Python Services using single malloc arena for all threads
+ # and disabling dynamic thresholds to reduce memory usage when using native
+ # threads directly or via eventlet.tpool
+ # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html
+ malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144"
+ run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning"
+ run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning"
# NOTE(jdg): For cinder, startup order matters. To ensure that repor_capabilities is received
# by the scheduler start the cinder-volume service last (or restart it) after the scheduler
diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate
index 3ffd9a6..3b9f1d1 100644
--- a/lib/cinder_backends/fake_gate
+++ b/lib/cinder_backends/fake_gate
@@ -50,7 +50,7 @@
iniset $CINDER_CONF $be_name volume_backend_name $be_name
iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver"
iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
- iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index e03ef14..4286511 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -50,7 +50,10 @@
iniset $CINDER_CONF $be_name volume_backend_name $be_name
iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
- iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
+ iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL"
+ iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT"
+ iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX"
iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
}
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
index e4003c0..4b18049 100644
--- a/lib/cinder_backups/ceph
+++ b/lib/cinder_backups/ceph
@@ -26,12 +26,15 @@
function configure_cinder_backup_ceph {
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
- if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ # Execute this part only when cephadm is not used
+ if [[ "$CEPHADM_DEPLOY" = "False" ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+ if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
fi
- sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
- sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0f45273..fbad44e 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -100,8 +100,13 @@
# Set the root password - only works the first time. For Ubuntu, we already
# did that with debconf before installing the package, but we still try,
- # because the package might have been installed already.
- sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+ # because the package might have been installed already. We don't do this
+ # for Ubuntu 22.04 (jammy) because the authorization model change in
+ # version 10.4 of mariadb. See
+ # https://mariadb.org/authentication-in-mariadb-10-4/
+ if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+ sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+ fi
# In case of Mariadb, giving hostname in arguments causes permission
# problems as it expects connection through socket
@@ -115,13 +120,21 @@
# as root so it works only as sudo. To restore old "mysql like" behaviour,
# we need to change auth plugin for root user
if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
- sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
- sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+ if [[ "$DISTRO" == "jammy" ]]; then
+ # For Ubuntu 22.04 (jammy) we follow the model outlined in
+ # https://mariadb.org/authentication-in-mariadb-10-4/
+ sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');"
+ else
+ sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+ sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+ fi
fi
- # Create DB user if it does not already exist
- sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
- # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
- sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+ if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+ # Create DB user if it does not already exist
+ sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+ # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
+ sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+ fi
# Now update ``my.cnf`` for some local needs and restart the mysql service
@@ -150,6 +163,19 @@
iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
fi
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ echo "enabling MySQL performance counting"
+
+ # Install our sqlalchemy plugin
+ pip_install ${TOP_DIR}/tools/dbcounter
+
+ # Create our stats database for accounting
+ recreate_database stats
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+ "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+ count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
+ fi
+
restart_service $MYSQL_SERVICE_NAME
}
@@ -209,7 +235,17 @@
function database_connection_url_mysql {
local db=$1
- echo "$BASE_SQL_CONN/$db?charset=utf8"
+ local plugin
+
+ # NOTE(danms): We don't enable perf on subnodes yet because the
+ # plugin is not installed there
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+ if is_service_enabled mysql; then
+ plugin="&plugin=dbcounter"
+ fi
+ fi
+
+ echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
}
diff --git a/lib/dstat b/lib/dstat
index eb03ae0..870c901 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -40,12 +40,18 @@
if is_service_enabled peakmem_tracker; then
die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead"
fi
+
+ # To enable file_tracker add:
+ # enable_service file_tracker
+ # to your localrc
+ run_process file_tracker "$TOP_DIR/tools/file_tracker.sh"
}
# stop_dstat() stop dstat process
function stop_dstat {
stop_process dstat
stop_process memory_tracker
+ stop_process file_tracker
}
# Restore xtrace
diff --git a/lib/glance b/lib/glance
index b94c06d..041acaf 100644
--- a/lib/glance
+++ b/lib/glance
@@ -432,10 +432,14 @@
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
- if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $GLANCE_API_CONF oslo_policy enforce_scope true
iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
+ else
+ iniset $GLANCE_API_CONF oslo_policy enforce_scope false
+ iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false
+ iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false
fi
}
diff --git a/lib/keystone b/lib/keystone
index a4c8a52..6cb4aac 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -265,10 +265,15 @@
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
fi
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+
+ iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $KEYSTONE_CONF oslo_policy enforce_scope true
iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
- iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+ else
+ iniset $KEYSTONE_CONF oslo_policy enforce_scope false
+ iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false
fi
}
diff --git a/lib/lvm b/lib/lvm
index d3f6bf1..57ffb96 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -130,7 +130,7 @@
local size=$2
# Start the tgtd service on Fedora and SUSE if tgtadm is used
- if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+ if is_fedora || is_suse && [[ "$CINDER_TARGET_HELPER" = "tgtadm" ]]; then
start_service tgtd
fi
@@ -138,10 +138,14 @@
_create_lvm_volume_group $vg $size
# Remove iscsi targets
- if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then
+ if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
- else
+ elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
+ elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+ # If we don't disconnect everything vgremove will block
+ sudo nvme disconnect-all
+ sudo nvmetcli clear
fi
_clean_lvm_volume_group $vg
}
diff --git a/lib/neutron b/lib/neutron
index e7719d4..8708bf4 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -1,122 +1,311 @@
#!/bin/bash
#
# lib/neutron
-# Install and start **Neutron** network services
+# functions - functions specific to neutron
# Dependencies:
-#
# ``functions`` file
# ``DEST`` must be defined
+# ``STACK_USER`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
-# - is_XXXX_enabled
-# - install_XXXX
-# - configure_XXXX
-# - init_XXXX
-# - start_XXXX
-# - stop_XXXX
-# - cleanup_XXXX
+# - install_neutron_agent_packages
+# - install_neutronclient
+# - install_neutron
+# - install_neutron_third_party
+# - configure_neutron
+# - init_neutron
+# - configure_neutron_third_party
+# - init_neutron_third_party
+# - start_neutron_third_party
+# - create_nova_conf_neutron
+# - configure_neutron_after_post_config
+# - start_neutron_service_and_check
+# - check_neutron_third_party_integration
+# - start_neutron_agents
+# - create_neutron_initial_network
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# - stop_neutron
+# - stop_neutron_third_party
+# - cleanup_neutron
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# Functions in lib/neutron are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - neutron exercises
+# - 3rd party programs
-# Defaults
+
+# Neutron Networking
+# ------------------
+
+# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want
+# to run Neutron on this host, make sure that q-svc is also in
+# ``ENABLED_SERVICES``.
+#
+# See "Neutron Network Configuration" below for additional variables
+# that must be set in localrc for connectivity across hosts with
+# Neutron.
+
+# Settings
# --------
+
+# Neutron Network Configuration
+# -----------------------------
+
+if is_service_enabled tls-proxy; then
+ Q_PROTOCOL="https"
+fi
+
+
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
+
+NEUTRON_DIR=$DEST/neutron
+NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
+
+# Support entry points installation of console scripts
+if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
+ NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
+else
+ NEUTRON_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+NEUTRON_CONF_DIR=/etc/neutron
+NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
+export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
+
# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
# - False (default) : Run neutron under Eventlet
# - True : Run neutron under uwsgi
# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
# enough
NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
-NEUTRON_DIR=$DEST/neutron
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
# of the new RBAC policies and scopes.
NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
-NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
+# Agent binaries. Note, binary paths for other agents are set in per-service
+# scripts in lib/neutron_plugins/services/
+AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
+AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
+AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
+
+# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
+# loaded from per-plugin scripts in lib/neutron_plugins/
+Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
+# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in the neutron repository;
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE
+Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
+# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in the neutron repository;
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_L3_CONF=$Q_L3_CONF_FILE
+Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
+
+# Default name for Neutron database
+Q_DB_NAME=${Q_DB_NAME:-neutron}
+# Default Neutron Plugin
+Q_PLUGIN=${Q_PLUGIN:-ml2}
+# Default Neutron Port
+Q_PORT=${Q_PORT:-9696}
+# Default Neutron Internal Port when using TLS proxy
+Q_PORT_INT=${Q_PORT_INT:-19696}
+# Default Neutron Host
+Q_HOST=${Q_HOST:-$SERVICE_HOST}
+# Default protocol
+Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
+# Default listen address
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# RHEL's support for namespaces requires using veths with ovs
+Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
+Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
+Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
+# Meta data IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
+# Allow Overlapping IP among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
+Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
+
+# Allow to skip stopping of OVN services
+SKIP_STOP_OVN=${SKIP_STOP_OVN:-False}
+
+# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
+# /etc/neutron is assumed by many of devstack plugins. Do not change.
+_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
+
+# The name of the service in the endpoint URL
+NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
+if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ NEUTRON_ENDPOINT_SERVICE_NAME="networking"
+fi
+
+# List of config file names in addition to the main plugin config file
+# To add additional plugin config files, use ``neutron_server_config_add``
+# utility function. For example:
+#
+# ``neutron_server_config_add file1``
+#
+# These config files are relative to ``/etc/neutron``. The above
+# example would specify ``--config-file /etc/neutron/file1`` for
+# neutron server.
+declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
+
+# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
+
+
+Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
+if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+ Q_RR_COMMAND="sudo"
+else
+ NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
+ Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
+ if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+ Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+ fi
+fi
+
+
# Distributed Virtual Router (DVR) configuration
# Can be:
-# - ``legacy`` - No DVR functionality
-# - ``dvr_snat`` - Controller or single node DVR
-# - ``dvr`` - Compute node in multi-node DVR
+# - ``legacy`` - No DVR functionality
+# - ``dvr_snat`` - Controller or single node DVR
+# - ``dvr`` - Compute node in multi-node DVR
# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
#
-# Default is 'dvr_snat' since it can handle both DVR and legacy routers
-NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat}
-
-NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-NEUTRON_DHCP_BINARY="neutron-dhcp-agent"
-
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
-NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
-
-NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
-NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
-NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/
-NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
-
-NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# By default, use the ML2 plugin
-NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
-NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
-NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
-
-NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini}
-NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME
-
-NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
-NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
-NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent}
-NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent}
-
-# Public facing bits
-if is_service_enabled tls-proxy; then
- NEUTRON_SERVICE_PROTOCOL="https"
+Q_DVR_MODE=${Q_DVR_MODE:-legacy}
+if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
fi
-NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST}
-NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696}
-NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696}
-NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone}
-NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
-NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
+# Provider Network Configurations
+# --------------------------------
-# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
-# an external network bridge
-PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
-PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
+# The following variables control the Neutron ML2 plugins' allocation
+# of tenant networks and availability of provider networks. If these
+# are not configured in ``localrc``, tenant networks will be local to
+# the host (with no remote connectivity), and no physical resources
+# will be available for the allocation of provider networks.
-# Network type - default vxlan, however enables vlan based jobs to override
-# using the legacy environment variable as well as a new variable in greater
-# alignment with the naming scheme of this plugin.
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan}
+# To disable tunnels (GRE or VXLAN) for tenant networks,
+# set to False in ``local.conf``.
+# GRE tunnels are only supported by the openvswitch agent.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
-NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}}
+# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
+# specify the range of IDs from which tenant networks are
+# allocated. Can be overridden in ``localrc`` if necessary.
+TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
-# Physical network for VLAN network usage.
-NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+# To use VLANs for tenant networks, set to True in localrc. VLANs
+# are supported by the ML2 plugins, requiring additional configuration
+# described below.
+ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+# If using VLANs for tenant networks, set in ``localrc`` to specify
+# the range of VLAN VIDs from which tenant networks are
+# allocated. An external network switch must be configured to
+# trunk these VLANs between hosts for multi-host connectivity.
+#
+# Example: ``TENANT_VLAN_RANGE=1000:1999``
+TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-# Additional neutron api config files
-declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
+# If using VLANs for tenant networks, or if using flat or VLAN
+# provider networks, set in ``localrc`` to the name of the physical
+# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
+# agent, as described below.
+#
+# Example: ``PHYSICAL_NETWORK=default``
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
+
+# With the openvswitch agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the OVS bridge to use for the physical network. The
+# bridge will be created if it does not already exist, but a
+# physical interface must be manually added to the bridge as a
+# port for external connectivity.
+#
+# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
+
+# With the linuxbridge agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the network interface to use for the physical
+# network.
+#
+# Example: ``LB_PHYSICAL_INTERFACE=eth1``
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+ default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+ die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+ LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
+
+# With the openvswitch plugin, set to True in ``localrc`` to enable
+# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+#
+# Example: ``OVS_ENABLE_TUNNELING=True``
+OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+
+# To use the DHCP agent for providing the metadata service when no
+# L3 agent (No Route Agent) is running, set to True in localrc.
+ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
+
+# Add a static route as a DHCP option, so that requests to 169.254.169.254
+# can reach the metadata service through a route (DHCP agent).
+# This option requires ENABLE_ISOLATED_METADATA = True.
+ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
+# Neutron plugin specific functions
+# ---------------------------------
+
+# Please refer to ``lib/neutron_plugins/README.md`` for details.
+if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
+ source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+fi
+
+# Agent metering service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/metering
+
+# L3 Service functions
+source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
+
+# Use security group or not
+if has_neutron_plugin_security_group; then
+ Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
+else
+ Q_USE_SECGROUP=False
+fi
+
+# Save trace setting
+_XTRACE_NEUTRON=$(set +o | grep xtrace)
+set +o xtrace
+
# Functions
# ---------
@@ -130,303 +319,194 @@
}
# Test if any Neutron services are enabled
-# is_neutron_enabled
+# TODO(slaweq): this is not really needed now and we should remove it as
+# soon as it is no longer called from any other Devstack plugins, like e.g.
+# the Neutron plugin
function is_neutron_legacy_enabled {
- # first we need to remove all "neutron-" from DISABLED_SERVICES list
- disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g')
- [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1
- [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
- return 1
+ return 0
}
-if is_neutron_legacy_enabled; then
- source $TOP_DIR/lib/neutron-legacy
-fi
-
-# cleanup_neutron() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_neutron_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
- if is_neutron_ovs_base_plugin; then
- neutron_ovs_base_cleanup
+function _determine_config_server {
+ if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
+ if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
+ deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+ else
+ die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+ fi
fi
-
- if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
- neutron_lb_cleanup
+ if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
+ deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead."
fi
- # delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
- sudo ip netns delete ${ns}
+ for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+ _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
done
+
+ local cfg_file
+ local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
+ opts+=" --config-file $cfg_file"
+ done
+ echo "$opts"
}
-# configure_root_helper_options() - Configure agent rootwrap helper options
-function configure_root_helper_options {
- local conffile=$1
- iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD"
- iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD"
+function _determine_config_l3 {
+ local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
+ echo "$opts"
}
-# configure_neutron() - Set config files, create data dirs, etc
-function configure_neutron_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-
- (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
- cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
- configure_neutron_rootwrap
-
- mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
-
- # NOTE(yamamoto): A decomposed plugin should prepare the config file in
- # its devstack plugin.
- if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then
- cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
+# For services and agents that require it, dynamically construct a list of
+# --config-file arguments that are passed to the binary.
+function determine_config_files {
+ local opts=""
+ case "$1" in
+ "neutron-server") opts="$(_determine_config_server)" ;;
+ "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
+ esac
+ if [ -z "$opts" ] ; then
+ die $LINENO "Could not determine config files for $1."
fi
+ echo "$opts"
+}
- iniset $NEUTRON_CONF database connection `database_connection_url neutron`
- iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
- iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
- iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
-
- iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
+# configure_neutron()
+# Set common config for all neutron server and agents.
+function configure_neutron {
+ _configure_neutron_common
iniset_rpc_backend neutron $NEUTRON_CONF
- # Neutron API server & Neutron plugin
- if is_service_enabled neutron-api; then
- local policy_file=$NEUTRON_CONF_DIR/policy.json
- # Allow neutron user to administer neutron to match neutron account
- # NOTE(amotoki): This is required for nova works correctly with neutron.
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then
- cp $NEUTRON_DIR/etc/policy.json $policy_file
- sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file
- else
- echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file
- fi
-
- cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
-
- iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
-
- iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
- iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
- iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
-
- iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
- configure_keystone_authtoken_middleware $NEUTRON_CONF neutron
- configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
- # Configure tenant network type
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE
-
- local mech_drivers="openvswitch"
- if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
- mech_drivers+=",l2population"
- else
- mech_drivers+=",linuxbridge"
- fi
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
-
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
- if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE}
- fi
- if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
- neutron_ml2_extension_driver_add port_security
- fi
- configure_rbac_policies
+ if is_service_enabled q-metering neutron-metering; then
+ _configure_neutron_metering
+ fi
+ if is_service_enabled q-agt neutron-agent; then
+ _configure_neutron_plugin_agent
+ fi
+ if is_service_enabled q-dhcp neutron-dhcp; then
+ _configure_neutron_dhcp_agent
+ fi
+ if is_service_enabled q-l3 neutron-l3; then
+ _configure_neutron_l3_agent
+ fi
+ if is_service_enabled q-meta neutron-metadata-agent; then
+ _configure_neutron_metadata_agent
fi
- # Neutron OVS or LB agent
- if is_service_enabled neutron-agent; then
- iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
- iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF
+ if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+ _configure_dvr
+ fi
+ if is_service_enabled ceilometer; then
+ _configure_neutron_ceilometer_notifications
+ fi
- # Configure the neutron agent
- if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
- iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
- iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
- elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
- iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
- iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+ if [[ $Q_AGENT == "ovn" ]]; then
+ configure_ovn
+ configure_ovn_plugin
+ fi
- if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
- iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
- iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
- iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True
- fi
- fi
-
- if ! running_in_container; then
- enable_kernel_bridge_firewall
+ # Configure Neutron's advanced services
+ if is_service_enabled q-placement neutron-placement; then
+ configure_placement_extension
+ fi
+ if is_service_enabled q-trunk neutron-trunk; then
+ configure_trunk_extension
+ fi
+ if is_service_enabled q-qos neutron-qos; then
+ configure_qos
+ if is_service_enabled q-l3 neutron-l3; then
+ configure_l3_agent_extension_fip_qos
+ configure_l3_agent_extension_gateway_ip_qos
fi
fi
- # DHCP Agent
- if is_service_enabled neutron-dhcp; then
- cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
-
- iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- # make it so we have working DNS from guests
- iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
-
- configure_root_helper_options $NEUTRON_DHCP_CONF
- iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
- neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
+ # Finally configure Neutron server and core plugin
+ if is_service_enabled q-agt neutron-agent q-svc neutron-api; then
+ _configure_neutron_service
fi
- if is_service_enabled neutron-l3; then
- cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
- iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
- neutron_service_plugin_class_add router
- configure_root_helper_options $NEUTRON_L3_CONF
- iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
+ iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
+ # devstack is not a tool for running uber scale OpenStack
+ # clouds, therefore running without a dedicated RPC worker
+ # for state reports is more than adequate.
+ iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
- # Configure the neutron agent to serve external network ports
- if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
- iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
- else
- iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
- fi
-
- if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
- iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE
- fi
- fi
-
- # Metadata
- if is_service_enabled neutron-metadata-agent; then
- cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
-
- iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST
- iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
- # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
- configure_root_helper_options $NEUTRON_META_CONF
-
- # TODO(dtroyer): remove the v2.0 hard code below
- iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
- configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT
- fi
-
- # Format logging
- setup_logging $NEUTRON_CONF
-
- if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
- # Set the service port for a proxy to take the original
- iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
- iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
- fi
-
- # Metering
- if is_service_enabled neutron-metering; then
- cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF
- neutron_service_plugin_class_add metering
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
fi
}
-# configure_neutron_rootwrap() - configure Neutron's rootwrap
-function configure_neutron_rootwrap {
- # Deploy new rootwrap filters files (owned by root).
- # Wipe any existing rootwrap.d files first
- if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then
- sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d
+function configure_neutron_nova {
+ create_nova_conf_neutron $NOVA_CONF
+ if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
+ for i in $(seq 1 $NOVA_NUM_CELLS); do
+ local conf
+ conf=$(conductor_conf $i)
+ create_nova_conf_neutron $conf
+ done
fi
-
- # Deploy filters to /etc/neutron/rootwrap.d
- sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
- sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-
- # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
- sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR
- sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf
-
- # Set up the rootwrap sudoers for Neutron
- tempfile=`mktemp`
- echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile
- echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile
- chmod 0440 $tempfile
- sudo chown root:root $tempfile
- sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap
}
-# Make Neutron-required changes to nova.conf
-# Takes a single optional argument which is the config file to update,
-# if not passed $NOVA_CONF is used.
-function configure_neutron_nova_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+function create_nova_conf_neutron {
local conf=${1:-$NOVA_CONF}
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf neutron username neutron
+ iniset $conf neutron username "$Q_ADMIN_USERNAME"
iniset $conf neutron password "$SERVICE_PASSWORD"
- iniset $conf neutron user_domain_name "Default"
- iniset $conf neutron project_name "$SERVICE_TENANT_NAME"
- iniset $conf neutron project_domain_name "Default"
- iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
+ iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
+ iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $conf neutron region_name "$REGION_NAME"
# optionally set options in nova_conf
neutron_plugin_create_nova_conf $conf
- if is_service_enabled neutron-metadata-agent; then
+ if is_service_enabled q-meta neutron-metadata-agent; then
iniset $conf neutron service_metadata_proxy "True"
fi
+ iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+ iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
}
+# create_neutron_accounts() - Set up common required neutron accounts
+
# Tenant User Roles
# ------------------------------------------------------------------
# service neutron admin # if enabled
-# create_neutron_accounts() - Create required service accounts
-function create_neutron_accounts_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+# Migrated from keystone_data.sh
+function create_neutron_accounts {
local neutron_url
-
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+ neutron_url=$Q_PROTOCOL://$SERVICE_HOST/
else
- neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+ neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
+ fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
fi
-
- if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
+ if is_service_enabled q-svc neutron-api; then
create_service_user "neutron"
- neutron_service=$(get_or_create_service "neutron" \
- "network" "Neutron Service")
- get_or_create_endpoint $neutron_service \
+ get_or_create_service "neutron" "network" "Neutron Service"
+ get_or_create_endpoint \
+ "network" \
"$REGION_NAME" "$neutron_url"
fi
}
# init_neutron() - Initialize databases, etc.
-function init_neutron_new {
-
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- recreate_database neutron
-
+function init_neutron {
+ recreate_database $Q_DB_NAME
time_start "dbsync"
# Run Neutron db migrations
- $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads
+ $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
time_stop "dbsync"
}
# install_neutron() - Collect source and prepare
-function install_neutron_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
- setup_develop $NEUTRON_DIR
-
+function install_neutron {
# Install neutron-lib from git so we make sure we're testing
# the latest code.
if use_library_from_git "neutron-lib"; then
@@ -434,17 +514,12 @@
setup_dev_lib "neutron-lib"
fi
- # L3 service requires radvd
- if is_service_enabled neutron-l3; then
- install_package radvd
- fi
+ git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
+ setup_develop $NEUTRON_DIR
- if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then
- #TODO(sc68cal) - kind of ugly
- source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
- neutron_plugin_install_agent_packages
+ if [[ $Q_AGENT == "ovn" ]]; then
+ install_ovn
fi
-
}
# install_neutronclient() - Collect source and prepare
@@ -452,187 +527,33 @@
if use_library_from_git "python-neutronclient"; then
git_clone_by_name "python-neutronclient"
setup_dev_lib "python-neutronclient"
- sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion
fi
}
-# start_neutron_api() - Start the API process ahead of other things
-function start_neutron_api {
- local service_port=$NEUTRON_SERVICE_PORT
- local service_protocol=$NEUTRON_SERVICE_PROTOCOL
- local neutron_url
- if is_service_enabled tls-proxy; then
- service_port=$NEUTRON_SERVICE_PORT_INT
- service_protocol="http"
+# install_neutron_agent_packages() - Collect source and prepare
+function install_neutron_agent_packages {
+ # radvd doesn't come with the OS. Install it if the l3 service is enabled.
+ if is_service_enabled q-l3 neutron-l3; then
+ install_package radvd
fi
-
- local opts=""
- opts+=" --config-file $NEUTRON_CONF"
- opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
- local cfg_file
- for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do
- opts+=" --config-file $cfg_file"
- done
-
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
- neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
- enable_service neutron-rpc-server
- run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
- else
- # Start the Neutron service
- # TODO(sc68cal) Stop hard coding this
- run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
- neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
- # Start proxy if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
- fi
- fi
-
- if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
- die $LINENO "neutron-api did not start"
+ # install packages that are specific to plugin agent(s)
+ if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then
+ neutron_plugin_install_agent_packages
fi
}
-# start_neutron() - Start running processes
-function start_neutron_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- # Start up the neutron agents if enabled
- # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
- # can resolve the $NEUTRON_AGENT_BINARY
- if is_service_enabled neutron-agent; then
- # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files
- run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
+# Finish neutron configuration
+function configure_neutron_after_post_config {
+ if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
+ iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
fi
- if is_service_enabled neutron-dhcp; then
- neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
- run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF"
- fi
- if is_service_enabled neutron-l3; then
- run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF"
- fi
- if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
- # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
- # of the code in lib/neutron_plugins/services/l3
- if type -p neutron_plugin_create_initial_networks > /dev/null; then
- neutron_plugin_create_initial_networks
- else
- # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
- source $TOP_DIR/lib/neutron_plugins/services/l3
- # Create the networks using servic
- create_neutron_initial_network
- fi
- fi
- if is_service_enabled neutron-metadata-agent; then
- run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF"
- fi
-
- if is_service_enabled neutron-metering; then
- run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
- fi
-}
-
-# stop_neutron() - Stop running processes
-function stop_neutron_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- for serv in neutron-api neutron-agent neutron-l3; do
- stop_process $serv
- done
-
- if is_service_enabled neutron-rpc-server; then
- stop_process neutron-rpc-server
- fi
-
- if is_service_enabled neutron-dhcp; then
- stop_process neutron-dhcp
- pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
- [ ! -z "$pid" ] && sudo kill -9 $pid
- fi
-
- if is_service_enabled neutron-metadata-agent; then
- sudo pkill -9 -f neutron-ns-metadata-proxy || :
- stop_process neutron-metadata-agent
- fi
-}
-
-# neutron_service_plugin_class_add() - add service plugin class
-function neutron_service_plugin_class_add_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- local service_plugin_class=$1
- local plugins=""
-
- plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
- if [ $plugins ]; then
- plugins+=","
- fi
- plugins+="${service_plugin_class}"
- iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
-}
-
-function _neutron_ml2_extension_driver_add {
- local driver=$1
- local drivers=""
-
- drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers)
- if [ $drivers ]; then
- drivers+=","
- fi
- drivers+="${driver}"
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers
-}
-
-function neutron_server_config_add_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
-function neutron_deploy_rootwrap_filters_new {
- deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
- local srcdir=$1
- sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
- sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-}
-
-# Dispatch functions
-# These are needed for compatibility between the old and new implementations
-# where there are function name overlaps. These will be removed when
-# neutron-legacy is removed.
-# TODO(sc68cal) Remove when neutron-legacy is no more.
-function cleanup_neutron {
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- stop_process neutron-api
- stop_process neutron-rpc-server
- remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
- sudo rm -f $(apache_site_config_for neutron-api)
- fi
-
- if is_neutron_legacy_enabled; then
- # Call back to old function
- cleanup_mutnauq "$@"
- else
- cleanup_neutron_new "$@"
- fi
-}
-
-function configure_neutron {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- configure_mutnauq "$@"
- else
- configure_neutron_new "$@"
- fi
-
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
- fi
+ configure_rbac_policies
}
# configure_rbac_policies() - Configure Neutron to enforce new RBAC
# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+ if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
iniset $NEUTRON_CONF oslo_policy enforce_scope True
else
@@ -641,120 +562,595 @@
fi
}
-
-function configure_neutron_nova {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- create_nova_conf_neutron $NOVA_CONF
- if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
- for i in $(seq 1 $NOVA_NUM_CELLS); do
- local conf
- conf=$(conductor_conf $i)
- create_nova_conf_neutron $conf
- done
- fi
- else
- configure_neutron_nova_new $NOVA_CONF
- if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
- for i in $(seq 1 $NOVA_NUM_CELLS); do
- local conf
- conf=$(conductor_conf $i)
- configure_neutron_nova_new $conf
- done
+# Start running OVN processes
+function start_ovn_services {
+ if [[ $Q_AGENT == "ovn" ]]; then
+ init_ovn
+ start_ovn
+ if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
+ if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
+ echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
+ echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
+ else
+ create_public_bridge
+ fi
fi
fi
}
-function create_neutron_accounts {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- create_mutnauq_accounts "$@"
- else
- create_neutron_accounts_new "$@"
- fi
-}
+# Start running processes
+function start_neutron_service_and_check {
+ local service_port=$Q_PORT
+ local service_protocol=$Q_PROTOCOL
+ local cfg_file_options
+ local neutron_url
-function init_neutron {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- init_mutnauq "$@"
- else
- init_neutron_new "$@"
- fi
-}
+ cfg_file_options="$(determine_config_files neutron-server)"
-function install_neutron {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- install_mutnauq "$@"
- else
- install_neutron_new "$@"
+ if is_service_enabled tls-proxy; then
+ service_port=$Q_PORT_INT
+ service_protocol="http"
fi
-}
+ # Start the Neutron service
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ enable_service neutron-api
+ run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ neutron_url=$Q_PROTOCOL://$Q_HOST/
+ enable_service neutron-rpc-server
+ run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+ else
+ run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+ neutron_url=$service_protocol://$Q_HOST:$service_port/
+ # Start proxy if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+ fi
+ fi
+ if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+ neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+ fi
+ echo "Waiting for Neutron to start..."
-function neutron_service_plugin_class_add {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- _neutron_service_plugin_class_add "$@"
- else
- neutron_service_plugin_class_add_new "$@"
- fi
-}
-
-function neutron_ml2_extension_driver_add {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- _neutron_ml2_extension_driver_add_old "$@"
- else
- _neutron_ml2_extension_driver_add "$@"
- fi
-}
-
-function install_neutron_agent_packages {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- install_neutron_agent_packages_mutnauq "$@"
- else
- :
- fi
-}
-
-function neutron_server_config_add {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- mutnauq_server_config_add "$@"
- else
- neutron_server_config_add_new "$@"
- fi
+ local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
+ test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
}
function start_neutron {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- start_mutnauq_l2_agent "$@"
- start_mutnauq_other_agents "$@"
- else
- start_neutron_new "$@"
+ start_l2_agent "$@"
+ start_other_agents "$@"
+}
+
+# Control of the l2 agent is separated out to make it easier to test partial
+# upgrades (everything upgraded except the L2 agent)
+function start_l2_agent {
+ run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+
+ if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
+ sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
+ sudo ip link set $OVS_PHYSICAL_BRIDGE up
+ sudo ip link set br-int up
+ sudo ip link set $PUBLIC_INTERFACE up
+ if is_ironic_hardware; then
+ for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
+ sudo ip addr del $IP dev $PUBLIC_INTERFACE
+ sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
+ done
+ sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ fi
fi
}
+function start_other_agents {
+ run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
+
+ run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
+
+ run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
+ run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+}
+
+# Start running processes, including screen
+function start_neutron_agents {
+ # NOTE(slaweq): it's now just a wrapper for start_neutron function
+ start_neutron "$@"
+}
+
+function stop_l2_agent {
+ stop_process q-agt
+}
+
+# stop_other() - Stop running processes
+function stop_other {
+ if is_service_enabled q-dhcp neutron-dhcp; then
+ stop_process q-dhcp
+ pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
+ [ ! -z "$pid" ] && sudo kill -9 $pid
+ fi
+
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ stop_process neutron-rpc-server
+ stop_process neutron-api
+ else
+ stop_process q-svc
+ fi
+
+ if is_service_enabled q-l3 neutron-l3; then
+ sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
+ stop_process q-l3
+ fi
+
+ if is_service_enabled q-meta neutron-metadata-agent; then
+ stop_process q-meta
+ fi
+
+ if is_service_enabled q-metering neutron-metering; then
+ neutron_metering_stop
+ fi
+
+ if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+ # pkill takes care not to kill itself, but it may kill its parent
+ # sudo unless we use the "ps | grep [f]oo" trick
+ sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || :
+ fi
+}
+
+# stop_neutron() - Stop running processes (non-screen)
function stop_neutron {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- stop_mutnauq "$@"
- else
- stop_neutron_new "$@"
+ stop_other
+ stop_l2_agent
+
+ if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
+ stop_ovn
fi
}
-function neutron_deploy_rootwrap_filters {
- if is_neutron_legacy_enabled; then
- # Call back to old function
- _neutron_deploy_rootwrap_filters "$@"
+# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
+# on startup, or back to the public interface on cleanup. If no IP is
+# configured on the interface, just add it as a port to the OVS bridge.
+function _move_neutron_addresses_route {
+ local from_intf=$1
+ local to_intf=$2
+ local add_ovs_port=$3
+ local del_ovs_port=$4
+ local af=$5
+
+ if [[ -n "$from_intf" && -n "$to_intf" ]]; then
+ # Remove the primary IP address from $from_intf and add it to $to_intf,
+ # along with the default route, if it exists. Also, when called
+ # on configure we will also add $from_intf as a port on $to_intf,
+ # assuming it is an OVS bridge.
+
+ local IP_REPLACE=""
+ local IP_DEL=""
+ local IP_UP=""
+ local DEFAULT_ROUTE_GW
+ DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
+ local ADD_OVS_PORT=""
+ local DEL_OVS_PORT=""
+ local ARP_CMD=""
+
+ IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
+
+ if [ "$DEFAULT_ROUTE_GW" != "" ]; then
+ ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+ fi
+
+ if [[ "$add_ovs_port" == "True" ]]; then
+ ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
+ fi
+
+ if [[ "$del_ovs_port" == "True" ]]; then
+ DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+ fi
+
+ if [[ "$IP_BRD" != "" ]]; then
+ IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+ IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
+ IP_UP="sudo ip link set $to_intf up"
+ if [[ "$af" == "inet" ]]; then
+ IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
+ ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
+ fi
+ fi
+
+ # The add/del OVS port calls have to happen either before or
+ # after the address is moved in order to not leave it orphaned.
+ $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
+ fi
+}
+
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+ # If we've given a PUBLIC_INTERFACE to take over, then we assume
+ # that we can own the whole thing, and privot it into the OVS
+ # bridge. If we are not, we're probably on a single interface
+ # machine, and we just setup NAT so that fixed guests can get out.
+ if [[ -n "$PUBLIC_INTERFACE" ]]; then
+ _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+ if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+ _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+ fi
else
- neutron_deploy_rootwrap_filters_new "$@"
+ for d in $default_v4_route_devs; do
+ sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+ done
+ fi
+}
+
+# cleanup_neutron() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_neutron {
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ stop_process neutron-api
+ stop_process neutron-rpc-server
+ remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+ sudo rm -f $(apache_site_config_for neutron-api)
+ fi
+
+ if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
+
+ if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+ # ip(8) wants the prefix length when deleting
+ local v6_gateway
+ v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
+ sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
+ fi
+
+ if is_provider_network && is_ironic_hardware; then
+ for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+ sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+ sudo ip addr add $IP dev $PUBLIC_INTERFACE
+ done
+ sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ fi
+ fi
+
+ if is_neutron_ovs_base_plugin; then
+ neutron_ovs_base_cleanup
+ fi
+
+ if [[ $Q_AGENT == "linuxbridge" ]]; then
+ neutron_lb_cleanup
+ fi
+
+ # delete all namespaces created by neutron
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
+ sudo ip netns delete ${ns}
+ done
+
+ if [[ $Q_AGENT == "ovn" ]]; then
+ cleanup_ovn
+ fi
+}
+
+
+function _create_neutron_conf_dir {
+ # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
+ sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
+}
+
+# _configure_neutron_common()
+# Set common config for all neutron server and agents.
+# This MUST be called before other ``_configure_neutron_*`` functions.
+function _configure_neutron_common {
+ _create_neutron_conf_dir
+
+ # Uses oslo config generator to generate core sample configuration files
+ (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+ cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
+
+ Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
+
+ # allow neutron user to administer neutron to match neutron account
+ # NOTE(amotoki): This is required for nova works correctly with neutron.
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+ cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+ sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+ else
+ echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+ fi
+
+ # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
+ # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
+ neutron_plugin_configure_common
+
+ if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
+ die $LINENO "Neutron plugin not set.. exiting"
+ fi
+
+ # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
+ mkdir -p /$Q_PLUGIN_CONF_PATH
+ Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+ # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in neutron repository,
+ # it was previously defined in the lib/neutron module which is now deleted.
+ NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE
+ # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
+ # there is no config file in Neutron tree. They should prepare the file in each plugin.
+ if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
+ cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
+ elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
+ cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+ fi
+
+ iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
+ iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
+ iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
+ iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
+ iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
+
+ # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
+ iniset $NEUTRON_CONF nova region_name $REGION_NAME
+
+ if [ "$VIRT_DRIVER" = 'fake' ]; then
+ # Disable arbitrary limits
+ iniset $NEUTRON_CONF quotas quota_network -1
+ iniset $NEUTRON_CONF quotas quota_subnet -1
+ iniset $NEUTRON_CONF quotas quota_port -1
+ iniset $NEUTRON_CONF quotas quota_security_group -1
+ iniset $NEUTRON_CONF quotas quota_security_group_rule -1
+ fi
+
+ # Format logging
+ setup_logging $NEUTRON_CONF
+
+ if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
+ # Set the service port for a proxy to take the original
+ iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+ iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
+ fi
+
+ _neutron_setup_rootwrap
+}
+
+function _configure_neutron_dhcp_agent {
+
+ cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
+
+ iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ # make it so we have working DNS from guests
+ iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
+ configure_root_helper_options $Q_DHCP_CONF_FILE
+
+ if ! is_service_enabled q-l3 neutron-l3; then
+ if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
+ iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
+ iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
+ else
+ if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
+ die "$LINENO" "Enable isolated metadata is a must for metadata network"
+ fi
+ fi
+ fi
+
+ _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
+
+ neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
+}
+
+
+function _configure_neutron_metadata_agent {
+ cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
+
+ iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
+ iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
+ configure_root_helper_options $Q_META_CONF_FILE
+}
+
+function _configure_neutron_ceilometer_notifications {
+ iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
+}
+
+function _configure_neutron_metering {
+ neutron_agent_metering_configure_common
+ neutron_agent_metering_configure_agent
+}
+
+function _configure_dvr {
+ iniset $NEUTRON_CONF DEFAULT router_distributed True
+ iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
+}
+
+
+# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
+# It is called when q-agt is enabled.
+function _configure_neutron_plugin_agent {
+ # Specify the default root helper prior to agent configuration to
+ # ensure that an agent's configuration can override the default
+ configure_root_helper_options /$Q_PLUGIN_CONF_FILE
+ iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+
+ # Configure agent for plugin
+ neutron_plugin_configure_plugin_agent
+}
+
+function _replace_api_paste_composite {
+ local sep
+ sep=$(echo -ne "\x01")
+ # Replace it
+ $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE"
+ $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE"
+ $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE"
+}
+
+# _configure_neutron_service() - Set config files for neutron service
+# It is called when q-svc is enabled.
+function _configure_neutron_service {
+ Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
+ cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+
+ if [[ -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+ _replace_api_paste_composite
+ fi
+
+ # Update either configuration file with plugin
+ iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
+
+ iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
+
+ iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
+ configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
+
+ # Configuration for neutron notifications to nova.
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
+
+ configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
+
+ # Configuration for placement client
+ configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
+
+ # Configure plugin
+ neutron_plugin_configure_service
+}
+
+# Utility Functions
+#------------------
+
+# neutron_service_plugin_class_add() - add service plugin class
+function neutron_service_plugin_class_add {
+ local service_plugin_class=$1
+ if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+ Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
+ elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
+ Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
+ fi
+}
+
+# neutron_ml2_extension_driver_add() - add ML2 extension driver
+function neutron_ml2_extension_driver_add {
+ local extension=$1
+ if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
+ Q_ML2_PLUGIN_EXT_DRIVERS=$extension
+ elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
+ Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
+ fi
+}
+
+# neutron_server_config_add() - add server config file
+function neutron_server_config_add {
+ _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
+}
+
+# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
+function neutron_deploy_rootwrap_filters {
+ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+ return
+ fi
+ local srcdir=$1
+ sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
+ sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
+}
+
+# _neutron_setup_rootwrap() - configure Neutron's rootwrap
+function _neutron_setup_rootwrap {
+ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+ return
+ fi
+ # Wipe any existing ``rootwrap.d`` files first
+ Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
+ if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
+ sudo rm -rf $Q_CONF_ROOTWRAP_D
+ fi
+
+ neutron_deploy_rootwrap_filters $NEUTRON_DIR
+
+ # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
+ # location moved in newer versions, prefer new location
+ if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
+ sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
+ else
+ sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+ fi
+ sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
+ sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
+
+ # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
+ ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
+ ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+
+ # Set up the rootwrap sudoers for neutron
+ TEMPFILE=`mktemp`
+ echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+ echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
+ chmod 0440 $TEMPFILE
+ sudo chown root:root $TEMPFILE
+ sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
+
+ # Update the root_helper
+ configure_root_helper_options $NEUTRON_CONF
+}
+
+function configure_root_helper_options {
+ local conffile=$1
+ iniset $conffile agent root_helper "$Q_RR_COMMAND"
+ if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+ iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+ fi
+}
+
+function _neutron_setup_interface_driver {
+
+ # ovs_use_veth needs to be set before the plugin configuration
+ # occurs to allow plugins to override the setting.
+ iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+ neutron_plugin_setup_interface_driver $1
+}
+# Functions for Neutron Exercises
+#--------------------------------
+
+function delete_probe {
+ local from_net="$1"
+ net_id=`_get_net_id $from_net`
+ probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
+ neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
+}
+
+function _get_net_id {
+ openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
+}
+
+function _get_probe_cmd_prefix {
+ local from_net="$1"
+ net_id=`_get_net_id $from_net`
+ probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
+ echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
+}
+
+# ssh check
+function _ssh_check_neutron {
+ local from_net=$1
+ local key_file=$2
+ local ip=$3
+ local user=$4
+ local timeout_sec=$5
+ local probe_cmd = ""
+ probe_cmd=`_get_probe_cmd_prefix $from_net`
+ local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+ test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
+}
+
+function plugin_agent_add_l2_agent_extension {
+ local l2_agent_extension=$1
+ if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+ L2_AGENT_EXTENSIONS=$l2_agent_extension
+ elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+ L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
fi
}
# Restore xtrace
-$XTRACE
+$_XTRACE_NEUTRON
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index b906a1b..e90400f 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -1,1075 +1,6 @@
#!/bin/bash
-#
-# lib/neutron
-# functions - functions specific to neutron
-# Dependencies:
-# ``functions`` file
-# ``DEST`` must be defined
-# ``STACK_USER`` must be defined
+# TODO(slaweq): remove that file when other projects, like e.g. Grenade will
+# be using lib/neutron
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_neutron_agent_packages
-# - install_neutronclient
-# - install_neutron
-# - install_neutron_third_party
-# - configure_neutron
-# - init_neutron
-# - configure_neutron_third_party
-# - init_neutron_third_party
-# - start_neutron_third_party
-# - create_nova_conf_neutron
-# - configure_neutron_after_post_config
-# - start_neutron_service_and_check
-# - check_neutron_third_party_integration
-# - start_neutron_agents
-# - create_neutron_initial_network
-#
-# ``unstack.sh`` calls the entry points in this order:
-#
-# - stop_neutron
-# - stop_neutron_third_party
-# - cleanup_neutron
-
-# Functions in lib/neutron are classified into the following categories:
-#
-# - entry points (called from stack.sh or unstack.sh)
-# - internal functions
-# - neutron exercises
-# - 3rd party programs
-
-
-# Neutron Networking
-# ------------------
-
-# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want
-# to run Neutron on this host, make sure that q-svc is also in
-# ``ENABLED_SERVICES``.
-#
-# See "Neutron Network Configuration" below for additional variables
-# that must be set in localrc for connectivity across hosts with
-# Neutron.
-
-# Settings
-# --------
-
-
-# Neutron Network Configuration
-# -----------------------------
-
-if is_service_enabled tls-proxy; then
- Q_PROTOCOL="https"
-fi
-
-
-# Set up default directories
-GITDIR["python-neutronclient"]=$DEST/python-neutronclient
-
-
-NEUTRON_DIR=$DEST/neutron
-NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-
-# Support entry points installation of console scripts
-if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
- NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
-else
- NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
-
-# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
-# - False (default) : Run neutron under Eventlet
-# - True : Run neutron under uwsgi
-# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
-# enough
-NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
-# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
-
-# Agent binaries. Note, binary paths for other agents are set in per-service
-# scripts in lib/neutron_plugins/services/
-AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
-AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
-AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
-
-# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
-# loaded from per-plugin scripts in lib/neutron_plugins/
-Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
-Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
-
-# Default name for Neutron database
-Q_DB_NAME=${Q_DB_NAME:-neutron}
-# Default Neutron Plugin
-Q_PLUGIN=${Q_PLUGIN:-ml2}
-# Default Neutron Port
-Q_PORT=${Q_PORT:-9696}
-# Default Neutron Internal Port when using TLS proxy
-Q_PORT_INT=${Q_PORT_INT:-19696}
-# Default Neutron Host
-Q_HOST=${Q_HOST:-$SERVICE_HOST}
-# Default protocol
-Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
-# Default listen address
-Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
-# Default admin username
-Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
-# Default auth strategy
-Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# RHEL's support for namespaces requires using veths with ovs
-Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
-Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
-Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
-# Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
-# Allow Overlapping IP among subnets
-Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
-Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
-VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
-VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
-
-# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
-# /etc/neutron is assumed by many of devstack plugins. Do not change.
-_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
-
-# List of config file names in addition to the main plugin config file
-# To add additional plugin config files, use ``neutron_server_config_add``
-# utility function. For example:
-#
-# ``neutron_server_config_add file1``
-#
-# These config files are relative to ``/etc/neutron``. The above
-# example would specify ``--config-file /etc/neutron/file1`` for
-# neutron server.
-declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
-
-# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
-declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
-
-
-Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
- Q_RR_COMMAND="sudo"
-else
- NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
- Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
- fi
-fi
-
-
-# Distributed Virtual Router (DVR) configuration
-# Can be:
-# - ``legacy`` - No DVR functionality
-# - ``dvr_snat`` - Controller or single node DVR
-# - ``dvr`` - Compute node in multi-node DVR
-#
-Q_DVR_MODE=${Q_DVR_MODE:-legacy}
-if [[ "$Q_DVR_MODE" != "legacy" ]]; then
- Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
-fi
-
-# Provider Network Configurations
-# --------------------------------
-
-# The following variables control the Neutron ML2 plugins' allocation
-# of tenant networks and availability of provider networks. If these
-# are not configured in ``localrc``, tenant networks will be local to
-# the host (with no remote connectivity), and no physical resources
-# will be available for the allocation of provider networks.
-
-# To disable tunnels (GRE or VXLAN) for tenant networks,
-# set to False in ``local.conf``.
-# GRE tunnels are only supported by the openvswitch.
-ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
-
-# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
-# specify the range of IDs from which tenant networks are
-# allocated. Can be overridden in ``localrc`` if necessary.
-TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
-
-# To use VLANs for tenant networks, set to True in localrc. VLANs
-# are supported by the ML2 plugins, requiring additional configuration
-# described below.
-ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
-# If using VLANs for tenant networks, set in ``localrc`` to specify
-# the range of VLAN VIDs from which tenant networks are
-# allocated. An external network switch must be configured to
-# trunk these VLANs between hosts for multi-host connectivity.
-#
-# Example: ``TENANT_VLAN_RANGE=1000:1999``
-TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
-# If using VLANs for tenant networks, or if using flat or VLAN
-# provider networks, set in ``localrc`` to the name of the physical
-# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
-# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
-# agent, as described below.
-#
-# Example: ``PHYSICAL_NETWORK=default``
-PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
-
-# With the openvswitch agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the OVS bridge to use for the physical network. The
-# bridge will be created if it does not already exist, but a
-# physical interface must be manually added to the bridge as a
-# port for external connectivity.
-#
-# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
-
-# With the linuxbridge agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the network interface to use for the physical
-# network.
-#
-# Example: ``LB_PHYSICAL_INTERFACE=eth1``
-if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
- default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
- die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
- LB_PHYSICAL_INTERFACE=$default_route_dev
-fi
-
-# When Neutron tunnels are enabled it is needed to specify the
-# IP address of the end point in the local server. This IP is set
-# by default to the same IP address that the HOST IP.
-# This variable can be used to specify a different end point IP address
-# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1``
-TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP}
-
-# With the openvswitch plugin, set to True in ``localrc`` to enable
-# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
-#
-# Example: ``OVS_ENABLE_TUNNELING=True``
-OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
-
-# Use DHCP agent for providing metadata service in the case of
-# without L3 agent (No Route Agent), set to True in localrc.
-ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
-
-# Add a static route as dhcp option, so the request to 169.254.169.254
-# will be able to reach through a route(DHCP agent)
-# This option require ENABLE_ISOLATED_METADATA = True
-ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
-# Neutron plugin specific functions
-# ---------------------------------
-
-# Please refer to ``lib/neutron_plugins/README.md`` for details.
-if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
- source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
-fi
-
-# Agent metering service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/metering
-
-# L3 Service functions
-source $TOP_DIR/lib/neutron_plugins/services/l3
-
-# Additional Neutron service plugins
-source $TOP_DIR/lib/neutron_plugins/services/placement
-source $TOP_DIR/lib/neutron_plugins/services/trunk
-source $TOP_DIR/lib/neutron_plugins/services/qos
-
-# Use security group or not
-if has_neutron_plugin_security_group; then
- Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
-else
- Q_USE_SECGROUP=False
-fi
-
-# Save trace setting
-_XTRACE_NEUTRON=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Functions
-# ---------
-
-function _determine_config_server {
- if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
- if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
- deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
- else
- die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
- fi
- fi
- if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
- deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated. Use neutron_server_config_add instead."
- fi
- for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
- _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
- done
-
- local cfg_file
- local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
- opts+=" --config-file $cfg_file"
- done
- echo "$opts"
-}
-
-function _determine_config_l3 {
- local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
- echo "$opts"
-}
-
-# For services and agents that require it, dynamically construct a list of
-# --config-file arguments that are passed to the binary.
-function determine_config_files {
- local opts=""
- case "$1" in
- "neutron-server") opts="$(_determine_config_server)" ;;
- "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
- esac
- if [ -z "$opts" ] ; then
- die $LINENO "Could not determine config files for $1."
- fi
- echo "$opts"
-}
-
-# configure_mutnauq()
-# Set common config for all neutron server and agents.
-function configure_mutnauq {
- _configure_neutron_common
- iniset_rpc_backend neutron $NEUTRON_CONF
-
- if is_service_enabled q-metering; then
- _configure_neutron_metering
- fi
- if is_service_enabled q-agt q-svc; then
- _configure_neutron_service
- fi
- if is_service_enabled q-agt; then
- _configure_neutron_plugin_agent
- fi
- if is_service_enabled q-dhcp; then
- _configure_neutron_dhcp_agent
- fi
- if is_service_enabled q-l3; then
- _configure_neutron_l3_agent
- fi
- if is_service_enabled q-meta; then
- _configure_neutron_metadata_agent
- fi
-
- if [[ "$Q_DVR_MODE" != "legacy" ]]; then
- _configure_dvr
- fi
- if is_service_enabled ceilometer; then
- _configure_neutron_ceilometer_notifications
- fi
-
- if [[ $Q_AGENT == "ovn" ]]; then
- configure_ovn
- configure_ovn_plugin
- fi
-
- # Configure Neutron's advanced services
- if is_service_enabled q-placement neutron-placement; then
- configure_placement_extension
- fi
- if is_service_enabled q-trunk neutron-trunk; then
- configure_trunk_extension
- fi
- if is_service_enabled q-qos neutron-qos; then
- configure_qos
- if is_service_enabled q-l3 neutron-l3; then
- configure_l3_agent_extension_fip_qos
- configure_l3_agent_extension_gateway_ip_qos
- fi
- fi
-
- iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
- # devstack is not a tool for running uber scale OpenStack
- # clouds, therefore running without a dedicated RPC worker
- # for state reports is more than adequate.
- iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
-}
-
-function create_nova_conf_neutron {
- local conf=${1:-$NOVA_CONF}
- iniset $conf neutron auth_type "password"
- iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf neutron username "$Q_ADMIN_USERNAME"
- iniset $conf neutron password "$SERVICE_PASSWORD"
- iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
- iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
- iniset $conf neutron region_name "$REGION_NAME"
-
- # optionally set options in nova_conf
- neutron_plugin_create_nova_conf $conf
-
- if is_service_enabled q-meta; then
- iniset $conf neutron service_metadata_proxy "True"
- fi
-
- iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
- iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
-}
-
-# create_mutnauq_accounts() - Set up common required neutron accounts
-
-# Tenant User Roles
-# ------------------------------------------------------------------
-# service neutron admin # if enabled
-
-# Migrated from keystone_data.sh
-function create_mutnauq_accounts {
- local neutron_url
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
- else
- neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
- fi
-
- if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-
- create_service_user "neutron"
-
- get_or_create_service "neutron" "network" "Neutron Service"
- get_or_create_endpoint \
- "network" \
- "$REGION_NAME" "$neutron_url"
- fi
-}
-
-# init_mutnauq() - Initialize databases, etc.
-function init_mutnauq {
- recreate_database $Q_DB_NAME
- time_start "dbsync"
- # Run Neutron db migrations
- $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
- time_stop "dbsync"
-}
-
-# install_mutnauq() - Collect source and prepare
-function install_mutnauq {
- # Install neutron-lib from git so we make sure we're testing
- # the latest code.
- if use_library_from_git "neutron-lib"; then
- git_clone_by_name "neutron-lib"
- setup_dev_lib "neutron-lib"
- fi
-
- git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
- setup_develop $NEUTRON_DIR
-
- if [[ $Q_AGENT == "ovn" ]]; then
- install_ovn
- fi
-}
-
-# install_neutron_agent_packages() - Collect source and prepare
-function install_neutron_agent_packages_mutnauq {
- # radvd doesn't come with the OS. Install it if the l3 service is enabled.
- if is_service_enabled q-l3; then
- install_package radvd
- fi
- # install packages that are specific to plugin agent(s)
- if is_service_enabled q-agt q-dhcp q-l3; then
- neutron_plugin_install_agent_packages
- fi
-}
-
-# Finish neutron configuration
-function configure_neutron_after_post_config {
- if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
- iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
- fi
- configure_rbac_policies
-}
-
-# configure_rbac_policies() - Configure Neutron to enforce new RBAC
-# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
-function configure_rbac_policies {
- if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
- iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
- iniset $NEUTRON_CONF oslo_policy enforce_scope True
- else
- iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
- iniset $NEUTRON_CONF oslo_policy enforce_scope False
- fi
-}
-
-# Start running OVN processes
-function start_ovn_services {
- if [[ $Q_AGENT == "ovn" ]]; then
- init_ovn
- start_ovn
- if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
- if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
- echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
- echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
- else
- create_public_bridge
- fi
- fi
- fi
-}
-
-# Start running processes
-function start_neutron_service_and_check {
- local service_port=$Q_PORT
- local service_protocol=$Q_PROTOCOL
- local cfg_file_options
- local neutron_url
-
- cfg_file_options="$(determine_config_files neutron-server)"
-
- if is_service_enabled tls-proxy; then
- service_port=$Q_PORT_INT
- service_protocol="http"
- fi
- # Start the Neutron service
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- enable_service neutron-api
- run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
- neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
- enable_service neutron-rpc-server
- run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
- else
- run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
- neutron_url=$service_protocol://$Q_HOST:$service_port
- # Start proxy if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
- fi
- fi
- echo "Waiting for Neutron to start..."
-
- local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
- test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-}
-
-# Control of the l2 agent is separated out to make it easier to test partial
-# upgrades (everything upgraded except the L2 agent)
-function start_mutnauq_l2_agent {
- run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-
- if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
- sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
- sudo ip link set $OVS_PHYSICAL_BRIDGE up
- sudo ip link set br-int up
- sudo ip link set $PUBLIC_INTERFACE up
- if is_ironic_hardware; then
- for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
- sudo ip addr del $IP dev $PUBLIC_INTERFACE
- sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
- done
- sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
- fi
- fi
-}
-
-function start_mutnauq_other_agents {
- run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
-
- run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
-
- run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
- run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
-}
-
-# Start running processes, including screen
-function start_neutron_agents {
- # Start up the neutron agents if enabled
- start_mutnauq_l2_agent
- start_mutnauq_other_agents
-}
-
-function stop_mutnauq_l2_agent {
- stop_process q-agt
-}
-
-# stop_mutnauq_other() - Stop running processes
-function stop_mutnauq_other {
- if is_service_enabled q-dhcp; then
- stop_process q-dhcp
- pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
- [ ! -z "$pid" ] && sudo kill -9 $pid
- fi
-
- if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- stop_process neutron-rpc-server
- stop_process neutron-api
- else
- stop_process q-svc
- fi
-
- if is_service_enabled q-l3; then
- sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
- stop_process q-l3
- fi
-
- if is_service_enabled q-meta; then
- sudo pkill -9 -f neutron-ns-metadata-proxy || :
- stop_process q-meta
- fi
-
- if is_service_enabled q-metering; then
- neutron_metering_stop
- fi
-
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || :
- fi
-}
-
-# stop_neutron() - Stop running processes (non-screen)
-function stop_mutnauq {
- stop_mutnauq_other
- stop_mutnauq_l2_agent
-
- if [[ $Q_AGENT == "ovn" ]]; then
- stop_ovn
- fi
-}
-
-# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
-# on startup, or back to the public interface on cleanup. If no IP is
-# configured on the interface, just add it as a port to the OVS bridge.
-function _move_neutron_addresses_route {
- local from_intf=$1
- local to_intf=$2
- local add_ovs_port=$3
- local del_ovs_port=$4
- local af=$5
-
- if [[ -n "$from_intf" && -n "$to_intf" ]]; then
- # Remove the primary IP address from $from_intf and add it to $to_intf,
- # along with the default route, if it exists. Also, when called
- # on configure we will also add $from_intf as a port on $to_intf,
- # assuming it is an OVS bridge.
-
- local IP_REPLACE=""
- local IP_DEL=""
- local IP_UP=""
- local DEFAULT_ROUTE_GW
- DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
- local ADD_OVS_PORT=""
- local DEL_OVS_PORT=""
- local ARP_CMD=""
-
- IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
-
- if [ "$DEFAULT_ROUTE_GW" != "" ]; then
- ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
- fi
-
- if [[ "$add_ovs_port" == "True" ]]; then
- ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
- fi
-
- if [[ "$del_ovs_port" == "True" ]]; then
- DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
- fi
-
- if [[ "$IP_BRD" != "" ]]; then
- IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
- IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
- IP_UP="sudo ip link set $to_intf up"
- if [[ "$af" == "inet" ]]; then
- IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
- ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
- fi
- fi
-
- # The add/del OVS port calls have to happen either before or
- # after the address is moved in order to not leave it orphaned.
- $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
- fi
-}
-
-# _configure_public_network_connectivity() - Configures connectivity to the
-# external network using $PUBLIC_INTERFACE or NAT on the single interface
-# machines
-function _configure_public_network_connectivity {
- # If we've given a PUBLIC_INTERFACE to take over, then we assume
- # that we can own the whole thing, and privot it into the OVS
- # bridge. If we are not, we're probably on a single interface
- # machine, and we just setup NAT so that fixed guests can get out.
- if [[ -n "$PUBLIC_INTERFACE" ]]; then
- _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
- if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
- fi
- else
- for d in $default_v4_route_devs; do
- sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
- done
- fi
-}
-
-# cleanup_mutnauq() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_mutnauq {
-
- if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
-
- if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
- # ip(8) wants the prefix length when deleting
- local v6_gateway
- v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
- sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
- fi
-
- if is_provider_network && is_ironic_hardware; then
- for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
- sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
- sudo ip addr add $IP dev $PUBLIC_INTERFACE
- done
- sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
- fi
- fi
-
- if is_neutron_ovs_base_plugin; then
- neutron_ovs_base_cleanup
- fi
-
- if [[ $Q_AGENT == "linuxbridge" ]]; then
- neutron_lb_cleanup
- fi
-
- # delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
- sudo ip netns delete ${ns}
- done
-
- if [[ $Q_AGENT == "ovn" ]]; then
- cleanup_ovn
- fi
-}
-
-
-function _create_neutron_conf_dir {
- # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
- sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-}
-
-# _configure_neutron_common()
-# Set common config for all neutron server and agents.
-# This MUST be called before other ``_configure_neutron_*`` functions.
-function _configure_neutron_common {
- _create_neutron_conf_dir
-
- # Uses oslo config generator to generate core sample configuration files
- (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
- cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
- Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
-
- # allow neutron user to administer neutron to match neutron account
- # NOTE(amotoki): This is required for nova works correctly with neutron.
- if [ -f $NEUTRON_DIR/etc/policy.json ]; then
- cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
- sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
- else
- echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
- fi
-
- # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
- # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
- neutron_plugin_configure_common
-
- if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
- die $LINENO "Neutron plugin not set.. exiting"
- fi
-
- # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
- mkdir -p /$Q_PLUGIN_CONF_PATH
- Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
- # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
- # there is no config file in Neutron tree. They should prepare the file in each plugin.
- if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
- cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
- elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
- cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
- fi
-
- iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
- iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
- iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
- iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
- iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
-
- # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
- iniset $NEUTRON_CONF nova region_name $REGION_NAME
-
- if [ "$VIRT_DRIVER" = 'fake' ]; then
- # Disable arbitrary limits
- iniset $NEUTRON_CONF quotas quota_network -1
- iniset $NEUTRON_CONF quotas quota_subnet -1
- iniset $NEUTRON_CONF quotas quota_port -1
- iniset $NEUTRON_CONF quotas quota_security_group -1
- iniset $NEUTRON_CONF quotas quota_security_group_rule -1
- fi
-
- # Format logging
- setup_logging $NEUTRON_CONF
-
- if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
- # Set the service port for a proxy to take the original
- iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
- iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
- fi
-
- _neutron_setup_rootwrap
-}
-
-function _configure_neutron_dhcp_agent {
-
- cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
-
- iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- # make it so we have working DNS from guests
- iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
- iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
- fi
-
- if ! is_service_enabled q-l3; then
- if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
- else
- if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
- die "$LINENO" "Enable isolated metadata is a must for metadata network"
- fi
- fi
- fi
-
- _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
-
- neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
-}
-
-
-function _configure_neutron_metadata_agent {
- cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
-
- iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
- iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
- iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
- fi
-}
-
-function _configure_neutron_ceilometer_notifications {
- iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
-}
-
-function _configure_neutron_metering {
- neutron_agent_metering_configure_common
- neutron_agent_metering_configure_agent
-}
-
-function _configure_dvr {
- iniset $NEUTRON_CONF DEFAULT router_distributed True
- iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
-}
-
-
-# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
-# It is called when q-agt is enabled.
-function _configure_neutron_plugin_agent {
- # Specify the default root helper prior to agent configuration to
- # ensure that an agent's configuration can override the default
- iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
- fi
- iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
- # Configure agent for plugin
- neutron_plugin_configure_plugin_agent
-}
-
-# _configure_neutron_service() - Set config files for neutron service
-# It is called when q-svc is enabled.
-function _configure_neutron_service {
- Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
- cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
-
- # Update either configuration file with plugin
- iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
-
- iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
- iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
-
- iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
- configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
-
- # Configuration for neutron notifications to nova.
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
- configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
- # Configure plugin
- neutron_plugin_configure_service
-}
-
-# Utility Functions
-#------------------
-
-# _neutron_service_plugin_class_add() - add service plugin class
-function _neutron_service_plugin_class_add {
- local service_plugin_class=$1
- if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
- Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
- elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
- Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
- fi
-}
-
-# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver
-function _neutron_ml2_extension_driver_add_old {
- local extension=$1
- if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
- Q_ML2_PLUGIN_EXT_DRIVERS=$extension
- elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
- Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
- fi
-}
-
-# mutnauq_server_config_add() - add server config file
-function mutnauq_server_config_add {
- _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
-function _neutron_deploy_rootwrap_filters {
- if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
- return
- fi
- local srcdir=$1
- sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
- sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
-}
-
-# _neutron_setup_rootwrap() - configure Neutron's rootwrap
-function _neutron_setup_rootwrap {
- if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
- return
- fi
- # Wipe any existing ``rootwrap.d`` files first
- Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
- if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
- sudo rm -rf $Q_CONF_ROOTWRAP_D
- fi
-
- _neutron_deploy_rootwrap_filters $NEUTRON_DIR
-
- # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
- # location moved in newer versions, prefer new location
- if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
- sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
- else
- sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
- fi
- sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
- sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
-
- # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
- ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
- ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
-
- # Set up the rootwrap sudoers for neutron
- TEMPFILE=`mktemp`
- echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
- echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
- chmod 0440 $TEMPFILE
- sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
-
- # Update the root_helper
- iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND"
- if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
- iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
- fi
-}
-
-function _neutron_setup_interface_driver {
-
- # ovs_use_veth needs to be set before the plugin configuration
- # occurs to allow plugins to override the setting.
- iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
- neutron_plugin_setup_interface_driver $1
-}
-# Functions for Neutron Exercises
-#--------------------------------
-
-function delete_probe {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
- neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
-function _get_net_id {
- openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
-}
-
-function _get_probe_cmd_prefix {
- local from_net="$1"
- net_id=`_get_net_id $from_net`
- probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
- echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
-}
-
-# ssh check
-function _ssh_check_neutron {
- local from_net=$1
- local key_file=$2
- local ip=$3
- local user=$4
- local timeout_sec=$5
- local probe_cmd = ""
- probe_cmd=`_get_probe_cmd_prefix $from_net`
- local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
- test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
-}
-
-function plugin_agent_add_l2_agent_extension {
- local l2_agent_extension=$1
- if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
- L2_AGENT_EXTENSIONS=$l2_agent_extension
- elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
- L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
- fi
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
+source $TOP_DIR/lib/neutron
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index ed40886..728aaee 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -13,7 +13,7 @@
functions
---------
-``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled
+``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled
* ``neutron_plugin_create_nova_conf`` :
optionally set options in nova_conf
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index d3f5bd5..84ca7ec 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -67,7 +67,7 @@
}
function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+ is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
}
# Restore xtrace
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 310b72e..9640063 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -72,7 +72,7 @@
}
function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+ is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
}
# Restore xtrace
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index bdeaf0f..a392bd0 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -97,7 +97,7 @@
}
function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+ is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
}
# Restore xtrace
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index f00feac..c2e78c6 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -67,7 +67,7 @@
Q_PLUGIN_CLASS="ml2"
# The ML2 plugin delegates L3 routing/NAT functionality to
# the L3 service plugin which must therefore be specified.
- _neutron_service_plugin_class_add $ML2_L3_PLUGIN
+ neutron_service_plugin_class_add $ML2_L3_PLUGIN
}
function neutron_plugin_configure_service {
@@ -111,20 +111,13 @@
fi
fi
fi
- # REVISIT(rkukura): Setting firewall_driver here for
- # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
- # used in the server, in case no L2 agent is configured on the
- # server's node. If an L2 agent is configured, this will get
- # overridden with the correct driver. The ml2 plugin should
- # instead use its own config variable to indicate whether security
- # groups is enabled, and that will need to be set here instead.
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
- else
- iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
- fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
+ if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then
+ iniset $NEUTRON_CONF experimental linuxbridge True
+ fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION
if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 7fed8bf..6e79984 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -68,7 +68,7 @@
}
function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+ is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
}
# Restore xtrace
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index 2938f47..3526ccd 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -99,8 +99,10 @@
OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+TUNNEL_IP=$TUNNEL_ENDPOINT_IP
if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+ TUNNEL_IP=[$TUNNEL_IP]
fi
OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
@@ -169,12 +171,23 @@
# Utility Functions
# -----------------
+function wait_for_db_file {
+ local count=0
+ while [ ! -f $1 ]; do
+ sleep 1
+ count=$((count+1))
+ if [ "$count" -gt 40 ]; then
+ die $LINENO "DB File $1 not found"
+ fi
+ done
+}
+
function wait_for_sock_file {
local count=0
while [ ! -S $1 ]; do
sleep 1
count=$((count+1))
- if [ "$count" -gt 5 ]; then
+ if [ "$count" -gt 40 ]; then
die $LINENO "Socket $1 not found"
fi
done
@@ -231,11 +244,12 @@
local cmd="$2"
local stop_cmd="$3"
local group=$4
- local user=${5:-$STACK_USER}
+ local user=$5
+ local rundir=${6:-$OVS_RUNDIR}
local systemd_service="devstack@$service.service"
local unit_file="$SYSTEMD_DIR/$systemd_service"
- local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
+ local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
echo "Starting $service executed command": $cmd
@@ -251,14 +265,14 @@
_start_process $systemd_service
- local testcmd="test -e $OVS_RUNDIR/$service.pid"
+ local testcmd="test -e $rundir/$service.pid"
test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
local service_ctl_file
- service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+ service_ctl_file=$(ls $rundir | grep $service | grep ctl)
if [ -z "$service_ctl_file" ]; then
die $LINENO "ctl file for service $service is not present."
fi
- sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
+ sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info
}
function clone_repository {
@@ -334,7 +348,7 @@
# OVN service sanity check
function ovn_sanity_check {
- if is_service_enabled q-agt neutron-agt; then
+ if is_service_enabled q-agt neutron-agent; then
die $LINENO "The q-agt/neutron-agt service must be disabled with OVN."
elif is_service_enabled q-l3 neutron-l3; then
die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
@@ -357,10 +371,6 @@
sudo mkdir -p $OVS_RUNDIR
sudo chown $(whoami) $OVS_RUNDIR
- # NOTE(lucasagomes): To keep things simpler, let's reuse the same
- # RUNDIR for both OVS and OVN. This way we avoid having to specify the
- # --db option in the ovn-{n,s}bctl commands while playing with DevStack
- sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
# If OVS is already installed, remove it, because we're about to
@@ -384,6 +394,8 @@
sudo mkdir -p $OVS_PREFIX/var/log/ovn
sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
else
+ # Load fixup_ovn_centos
+ source ${TOP_DIR}/tools/fixup_stuff.sh
fixup_ovn_centos
install_package $(get_packages openvswitch)
install_package $(get_packages ovn)
@@ -449,7 +461,7 @@
function configure_ovn_plugin {
echo "Configuring Neutron for OVN"
- if is_service_enabled q-svc ; then
+ if is_service_enabled q-svc neutron-api; then
filter_network_api_extensions
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
@@ -473,7 +485,7 @@
inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
fi
- if is_service_enabled q-ovn-metadata-agent; then
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
else
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
@@ -494,7 +506,7 @@
fi
if is_service_enabled n-api-meta ; then
- if is_service_enabled q-ovn-metadata-agent ; then
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
iniset $NOVA_CONF neutron service_metadata_proxy True
fi
fi
@@ -527,7 +539,7 @@
fi
# Metadata
- if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
@@ -539,7 +551,7 @@
iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
- iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
+ iniset $OVN_META_CONF DEFAULT state_path $DATA_DIR/neutron
iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
if is_service_enabled tls-proxy; then
@@ -575,6 +587,7 @@
rm -f $OVS_DATADIR/.*.db.~lock~
sudo rm -f $OVN_DATADIR/*.db
sudo rm -f $OVN_DATADIR/.*.db.~lock~
+ sudo rm -f $OVN_RUNDIR/*.sock
}
function _start_ovs {
@@ -601,12 +614,12 @@
dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
fi
dbcmd+=" $OVS_DATADIR/conf.db"
- _run_process ovsdb-server "$dbcmd"
+ _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
# Note: ovn-controller will create and configure br-int once it is started.
# So, no need to create it now because nothing depends on that bridge here.
local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
- _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+ _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
else
_start_process "$OVSDB_SERVER_SERVICE"
_start_process "$OVS_VSWITCHD_SERVICE"
@@ -626,8 +639,8 @@
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
- sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
- sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname)
# Select this chassis to host gateway routers
if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
@@ -641,11 +654,11 @@
if is_service_enabled ovn-controller-vtep ; then
ovn_base_setup_bridge br-v
vtep-ctl add-ps br-v
- vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+ vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP
enable_service ovs-vtep
local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
- _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root"
+ _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
vtep-ctl set-manager tcp:$HOST_IP:6640
fi
@@ -668,7 +681,7 @@
if is_service_enabled ovs-vtep ; then
_start_process "devstack@ovs-vtep.service"
fi
- if is_service_enabled q-ovn-metadata-agent; then
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then
_start_process "devstack@q-ovn-metadata-agent.service"
fi
}
@@ -689,23 +702,26 @@
local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
- _run_process ovn-northd "$cmd" "$stop_cmd"
+ _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
else
_start_process "$OVN_NORTHD_SERVICE"
fi
# Wait for the service to be ready
- wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
- wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+ # Check for socket and db files for both OVN NB and SB
+ wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock
+ wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock
+ wait_for_db_file $OVN_DATADIR/ovnnb_db.db
+ wait_for_db_file $OVN_DATADIR/ovnsb_db.db
if is_service_enabled tls-proxy; then
- sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
- sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+ sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+ sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
fi
- sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
- sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
- sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
- sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+ sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+ sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+ sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+ sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
fi
if is_service_enabled ovn-controller ; then
@@ -713,7 +729,7 @@
local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
- _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+ _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
else
_start_process "$OVN_CONTROLLER_SERVICE"
fi
@@ -722,13 +738,13 @@
if is_service_enabled ovn-controller-vtep ; then
if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
- _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+ _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR"
else
_start_process "$OVN_CONTROLLER_VTEP_SERVICE"
fi
fi
- if is_service_enabled q-ovn-metadata-agent; then
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
# Format logging
setup_logging $OVN_META_CONF
@@ -752,8 +768,10 @@
}
function stop_ovn {
- if is_service_enabled q-ovn-metadata-agent; then
- sudo pkill -9 -f haproxy || :
+ if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+ # pkill takes care not to kill itself, but it may kill its parent
+ # sudo unless we use the "ps | grep [f]oo" trick
+ sudo pkill -9 -f "[h]aproxy" || :
_stop_process "devstack@q-ovn-metadata-agent.service"
fi
if is_service_enabled ovn-controller-vtep ; then
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 9ae5555..ea71e60 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -33,9 +33,9 @@
local fatal=$2
if [ "$(trueorfalse True fatal)" == "True" ]; then
- sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module")
+ sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module")
else
- sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg)
+ sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg)
fi
}
@@ -87,9 +87,15 @@
install_package kernel-devel-$KERNEL_VERSION
install_package kernel-headers-$KERNEL_VERSION
+ if is_service_enabled tls-proxy; then
+ install_package openssl-devel
+ fi
elif is_ubuntu ; then
install_package linux-headers-$KERNEL_VERSION
+ if is_service_enabled tls-proxy; then
+ install_package libssl-dev
+ fi
fi
}
@@ -97,7 +103,7 @@
function load_ovs_kernel_modules {
load_module openvswitch
load_module vport-geneve False
- dmesg | tail
+ sudo dmesg | tail
}
# reload_ovs_kernel_modules() - reload openvswitch kernel module
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index c0d74c7..2bf884a 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -166,14 +166,14 @@
if is_provider_network; then
die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
- NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK"
if [[ "$IP_VERSION" =~ 4.* ]]; then
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
+ SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME"
fi
@@ -183,7 +183,7 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
fi
- IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
+ IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id)
die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME"
fi
@@ -193,7 +193,7 @@
sudo ip link set $PUBLIC_INTERFACE up
fi
else
- NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME"
if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -211,11 +211,11 @@
# Create a router, and add the private subnet as one of its interfaces
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
- ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
- ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
fi
@@ -225,9 +225,9 @@
fi
# Create an external network, and a subnet. Configure the external network as router gw
if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id)
else
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id)
fi
die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
@@ -257,7 +257,7 @@
subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
local subnet_id
- subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet"
echo $subnet_id
}
@@ -278,7 +278,7 @@
subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} "
subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
local ipv6_subnet_id
- ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet"
echo $ipv6_subnet_id
}
@@ -323,7 +323,7 @@
openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
# This logic is specific to using OVN or the l3-agent for layer 3
- if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+ if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
# Configure and enable public bridge
local ext_gw_interface="none"
if is_neutron_ovs_base_plugin; then
@@ -372,7 +372,7 @@
fi
# This logic is specific to using OVN or the l3-agent for layer 3
- if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+ if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
# if the Linux host considers itself to be a router then it will
# ignore all router advertisements
# Ensure IPv6 RAs are accepted on interfaces with a default route.
@@ -403,7 +403,10 @@
ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
- # Configure interface for public bridge
+ # Configure interface for public bridge by setting the interface
+ # to "up" in case the job is running entirely private network based
+ # testing.
+ sudo ip link set $ext_gw_interface up
sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
# Any IPv6 private subnet that uses the default IPV6 subnet pool
# and that is plugged into the default router (Q_ROUTER_NAME) will
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 5b32468..757a562 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -12,7 +12,7 @@
METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
function neutron_agent_metering_configure_common {
- _neutron_service_plugin_class_add $METERING_PLUGIN
+ neutron_service_plugin_class_add $METERING_PLUGIN
}
function neutron_agent_metering_configure_agent {
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
index af9eb3d..c11c315 100644
--- a/lib/neutron_plugins/services/qos
+++ b/lib/neutron_plugins/services/qos
@@ -6,7 +6,7 @@
function configure_qos_core_plugin {
- configure_qos_$NEUTRON_CORE_PLUGIN
+ configure_qos_$Q_PLUGIN
}
diff --git a/lib/nova b/lib/nova
index 4c14374..3aa6b9e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -97,6 +97,18 @@
METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This is used to switch the compute API policies to enforce scope and new defaults.
+# By default, these flags are False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+NOVA_ENFORCE_SCOPE=$(trueorfalse False NOVA_ENFORCE_SCOPE)
+
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+ NOVA_MY_IP="$HOST_IPV6"
+else
+ NOVA_MY_IP="$HOST_IP"
+fi
+
# Option to enable/disable config drive
# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
@@ -107,20 +119,6 @@
QEMU_CONF=/etc/libvirt/qemu.conf
-# Set default defaults here as some hypervisor drivers override these
-PUBLIC_INTERFACE_DEFAULT=br100
-# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
-# the default isn't completely crazy. This will match ``eth*``, ``em*``, or
-# the new ``p*`` interfaces, then basically picks the first
-# alphabetically. It's probably wrong, however it's less wrong than
-# always using ``eth0`` which doesn't exist on new Linux distros at all.
-GUEST_INTERFACE_DEFAULT=$(ip link \
- | grep 'state UP' \
- | awk '{print $2}' \
- | sed 's/://' \
- | grep ^[ep] \
- | head -1)
-
# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
# In multi-node setups allows compute hosts to not run ``n-novnc``.
NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
@@ -219,6 +217,9 @@
done
sudo iscsiadm --mode node --op delete || true
+ # Disconnect all nvmeof connections
+ sudo nvme disconnect-all || true
+
# Clean out the instances directory.
sudo rm -rf $NOVA_INSTANCES_PATH/*
fi
@@ -306,6 +307,7 @@
fi
fi
+ # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM
# Ensure each compute host uses a unique iSCSI initiator
echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
@@ -324,14 +326,30 @@
# set chap algorithms. The default chap_algorithm is md5 which will
# not work under FIPS.
- # FIXME(alee) For some reason, this breaks openeuler. Openeuler devs should weigh in
- # and determine the correct solution for openeuler here
- if ! is_openeuler; then
- iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
- fi
+ iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
- # ensure that iscsid is started, even when disabled by default
- restart_service iscsid
+ if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then
+ # ensure that iscsid is started, even when disabled by default
+ restart_service iscsid
+
+ # For NVMe-oF we need different packages that may not be present
+ else
+ install_package nvme-cli
+ sudo modprobe nvme-fabrics
+
+ # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface
+ if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+ sudo modprobe nvme-rdma
+ iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`}
+ if ! sudo rdma link | grep $iface ; then
+ sudo rdma link add rxe_$iface type rxe netdev $iface
+ fi
+ elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+ sudo modprobe nvme-tcp
+ else # 'nvmet_fc'
+ sudo modprobe nvme-fc
+ fi
+ fi
fi
# Rebuild the config file from scratch
@@ -422,11 +440,7 @@
iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
iniset $NOVA_CONF scheduler workers "$API_WORKERS"
iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
- if [[ $SERVICE_IP_VERSION == 6 ]]; then
- iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
- else
- iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
- fi
+ iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP"
iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
@@ -473,6 +487,13 @@
NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
fi
iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
+ if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+ iniset $NOVA_CONF oslo_policy enforce_new_defaults True
+ iniset $NOVA_CONF oslo_policy enforce_scope True
+ else
+ iniset $NOVA_CONF oslo_policy enforce_new_defaults False
+ iniset $NOVA_CONF oslo_policy enforce_scope False
+ fi
if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
# Set the service port for a proxy to take the original
iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
@@ -903,8 +924,23 @@
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
+ # Installing novnc on Debian bullseye breaks the global pip
+ # install. This happens because novnc pulls in distro cryptography
+ # which will be preferred by distro pip, but if anything has
+ # installed pyOpenSSL from pypi (keystone) that is not compatible
+ # with distro cryptography. Fix this by installing
+ # python3-openssl (pyOpenSSL) from the distro which pip will prefer
+ # on Debian. Ubuntu has inverse problems so we only do this for
+ # Debian.
+ local novnc_packages
+ novnc_packages="novnc"
+ GetOSVersion
+ if [[ "$os_VENDOR" = "Debian" ]] ; then
+ novnc_packages="$novnc_packages python3-openssl"
+ fi
+
NOVNC_WEB_DIR=/usr/share/novnc
- install_package novnc
+ install_package $novnc_packages
else
NOVNC_WEB_DIR=$DEST/novnc
git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 3e7d280..c0e45eb 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -82,11 +82,17 @@
sudo dnf copr enable -y @virtmaint-sig/virt-preview
fi
+ if is_openeuler; then
+ qemu_package=qemu
+ else
+ qemu_package=qemu-kvm
+ fi
+
# Note that in CentOS/RHEL this needs to come from the RDO
# repositories (qemu-kvm-ev ... which provides this package)
# as the base system version is too old. We should have
# pre-installed these
- install_package qemu-kvm
+ install_package $qemu_package
install_package libvirt libvirt-devel python3-libvirt
if is_arch "aarch64"; then
diff --git a/lib/os-vif b/lib/os-vif
index 865645c..7c8bee3 100644
--- a/lib/os-vif
+++ b/lib/os-vif
@@ -1,10 +1,5 @@
#!/bin/bash
-# support vsctl or native.
-# until bug #1929446 is resolved we override the os-vif default
-# and fall back to the legacy "vsctl" driver.
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"}
-
function is_ml2_ovs {
if [[ "${Q_AGENT}" == "openvswitch" ]]; then
echo "True"
@@ -19,11 +14,9 @@
function configure_os_vif {
if [[ -e ${NOVA_CONF} ]]; then
- iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
fi
if [[ -e ${NEUTRON_CONF} ]]; then
- iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
fi
}
diff --git a/lib/placement b/lib/placement
index b779866..c6bf99f 100644
--- a/lib/placement
+++ b/lib/placement
@@ -48,6 +48,12 @@
PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This is used to switch the Placement API policies to enforce scope and new defaults.
+# By default, these flags are False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE)
+
# Functions
# ---------
@@ -111,6 +117,13 @@
else
_config_placement_apache_wsgi
fi
+ if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+ iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True
+ iniset $PLACEMENT_CONF oslo_policy enforce_scope True
+ else
+ iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False
+ iniset $PLACEMENT_CONF oslo_policy enforce_scope False
+ fi
}
# create_placement_accounts() - Set up required placement accounts
diff --git a/lib/swift b/lib/swift
index ba92f3d..251c462 100644
--- a/lib/swift
+++ b/lib/swift
@@ -402,6 +402,11 @@
# Versioned Writes
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
+ # Add sha1 temporary https://storyboard.openstack.org/#!/story/2010068
+ if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512"
+ fi
+
# Configure Ceilometer
if is_service_enabled ceilometer; then
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
diff --git a/lib/tempest b/lib/tempest
index 4504663..c3d3e9a 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -71,6 +71,17 @@
TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them. In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT. If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
+
# Neutron/Network variables
IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -117,6 +128,13 @@
(cd $REQUIREMENTS_DIR &&
git show master:upper-constraints.txt 2>/dev/null ||
git show origin/master:upper-constraints.txt) > $tmp_c
+ # NOTE(gmann): we need to set the below env var pointing to master
+ # constraints even though that is the default in tox.ini. Otherwise it
+ # can create issues for grenade runs where old and new devstack can have
+ # different tempest (old and master) to install. For details on the
+ # problem, refer to https://bugs.launchpad.net/devstack/+bug/2003993
+ export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
+ export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
else
echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -127,6 +145,48 @@
fi
}
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images. Takes 3 arguments, an array and two
+# variables. The array will contain the list of active image UUIDs;
+# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of *both* other parameters.
+function get_active_images {
+ declare -n img_array=$1
+ declare -n img_id=$2
+ declare -n img_id_alt=$3
+
+ # start with a fresh array in case we are called multiple times
+ img_array=()
+
+ while read -r IMAGE_NAME IMAGE_UUID; do
+ if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+ img_id="$IMAGE_UUID"
+ img_id_alt="$IMAGE_UUID"
+ fi
+ img_array+=($IMAGE_UUID)
+ done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
+
+function poll_glance_images {
+ declare -n image_array=$1
+ declare -n image_id=$2
+ declare -n image_id_alt=$3
+ local -i poll_count
+
+ poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+ while (( poll_count-- > 0 )) ; do
+ sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+ get_active_images image_array image_id image_id_alt
+ if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+ return
+ fi
+ done
+ local msg
+ msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+ msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+ warn $LINENO "$msg"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -168,13 +228,21 @@
declare -a images
if is_service_enabled glance; then
- while read -r IMAGE_NAME IMAGE_UUID; do
- if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
- image_uuid="$IMAGE_UUID"
- image_uuid_alt="$IMAGE_UUID"
+ get_active_images images image_uuid image_uuid_alt
+
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ # Glance image import is asynchronous and may be configured
+ # to do image conversion. If image import is being used,
+ # it's possible that this code is being executed before the
+ # import has completed and there may be no active images yet.
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ poll_glance_images images image_uuid image_uuid_alt
+ if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+ echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+ exit 1
+ fi
fi
- images+=($IMAGE_UUID)
- done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+ fi
case "${#images[*]}" in
0)
@@ -607,14 +675,27 @@
# If services enable the enforce_scope for their policy
# we need to enable the same on Tempest side so that
# test can be run with scoped token.
- if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+ if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
iniset $TEMPEST_CONFIG enforce_scope keystone true
iniset $TEMPEST_CONFIG auth admin_system 'all'
iniset $TEMPEST_CONFIG auth admin_project_name ''
fi
- iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
- iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+ if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope nova true
+ fi
+
+ if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope placement true
+ fi
+
+ if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope glance true
+ fi
+
+ if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+ iniset $TEMPEST_CONFIG enforce_scope cinder true
+ fi
if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
# libvirt-lxc does not support boot from volume or attaching volumes
@@ -629,13 +710,13 @@
local tmp_cfg_file
tmp_cfg_file=$(mktemp)
cd $TEMPEST_DIR
- if [[ "$OFFLINE" != "True" ]]; then
- tox -revenv-tempest --notest
- fi
local tmp_u_c_m
tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
set_tempest_venv_constraints $tmp_u_c_m
+ if [[ "$OFFLINE" != "True" ]]; then
+ tox -revenv-tempest --notest
+ fi
tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
rm -f $tmp_u_c_m
@@ -663,12 +744,12 @@
# Neutron API Extensions
# disable metering if we didn't enable the service
- if ! is_service_enabled q-metering; then
+ if ! is_service_enabled q-metering neutron-metering; then
DISABLE_NETWORK_API_EXTENSIONS+=", metering"
fi
# disable l3_agent_scheduler if we didn't enable L3 agent
- if ! is_service_enabled q-l3; then
+ if ! is_service_enabled q-l3 neutron-l3; then
DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
fi
@@ -709,7 +790,12 @@
# install_tempest() - Collect source and prepare
function install_tempest {
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
- pip_install 'tox!=2.8.0'
+ # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+ # released after zed was released and has some incompatible changes
+ # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+ # branches jobs. We can continue testing the stable/zed and lower
+ # branches with tox<4.0.0
+ pip_install 'tox!=2.8.0,<4.0.0'
pushd $TEMPEST_DIR
# NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
# is tag name not master. git_clone would not checkout tag because
diff --git a/lib/tls b/lib/tls
index 5a7f5ae..b8758cd 100644
--- a/lib/tls
+++ b/lib/tls
@@ -557,7 +557,7 @@
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
</VirtualHost>
EOF
if is_suse ; then
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 9e66f20..0047d78 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -17,9 +17,18 @@
dest: "{{ stage_dir }}/verify_tempest_conf.log"
state: hard
when: tempest_log.stat.exists
+ - name: Capture most recent qemu crash dump, if any
+ shell:
+ executable: /bin/bash
+ cmd: |
+ coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64
+ ignore_errors: yes
roles:
- export-devstack-journal
- apache-logs-conf
+ # This should run as early as possible to make sure we don't skew
+ # the post-tempest results with other activities.
+ - capture-performance-data
- devstack-project-conf
# capture-system-logs should be the last role before stage-output
- capture-system-logs
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
index d7e4670..68d5254 100644
--- a/playbooks/tox/pre.yaml
+++ b/playbooks/tox/pre.yaml
@@ -5,4 +5,10 @@
bindep_profile: test
bindep_dir: "{{ zuul_work_dir }}"
- test-setup
- - ensure-tox
+ # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0
+ # released after zed was released and has some incompatible changes
+ # and it is ok not to fix the issues caused by tox 4.0.0 in stable
+ # branches jobs. We can continue testing the stable/zed and lower
+ # branches with tox<4.0.0
+ - role: ensure-tox
+ ensure_tox_version: "<4"
diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst
new file mode 100644
index 0000000..b7a37c2
--- /dev/null
+++ b/roles/capture-performance-data/README.rst
@@ -0,0 +1,25 @@
+Generate performance logs for staging
+
+Captures usage information from mysql, systemd, apache logs, and other
+parts of the system and generates a performance.json file in the
+staging directory.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory
+
+.. zuul:rolevar:: devstack_conf_dir
+ :default: /opt/stack
+
+ The base devstack destination directory
+
+.. zuul:rolevar:: debian_suse_apache_deref_logs
+
+ The apache logs found in the debian/suse locations
+
+.. zuul:rolevar:: redhat_apache_deref_logs
+
+ The apache logs found in the redhat locations
diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/roles/capture-performance-data/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_conf_dir: "{{ devstack_base_dir }}"
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
new file mode 100644
index 0000000..f9bb0f7
--- /dev/null
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -0,0 +1,16 @@
+- name: Generate statistics
+ shell:
+ executable: /bin/bash
+ cmd: |
+ source {{ devstack_conf_dir }}/stackrc
+ python3 {{ devstack_conf_dir }}/tools/get-stats.py \
+ --db-user="$DATABASE_USER" \
+ --db-pass="$DATABASE_PASSWORD" \
+ --db-host="$DATABASE_HOST" \
+ {{ apache_logs }} > {{ stage_dir }}/performance.json
+ vars:
+ apache_logs: >-
+ {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %}
+ --apache-log="{{ i.stat.path }}"
+ {% endfor %}
+ ignore_errors: yes
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
- coredumps
- dns resolver
- listen53
+- services
- unbound.log
- deprecation messages
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # Services status
+ sudo systemctl status --all > services.txt 2>/dev/null
+
# NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
# failed to start due to denials from SELinux — useful for CentOS
# and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
index 400a8da..3bddf5e 100644
--- a/roles/devstack-ipv6-only-deployments-verification/README.rst
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -1,10 +1,10 @@
-Verify the IPv6-only deployments
+Verify all addresses in IPv6-only deployments
This role needs to be invoked from a playbook that
-run tests. This role verifies the IPv6 setting on
-devstack side and devstack deploy services on IPv6.
-This role is invoked before tests are run so that
-if any missing IPv6 setting or deployments can fail
+runs tests. This role verifies the IPv6 settings on the
+devstack side and that devstack deploys with all addresses
+being IPv6. This role is invoked before tests are run so that
+if there is any missing IPv6 setting, deployments can fail
the job early.
diff --git a/samples/local.conf b/samples/local.conf
index 8b76137..55b7298 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -49,7 +49,7 @@
# path of the destination log file. A timestamp will be appended to the given name.
LOGFILE=$DEST/logs/stack.sh.log
-# Old log files are automatically removed after 7 days to keep things neat. Change
+# Old log files are automatically removed after 2 days to keep things neat. Change
# the number of days by setting ``LOGDAYS``.
LOGDAYS=2
diff --git a/stack.sh b/stack.sh
index 0082b99..28576d1 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
# a multi-node developer install.
# To keep this script simple we assume you are running on a recent **Ubuntu**
-# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (Bionic or newer), **Fedora** (F36 or newer), or **CentOS/RHEL**
# (7 or newer) machine. (It may work on other platforms but support for those
# platforms is left to those who added them to DevStack.) It should work in
# a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -67,7 +67,9 @@
umask 022
# Not all distros have sbin in PATH for regular users.
-PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin
+# osc will normally be installed at /usr/local/bin/openstack so ensure
+# /usr/local/bin is also in the path
+PATH=$PATH:/usr/local/bin:/usr/local/sbin:/usr/sbin:/sbin
# Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
@@ -227,7 +229,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
+SUPPORTED_DISTROS="bullseye|focal|jammy|f36|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-22.03"
if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
@@ -278,13 +280,6 @@
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
-# TODO(wxy): Currently some base packages are not installed by default in
-# openEuler. Remove the code below once the packaged are installed by default
-# in the future.
-if [[ $DISTRO == "openEuler-20.03" ]]; then
- install_package hostname
-fi
-
# Configure Distro Repositories
# -----------------------------
@@ -399,6 +394,15 @@
sudo dnf config-manager --set-enabled crb
# rabbitmq and other packages are provided by RDO repositories.
_install_rdo
+elif [[ $DISTRO == "openEuler-22.03" ]]; then
+ # There are some problems in openEuler that we should fix first. Some required
+ # packages/actions run before the fixup script, so we can't fix them there.
+ #
+ # 1. the hostname package is not installed by default
+ # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel
+ # 3. python3-pip can be uninstalled by `get_pip.py` automatically.
+ install_package hostname openstack-release-wallaby
+ uninstall_package python3-pip
fi
# Ensure python is installed
@@ -1157,7 +1161,8 @@
# ----
if is_service_enabled q-dhcp; then
- # Delete traces of nova networks from prior runs
+ # TODO(frickler): These are remnants from n-net, check which parts are really
+ # still needed for Neutron.
# Do not kill any dnsmasq instance spawned by NetworkManager
netman_pid=$(pidof NetworkManager || true)
if [ -z "$netman_pid" ]; then
@@ -1217,12 +1222,7 @@
echo_summary "Configuring Nova"
init_nova
- # Additional Nova configuration that is dependent on other services
- # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
- # not, remove the if here
- if is_service_enabled neutron; then
- async_runfunc configure_neutron_nova
- fi
+ async_runfunc configure_neutron_nova
fi
@@ -1510,6 +1510,19 @@
time_totals
async_print_timing
+if is_service_enabled mysql; then
+ if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+ echo ""
+ echo ""
+ echo "Post-stack database query stats:"
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'SELECT * FROM queries' -t 2>/dev/null
+ mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+ 'DELETE FROM queries' 2>/dev/null
+ fi
+fi
+
+
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
index d22fa88..a05d1e5 100644
--- a/stackrc
+++ b/stackrc
@@ -179,6 +179,10 @@
# TODO(frickler): Drop this when plugins no longer need it
IDENTITY_API_VERSION=3
+# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides
+# each service's ${SERVICE}_ENFORCE_SCOPE variable
+ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
+
# Enable use of Python virtual environments. Individual project use of
# venvs are controlled by the PROJECT_VENV array; every project with
# an entry in the array will be installed into the named venv.
@@ -193,6 +197,10 @@
# (currently only implemented for MySQL backend)
DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
+# This can be used to turn on various non-default items in the
+# performance_schema that are of interest to us
+MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
+
# Set a timeout for git operations. If git is still running when the
# timeout expires, the command will be retried up to 3 times. This is
# in the format for timeout(1);
@@ -235,7 +243,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="zed"
+DEVSTACK_SERIES="2023.1"
##############
#
@@ -869,7 +877,31 @@
# This is either 127.0.0.1 for IPv4 or ::1 for IPv6
SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}}
-REGION_NAME=${REGION_NAME:-RegionOne}
+# TUNNEL IP version
+# This is the IP version to use for tunnel endpoints
+TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4}
+
+# Validate TUNNEL_IP_VERSION
+if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then
+ die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6"
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then
+ DEF_TUNNEL_ENDPOINT_IP=$HOST_IP
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then
+ # Only die if the user has not over-ridden the endpoint IP
+ if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then
+ die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6."
+ fi
+
+ DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6
+fi
+
+# Allow the use of an alternate address for tunnel endpoints.
+# Default is dependent on TUNNEL_IP_VERSION above.
+TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}}
# Configure services to use syslog instead of writing to individual log files
SYSLOG=$(trueorfalse False SYSLOG)
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index 6ed1647..6367cde 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -44,6 +44,9 @@
multi = foo1
multi = foo2
+[fff]
+ampersand =
+
[key_with_spaces]
rgw special key = something
@@ -85,7 +88,7 @@
# test iniget_sections
VAL=$(iniget_sections "${TEST_INI}")
-assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \
+assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \
del_separate_options del_same_option del_missing_option \
del_missing_option_multi del_no_options"
@@ -124,6 +127,13 @@
VAL=$(iniget ${TEST_INI} bbb handlers)
assert_equal "$VAL" "33,44" "inset at EOF"
+# Test with ampersand in values
+for i in `seq 3`; do
+ iniset ${TEST_INI} fff ampersand '&y'
+done
+VAL=$(iniget ${TEST_INI} fff ampersand)
+assert_equal "$VAL" "&y" "iniset ampersands in option"
+
# test empty option
if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
passed "ini_has_option: ddd.empty present"
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 919cacb..cb8d7aa 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -44,6 +44,15 @@
if ! getent passwd $STACK_USER >/dev/null; then
echo "Creating a user called $STACK_USER"
useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER
+ # RHEL based distros create the home dir with 700 permissions,
+ # and Ubuntu 21.04+ with 750, i.e. missing the executable
+ # permission for either group or others.
+ # Devstack deploy will have issues with this; fix it by
+ # adding the executable permission.
+ if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then
+ echo "Executable permission missing for $DEST, adding it"
+ chmod +x $DEST
+ fi
fi
echo "Giving stack user passwordless sudo privileges"
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..5057f0f
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,120 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+ def __init__(self, url, kwargs):
+ self.db_name = url.database
+ LOG.info('Registered counter for database %s' % self.db_name)
+ new_url = sqlalchemy.engine.URL.create(url.drivername,
+ url.username,
+ url.password,
+ url.host,
+ url.port,
+ 'stats')
+
+ self.engine = sqlalchemy.create_engine(new_url)
+ self.queue = queue.Queue()
+ self.thread = None
+
+ def engine_created(self, engine):
+ """Hook the engine creation process.
+
+ This is the plug point for the sqlalchemy plugin. Using
+ plugin=$this in the URL causes this method to be called when
+ the engine is created, giving us a chance to hook it below.
+ """
+ event.listen(engine, "before_cursor_execute", self._log_event)
+
+ def ensure_writer_thread(self):
+ self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+ self.thread.start()
+
+ def _log_event(self, conn, cursor, statement, parameters, context,
+ executemany):
+ """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation by the first word of the
+ statement, or 'OTHER' if it cannot be determined.
+ """
+
+ # Start our thread if not running. If we were forked after the
+ # engine was created and this plugin was associated, our
+ # writer thread is gone, so respawn.
+ if not self.thread or not self.thread.is_alive():
+ self.ensure_writer_thread()
+
+ try:
+ op = statement.strip().split(' ', 1)[0] or 'OTHER'
+ except Exception:
+ op = 'OTHER'
+
+ self.queue.put((self.db_name, op))
+
+ def do_incr(self, db, op, count):
+ """Increment the counter for (db,op) by count."""
+
+ query = ('INSERT INTO queries (db, op, count) '
+ ' VALUES (%s, %s, %s) '
+ ' ON DUPLICATE KEY UPDATE count=count+%s')
+ try:
+ with self.engine.begin() as conn:
+ r = conn.execute(query, (db, op, count, count))
+ except Exception as e:
+ LOG.error('Failed to account for access to database %r: %s',
+ db, e)
+
+ def stat_writer(self):
+ """Consume messages from the queue and write them in batches.
+
+        This reads "hits" from a queue fed by _log_event() and
+ writes (db,op)+=count stats to the database after ten seconds
+ of no activity to avoid triggering a write for every SELECT
+ call. Write no less often than every thirty seconds and/or 100
+ pending hits to avoid being starved by constant activity.
+ """
+ LOG.debug('[%i] Writer thread running' % os.getpid())
+ while True:
+ to_write = {}
+ total = 0
+ last = time.time()
+ while time.time() - last < 30 and total < 100:
+ try:
+ item = self.queue.get(timeout=10)
+ to_write.setdefault(item, 0)
+ to_write[item] += 1
+ total += 1
+ except queue.Empty:
+ break
+
+ if to_write:
+ LOG.debug('[%i] Writing DB stats %s' % (
+ os.getpid(),
+ ','.join(['%s:%s=%i' % (db, op, count)
+ for (db, op), count in to_write.items()])))
+
+ for (db, op), count in to_write.items():
+ self.do_incr(db, op, count)
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..12300bf
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+ [sqlalchemy.plugins]
+ dbcounter = dbcounter:LogCursorEventsPlugin
diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh
new file mode 100755
index 0000000..9c31b30
--- /dev/null
+++ b/tools/file_tracker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+# time to sleep between checks
+SLEEP_TIME=20
+
+function tracker {
+ echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened"
+ while true; do
+ cat /proc/sys/fs/file-nr
+ sleep $SLEEP_TIME
+ done
+}
+
+function usage {
+ echo "Usage: $0 [-x] [-s N]" 1>&2
+ exit 1
+}
+
+while getopts ":s:x" opt; do
+ case $opt in
+ s)
+ SLEEP_TIME=$OPTARG
+ ;;
+ x)
+ set -o xtrace
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+
+tracker
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index f24ac40..daa1bc6 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -153,32 +153,8 @@
sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
}
-function fixup_openeuler {
- if ! is_openeuler; then
- return
- fi
-
- if is_arch "x86_64"; then
- arch="x86_64"
- elif is_arch "aarch64"; then
- arch="aarch64"
- fi
-
- # Some packages' version in openEuler are too old, use the newer ones we
- # provide in oepkg. (oepkg is an openEuler third part yum repo which is
- # endorsed by openEuler community)
- (echo '[openstack-ci]'
- echo 'name=openstack'
- echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/'
- echo 'enabled=1'
- echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null
-
- yum_install liberasurecode-devel
-}
-
function fixup_all {
fixup_ubuntu
fixup_fedora
fixup_suse
- fixup_openeuler
}
diff --git a/tools/get-stats.py b/tools/get-stats.py
new file mode 100755
index 0000000..b958af6
--- /dev/null
+++ b/tools/get-stats.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python3
+
+import argparse
+import csv
+import datetime
+import glob
+import itertools
+import json
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+
+try:
+ import psutil
+except ImportError:
+ psutil = None
+ print('No psutil, process information will not be included',
+ file=sys.stderr)
+
+try:
+ import pymysql
+except ImportError:
+ pymysql = None
+ print('No pymysql, database information will not be included',
+ file=sys.stderr)
+
+LOG = logging.getLogger('perf')
+
+# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
+
+
+def tryint(value):
+ try:
+ return int(value)
+ except (ValueError, TypeError):
+ return value
+
+
+def get_service_stats(service):
+ stats = {'MemoryCurrent': 0}
+ output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] +
+ ['-p%s' % stat for stat in stats])
+ for line in output.decode().split('\n'):
+ if not line:
+ continue
+ stat, val = line.split('=')
+ stats[stat] = tryint(val)
+
+ return stats
+
+
+def get_services_stats():
+ services = [os.path.basename(s) for s in
+ glob.glob('/etc/systemd/system/devstack@*.service')] + \
+ ['apache2.service']
+ return [dict(service=service, **get_service_stats(service))
+ for service in services]
+
+
+def get_process_stats(proc):
+ cmdline = proc.cmdline()
+ if 'python' in cmdline[0]:
+ cmdline = cmdline[1:]
+ return {'cmd': cmdline[0],
+ 'pid': proc.pid,
+ 'args': ' '.join(cmdline[1:]),
+ 'rss': proc.memory_info().rss}
+
+
+def get_processes_stats(matches):
+ me = os.getpid()
+ procs = psutil.process_iter()
+
+ def proc_matches(proc):
+ return me != proc.pid and any(
+ re.search(match, ' '.join(proc.cmdline()))
+ for match in matches)
+
+ return [
+ get_process_stats(proc)
+ for proc in procs
+ if proc_matches(proc)]
+
+
+def get_db_stats(host, user, passwd):
+ dbs = []
+ try:
+ db = pymysql.connect(host=host, user=user, password=passwd,
+ database='stats',
+ cursorclass=pymysql.cursors.DictCursor)
+ except pymysql.err.OperationalError as e:
+ if 'Unknown database' in str(e):
+ print('No stats database; assuming devstack failed',
+ file=sys.stderr)
+ return []
+ raise
+
+ with db:
+ with db.cursor() as cur:
+ cur.execute('SELECT db,op,count FROM queries')
+ for row in cur:
+ dbs.append({k: tryint(v) for k, v in row.items()})
+ return dbs
+
+
+def get_http_stats_for_log(logfile):
+ stats = {}
+ apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+ 'length', 'c', 'agent')
+ ignore_agents = ('curl', 'uwsgi', 'nova-status')
+ ignored_services = set()
+ for line in csv.reader(open(logfile), delimiter=' '):
+ fields = dict(zip(apache_fields, line))
+ if len(fields) != len(apache_fields):
+ # Not a combined access log, so we can bail completely
+ return []
+ try:
+ method, url, http = fields['request'].split(' ')
+ except ValueError:
+ method = url = http = ''
+ if 'HTTP' not in http:
+ # Not a combined access log, so we can bail completely
+ return []
+
+ # Tempest's User-Agent is unchanged, but client libraries and
+ # inter-service API calls use proper strings. So assume
+ # 'python-urllib' is tempest so we can tell it apart.
+ if 'python-urllib' in fields['agent'].lower():
+ agent = 'tempest'
+ else:
+ agent = fields['agent'].split(' ')[0]
+ if agent.startswith('python-'):
+ agent = agent.replace('python-', '')
+ if '/' in agent:
+ agent = agent.split('/')[0]
+
+ if agent in ignore_agents:
+ continue
+
+ try:
+ service, rest = url.strip('/').split('/', 1)
+ except ValueError:
+ # Root calls like "GET /identity"
+ service = url.strip('/')
+ rest = ''
+
+ if not service.isalpha():
+ ignored_services.add(service)
+ continue
+
+ method_key = '%s-%s' % (agent, method)
+ try:
+ length = int(fields['length'])
+ except ValueError:
+ LOG.warning('[%s] Failed to parse length %r from line %r' % (
+ logfile, fields['length'], line))
+ length = 0
+ stats.setdefault(service, {'largest': 0})
+ stats[service].setdefault(method_key, 0)
+ stats[service][method_key] += 1
+ stats[service]['largest'] = max(stats[service]['largest'],
+ length)
+
+ if ignored_services:
+ LOG.warning('Ignored services: %s' % ','.join(
+ sorted(ignored_services)))
+
+ # Flatten this for ES
+ return [{'service': service, 'log': os.path.basename(logfile),
+ **vals}
+ for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+ return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+ for log in logfiles))
+
+
+def get_report_info():
+ return {
+ 'timestamp': datetime.datetime.now().isoformat(),
+ 'hostname': socket.gethostname(),
+ 'version': 2,
+ }
+
+
+if __name__ == '__main__':
+ process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--db-user', default='root',
+ help=('MySQL user for collecting stats '
+ '(default: "root")'))
+ parser.add_argument('--db-pass', default=None,
+ help='MySQL password for db-user')
+ parser.add_argument('--db-host', default='localhost',
+ help='MySQL hostname')
+ parser.add_argument('--apache-log', action='append', default=[],
+ help='Collect API call stats from this apache log')
+ parser.add_argument('--process', action='append',
+ default=process_defaults,
+ help=('Include process stats for this cmdline regex '
+ '(default is %s)' % ','.join(process_defaults)))
+ args = parser.parse_args()
+
+ logging.basicConfig(level=logging.WARNING)
+
+ data = {
+ 'services': get_services_stats(),
+ 'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+ args.db_user,
+ args.db_pass) or [],
+ 'processes': psutil and get_processes_stats(args.process) or [],
+ 'api': get_http_stats(args.apache_log),
+ 'report': get_report_info(),
+ }
+
+ print(json.dumps(data, indent=2))
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index e9c52ea..91b180c 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -139,15 +139,18 @@
# recent enough anyway. This is included via rpms/general
: # Simply fall through
elif is_ubuntu; then
- : # pip on Ubuntu 20.04 is new enough, too
+ # pip on Ubuntu 20.04 and higher is new enough, too
+ # drop setuptools from u-c
+ sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt
else
install_get_pip
+
+ # Note setuptools is part of requirements.txt and we want to make sure
+ # we obey any versioning as described there.
+ pip_install_gr setuptools
fi
set -x
-# Note setuptools is part of requirements.txt and we want to make sure
-# we obey any versioning as described there.
-pip_install_gr setuptools
get_versions
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
index 2596395..0f0cba8 100755
--- a/tools/verify-ipv6-only-deployments.sh
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -23,32 +23,43 @@
_service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
local _service_local_host=''
_service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
+ local _tunnel_endpoint_ip=''
+ _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d [])
if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
exit 1
fi
+ if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then
+ echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6 so TUNNEL_ENDPOINT_IP cannot be an IPv6 address."
+ exit 1
+ fi
is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
if [[ "$is_service_host_ipv6" != "True" ]]; then
- echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_HOST "SERVICE_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
if [[ "$is_host_ipv6" != "True" ]]; then
- echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $HOST_IPV6 "HOST_IPV6 is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
if [[ "$is_service_listen_address" != "True" ]]; then
- echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
exit 1
fi
is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
if [[ "$is_service_local_host" != "True" ]]; then
- echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+ echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6 which means devstack cannot deploy services on IPv6 addresses."
+ exit 1
+ fi
+ is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))')
+ if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then
+ echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6 which means devstack will not deploy with an IPv6 endpoint address."
exit 1
fi
echo "Devstack is properly configured with IPv6"
- echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+ echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP
}
function sanity_check_system_ipv6_enabled {
@@ -72,7 +83,7 @@
is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
if [[ "$is_endpoint_ipv6" != "True" ]]; then
all_ipv6=False
- echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
+ echo $endpoint ": This is not an IPv6 endpoint which means corresponding service is not listening on an IPv6 address."
continue
fi
endpoints_verified=True
@@ -80,7 +91,7 @@
if [[ "$all_ipv6" == "False" ]] || [[ "$endpoints_verified" == "False" ]]; then
exit 1
fi
- echo "All services deployed by devstack is on IPv6 endpoints"
+ echo "All services deployed by devstack are on IPv6 endpoints"
echo $endpoints
}
diff --git a/unstack.sh b/unstack.sh
index 4b57b6e..a36af3f 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -181,3 +181,8 @@
clean_pyc_files
rm -Rf $DEST/async
+
+# Clean any safe.directory items we wrote into the global
+# gitconfig. We can identify the relevant ones by checking that they
+# point to somewhere in our $DEST directory.
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig