Merge "Cinder: only set volume_clear for LVM"
diff --git a/.zuul.yaml b/.zuul.yaml
index f7594d4..00129b5 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -9,6 +9,16 @@
- controller
- nodeset:
+ name: openstack-single-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-bionic
nodes:
- name: controller
@@ -39,6 +49,16 @@
- controller
- nodeset:
+ name: devstack-single-node-centos-8
+ nodes:
+ - name: controller
+ label: centos-8
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: devstack-single-node-opensuse-15
nodes:
- name: controller
@@ -52,7 +72,7 @@
name: devstack-single-node-fedora-latest
nodes:
- name: controller
- label: fedora-29
+ label: fedora-32
groups:
- name: tempest
nodes:
@@ -89,6 +109,36 @@
- compute1
- nodeset:
+ name: openstack-two-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ - name: compute1
+ label: ubuntu-focal
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
name: openstack-two-node-bionic
nodes:
- name: controller
@@ -149,6 +199,41 @@
- compute1
- nodeset:
+ name: openstack-three-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ - name: compute1
+ label: ubuntu-focal
+ - name: compute2
+ label: ubuntu-focal
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ - compute2
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ - compute2
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+ - compute2
+
+- nodeset:
name: openstack-three-node-bionic
nodes:
- name: controller
@@ -219,8 +304,8 @@
VERBOSE_NO_TIMESTAMP: true
NOVNC_FROM_PACKAGE: true
ERROR_ON_CLONE: true
- # Gate jobs can't deal with nested virt. Disable it.
- LIBVIRT_TYPE: qemu
+ # Gate jobs can't deal with nested virt. Disable it by default.
+ LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
devstack_services:
# Ignore any default set by devstack. Emit a "disable_all_services".
base: false
@@ -255,6 +340,7 @@
'{{ stage_dir }}/listen53.txt': logs
'{{ stage_dir }}/deprecations.log': logs
'{{ stage_dir }}/audit.log': logs
+ /etc/ceph: logs
/var/log/ceph: logs
/var/log/openvswitch: logs
/var/log/glusterfs: logs
@@ -306,7 +392,7 @@
description: |
Minimal devstack base job, intended for use by jobs that need
less than the normal minimum set of required-projects.
- nodeset: openstack-single-node-bionic
+ nodeset: openstack-single-node-focal
required-projects:
- opendev.org/openstack/requirements
vars:
@@ -319,15 +405,15 @@
# Shared services
dstat: true
etcd3: true
+ memory_tracker: true
mysql: true
- peakmem_tracker: true
rabbit: true
group-vars:
subnode:
devstack_services:
# Shared services
dstat: true
- peakmem_tracker: true
+ memory_tracker: true
devstack_localrc:
# Multinode specific settings
HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -393,15 +479,14 @@
# Shared services
dstat: true
etcd3: true
+ memory_tracker: true
mysql: true
- peakmem_tracker: true
rabbit: true
tls-proxy: true
# Keystone services
key: true
# Glance services
g-api: true
- g-reg: true
# Nova services
n-api: true
n-api-meta: true
@@ -409,22 +494,15 @@
n-cpu: true
n-novnc: true
n-sch: true
+ # Placement service
placement-api: true
# Neutron services
- # We need to keep using the neutron-legacy based services for
- # now until all issues with the new lib/neutron code are solved
q-agt: true
q-dhcp: true
q-l3: true
q-meta: true
q-metering: true
q-svc: true
- # neutron-api: true
- # neutron-agent: true
- # neutron-dhcp: true
- # neutron-l3: true
- # neutron-metadata-agent: true
- # neutron-metering: true
# Swift services
s-account: true
s-container: true
@@ -450,16 +528,14 @@
# This list replaces the test-matrix.
# Shared services
dstat: true
- peakmem_tracker: true
+ memory_tracker: true
tls-proxy: true
# Nova services
n-cpu: true
+ # Placement services
placement-client: true
# Neutron services
- # We need to keep using the neutron-legacy based services for
- # now until all issues with the new lib/neutron code are solved
q-agt: true
- # neutron-agent: true
# Cinder services
c-bak: true
c-vol: true
@@ -469,7 +545,7 @@
# s-*: false
horizon: false
tempest: false
- # Test matrix emits ceilometer but ceilomenter is not installed in the
+ # Test matrix emits ceilometer but ceilometer is not installed in the
# integrated gate, so specifying the services has not effect.
# ceilometer-*: false
devstack_localrc:
@@ -487,52 +563,47 @@
devstack_localrc:
SERVICE_IP_VERSION: 6
SERVICE_HOST: ""
- # IPv6 and certificates known issue with python2
- # https://bugs.launchpad.net/devstack/+bug/1794929
- USE_PYTHON3: true
-
-- job:
- name: devstack-xenial
- parent: devstack
- nodeset: openstack-single-node-xenial
- description: |
- Simple singlenode test to verify functionality on devstack
- side running on Xenial.
- job:
name: devstack-multinode
parent: devstack
- nodeset: openstack-two-node-bionic
+ nodeset: openstack-two-node-focal
description: |
Simple multinode test to verify multinode functionality on devstack side.
This is not meant to be used as a parent job.
-- job:
- name: devstack-multinode-xenial
- parent: devstack
- nodeset: openstack-two-node-xenial
- description: |
- Simple multinode test to verify multinode functionality on devstack
- side running on Xenial.
- This is not meant to be used as a parent job.
-
# NOTE(ianw) Platform tests have traditionally been non-voting because
# we often have to rush things through devstack to stabilise the gate,
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
- name: devstack-platform-centos-7
- parent: tempest-full
- description: Centos 7 platform test
- nodeset: devstack-single-node-centos-7
+ name: devstack-platform-centos-8
+ parent: tempest-full-py3
+ description: Centos 8 platform test
+ nodeset: devstack-single-node-centos-8
voting: false
+ timeout: 9000
- job:
- name: devstack-platform-opensuse-15
+ name: devstack-platform-bionic
parent: tempest-full-py3
- description: openSUSE 15.x platform test
- nodeset: devstack-single-node-opensuse-15
+ description: Ubuntu Bionic platform test
+ nodeset: openstack-single-node-bionic
voting: false
+ vars:
+ devstack_localrc:
+ CINDER_ISCSI_HELPER: tgtadm
+
+- job:
+ name: devstack-async
+ parent: tempest-full-py3
+ description: Async mode enabled
+ voting: false
+ vars:
+ devstack_localrc:
+ DEVSTACK_PARALLEL: True
+ zuul_copy_output:
+ /opt/stack/async: logs
- job:
name: devstack-platform-fedora-latest
@@ -542,11 +613,14 @@
voting: false
- job:
- name: devstack-platform-xenial
+ name: devstack-platform-fedora-latest-virt-preview
parent: tempest-full-py3
- description: Ubuntu Xenial platform test
- nodeset: openstack-single-node-xenial
+ description: Fedora latest platform test using the virt-preview repo.
+ nodeset: devstack-single-node-fedora-latest
voting: false
+ vars:
+ devstack_localrc:
+ ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
- job:
name: devstack-tox-base
@@ -613,30 +687,30 @@
- project:
templates:
- - integrated-gate
- integrated-gate-py3
- publish-openstack-docs-pti
check:
jobs:
- devstack
- - devstack-xenial
- devstack-ipv6
- - devstack-platform-centos-7
- - devstack-platform-opensuse-15
- devstack-platform-fedora-latest
- - devstack-platform-xenial
+ - devstack-platform-centos-8
+ - devstack-platform-bionic
+ - devstack-async
- devstack-multinode
- - devstack-multinode-xenial
- devstack-unit-tests
- openstack-tox-bashate
- ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
voting: false
- swift-dsvm-functional:
voting: false
- irrelevant-files:
+ irrelevant-files: &dsvm-irrelevant-files
- ^.*\.rst$
- ^doc/.*$
- - neutron-grenade:
+ - swift-dsvm-functional-py3:
+ voting: false
+ irrelevant-files: *dsvm-irrelevant-files
+ - grenade:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -648,7 +722,12 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - tempest-multinode-full:
+ - neutron-ovn-tempest-ovs-release:
+ voting: false
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - tempest-multinode-full-py3:
voting: false
irrelevant-files:
- ^.*\.rst$
@@ -661,13 +740,15 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ - nova-ceph-multistore:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
gate:
jobs:
- devstack
- - devstack-xenial
- devstack-ipv6
- devstack-multinode
- - devstack-multinode-xenial
- devstack-unit-tests
- openstack-tox-bashate
- neutron-grenade-multinode:
@@ -678,7 +759,7 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - neutron-grenade:
+ - grenade:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -690,6 +771,10 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ - nova-ceph-multistore:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
# Please add a note on each job and conditions for the job not
# being experimental any more, so we can keep this list somewhat
# pruned.
@@ -707,6 +792,10 @@
# Next cycle we can remove this if everything run out stable enough.
# * nova-multi-cell: maintained by nova and currently non-voting in the
# check queue for nova changes but relies on devstack configuration
+ # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood
+ # for Nova to allow early testing of the latest versions of Libvirt and
+ # QEMU. Should only graduate out of experimental if it ever moves into
+ # the check queue for Nova.
experimental:
jobs:
@@ -715,10 +804,6 @@
- neutron-fullstack-with-uwsgi
- neutron-functional-with-uwsgi
- neutron-tempest-with-uwsgi
- - devstack-plugin-ceph-tempest:
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- devstack-plugin-ceph-tempest-py3:
irrelevant-files:
- ^.*\.rst$
@@ -727,15 +812,11 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - legacy-tempest-dsvm-neutron-dvr-multinode-full:
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- neutron-tempest-dvr-ha-multinode-full:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - legacy-tempest-dsvm-lvm-multibackend:
+ - cinder-tempest-lvm-multibackend:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -743,3 +824,4 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ - devstack-platform-fedora-latest-virt-preview
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..bb51165
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,19 @@
+The source repository for this project can be found at:
+
+ https://opendev.org/openstack/devstack
+
+Pull requests submitted through GitHub are not monitored.
+
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
+
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
+
+Bugs should be filed on Launchpad:
+
+ https://bugs.launchpad.net/devstack
+
+For more specific information about contributing to this repository, see the
+Devstack contributor guide:
+
+ https://docs.openstack.org/devstack/latest/contributor/contributing.html
diff --git a/HACKING.rst b/HACKING.rst
index f0bb269..6a91e0a 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -10,7 +10,7 @@
Shell script was chosen because it best illustrates the steps used to
set up and interact with OpenStack components.
-DevStack's official repository is located on git.openstack.org at
+DevStack's official repository is located on opendev.org at
https://opendev.org/openstack/devstack. Besides the master branch that
tracks the OpenStack trunk branches a separate branch is maintained for all
OpenStack releases starting with Diablo (stable/diablo).
@@ -23,7 +23,7 @@
.. _contribute: https://docs.openstack.org/infra/manual/developers.html
__ lp_
-.. _lp: https://launchpad.net/~devstack
+.. _lp: https://launchpad.net/devstack
The `Gerrit review
queue <https://review.opendev.org/#/q/project:openstack/devstack>`__
@@ -74,8 +74,7 @@
``tools`` - Contains a collection of stand-alone scripts. While these
may reference the top-level DevStack configuration they can generally be
-run alone. There are also some sub-directories to support specific
-environments such as XenServer.
+run alone.
Scripts
@@ -163,7 +162,7 @@
The DevStack repo now contains all of the static pages of devstack.org in
the ``doc/source`` directory. The OpenStack CI system rebuilds the docs after every
-commit and updates devstack.org (now a redirect to docs.openstack.org/developer/devstack).
+commit and updates devstack.org (now a redirect to https://docs.openstack.org/devstack/latest/).
All of the scripts are processed with shocco_ to render them with the comments
as text describing the script below. For this reason we tend to be a little
@@ -275,9 +274,6 @@
even years from now -- why we were motivated to make a change at the
time.
-* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people
- that should be added to reviews of various sub-systems.
-
Making Changes, Testing, and CI
-------------------------------
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
deleted file mode 100644
index d4968a6..0000000
--- a/MAINTAINERS.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-MAINTAINERS
-===========
-
-
-Overview
---------
-
-The following is a list of people known to have interests in
-particular areas or sub-systems of devstack.
-
-It is a rather general guide intended to help seed the initial
-reviewers list of a change. A +1 on a review from someone identified
-as being a maintainer of its affected area is a very positive flag to
-the core team for the veracity of the change.
-
-The ``devstack-core`` group can still be added to all reviews.
-
-
-Format
-~~~~~~
-
-The format of the file is the name of the maintainer and their
-gerrit-registered email.
-
-
-Maintainers
------------
-
-.. contents:: :local:
-
-
-Ceph
-~~~~
-
-* Sebastien Han <sebastien.han@enovance.com>
-
-Cinder
-~~~~~~
-
-Fedora/CentOS/RHEL
-~~~~~~~~~~~~~~~~~~
-
-* Ian Wienand <iwienand@redhat.com>
-
-Neutron
-~~~~~~~
-
-MidoNet
-~~~~~~~
-
-* Jaume Devesa <devvesa@gmail.com>
-* Ryu Ishimoto <ryu@midokura.com>
-* YAMAMOTO Takashi <yamamoto@midokura.com>
-
-OpenDaylight
-~~~~~~~~~~~~
-
-* Kyle Mestery <mestery@mestery.com>
-
-OpenFlow Agent (ofagent)
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
-* Fumihiko Kakuma <kakuma@valinux.co.jp>
-
-Swift
-~~~~~
-
-* Chmouel Boudjnah <chmouel@enovance.com>
-
-SUSE
-~~~~
-
-* Ralf Haferkamp <rhafer@suse.de>
-* Vincent Untz <vuntz@suse.com>
-
-Tempest
-~~~~~~~
-
-Xen
-~~~
-* Bob Ball <bob.ball@citrix.com>
-
-Zaqar (Marconi)
-~~~~~~~~~~~~~~~
-
-* Flavio Percoco <flaper87@gmail.com>
-* Malini Kamalambal <malini.kamalambal@rackspace.com>
-
-Oracle Linux
-~~~~~~~~~~~~
-* Wiekus Beukes <wiekus.beukes@oracle.com>
diff --git a/clean.sh b/clean.sh
index d6c6b40..870dfd4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -113,7 +113,7 @@
cleanup_database
# Clean out data and status
-sudo rm -rf $DATA_DIR $DEST/status
+sudo rm -rf $DATA_DIR $DEST/status $DEST/async
# Clean out the log file and log directories
if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then
@@ -123,12 +123,10 @@
sudo rm -rf $LOGDIR
fi
-# Clean out the systemd user unit files if systemd was used.
-if [[ "$USE_SYSTEMD" = "True" ]]; then
- sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
- # Make systemd aware of the deletion.
- $SYSTEMCTL daemon-reload
-fi
+# Clean out the systemd unit files.
+sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
+# Make systemd aware of the deletion.
+$SYSTEMCTL daemon-reload
# Clean up venvs
DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack"
@@ -147,12 +145,5 @@
rm -rf ~/.config/openstack
-# Clean up all *.pyc files
-if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
- find_version=`find --version | awk '{ print $NF; exit}'`
- if vercmp "$find_version" "<" "4.2.3" ; then
- sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm
- else
- sudo find $DEST -name "*.pyc" -delete
- fi
-fi
+# Clear any fstab entries made
+sudo sed -i '/.*comment=devstack-.*/ d' /etc/fstab
diff --git a/doc/requirements.txt b/doc/requirements.txt
index fffb83d..ffce3ff 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -2,8 +2,8 @@
Pygments
docutils
-sphinx>=1.6.2
-openstackdocstheme>=1.20.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
nwdiag
blockdiag
sphinxcontrib-blockdiag
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 56043ba..2e17da1 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -30,9 +30,15 @@
'sphinxcontrib.nwdiag' ]
# openstackdocstheme options
-repository_name = 'openstack-dev/devstack'
-bug_project = 'devstack'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack-dev/devstack'
+openstackdocs_pdf_link = True
+openstackdocs_bug_project = 'devstack'
+openstackdocs_bug_tag = ''
+openstackdocs_auto_name = False
+# This repo is not tagged, so don't set versions
+openstackdocs_auto_version = False
+version = ''
+release = ''
todo_include_todos = True
@@ -81,7 +87,7 @@
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['DevStack-doc.']
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 62571e0..2d0c894 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -326,23 +326,29 @@
Database Backend
----------------
-Support for the MySQL database backend is included. Addition database backends
-may be available via external plugins. Enabling of disabling MySQL is handled
-via the usual service functions and ``ENABLED_SERVICES``. For example, to
-disable MySQL in ``local.conf``::
+Multiple database backends are available. The available databases are defined
+in the lib/databases directory.
+``mysql`` is the default database, choose a different one by putting the
+following in the ``localrc`` section::
disable_service mysql
+ enable_service postgresql
+
+``mysql`` is the default database.
RPC Backend
-----------
-Support for a RabbitMQ RPC backend is included. Additional RPC backends may be
-available via external plugins. Enabling or disabling RabbitMQ is handled via
-the usual service functions and ``ENABLED_SERVICES``. For example, to disable
-RabbitMQ in ``local.conf``::
+Support for a RabbitMQ RPC backend is included. Additional RPC
+backends may be available via external plugins. Enabling or disabling
+RabbitMQ is handled via the usual service functions and
+``ENABLED_SERVICES``.
+
+Example disabling RabbitMQ in ``local.conf``::
disable_service rabbit
+
Apache Frontend
---------------
@@ -424,17 +430,6 @@
ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
-Use python3
-------------
-
-By default ``stack.sh`` uses python2 (the exact version set by the
-``PYTHON2_VERSION``). This can be overriden so devstack will run
-python3 (the exact version set by ``PYTHON3_VERSION``).
-
-::
-
- USE_PYTHON3=True
-
A clean install every time
--------------------------
@@ -633,12 +628,6 @@
INSTALL_TEMPEST=True
-Xenserver
-~~~~~~~~~
-
-If you would like to use Xenserver as the hypervisor, please refer to
-the instructions in ``./tools/xen/README.md``.
-
Cinder
~~~~~~
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..5e0df56
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,56 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+The sections below cover the more project-specific information you need to
+get started with Devstack.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at FreeNode
+* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Devstack Core Team
+<https://review.opendev.org/#/admin/groups/50,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature, please read the `Feature Proposal Process`_.
+Devstack features are tracked on `Launchpad BP <https://blueprints.launchpad.net/devstack>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/devstack>`_.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad <https://bugs.launchpad.net/devstack/+filebug>`__.
+More info about Launchpad usage can be found on `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to Devstack require two ``Code-Review +2`` votes from
+Devstack core reviewers before one of the core reviewers can approve the patch
+by giving a ``Workflow +1`` vote. One exception is for patches to unblock the
+gate, which can be approved by a single core reviewer.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index 82e0dd6..e7ec629 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -10,6 +10,7 @@
.. toctree::
:glob:
+ :hidden:
:maxdepth: 1
guides/single-vm
@@ -68,6 +69,11 @@
Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
+Configure Load-Balancer Version 2
+-----------------------------------
+
+Guide on :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+
Deploying DevStack with LDAP
----------------------------
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 669a70d..5d96ca7 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -41,9 +41,6 @@
# If you are enabling barbican for TLS offload in Octavia, include it here.
# enable_plugin barbican https://opendev.org/openstack/barbican
- # If you have python3 available:
- # USE_PYTHON3=True
-
# ===== BEGIN localrc =====
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
@@ -62,7 +59,7 @@
ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
ENABLED_SERVICES+=,placement-api,placement-client
# Glance
- ENABLED_SERVICES+=,g-api,g-reg
+ ENABLED_SERVICES+=,g-api
# Neutron
ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 15f02a0..c0b3f58 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -169,17 +169,12 @@
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
GLANCE_HOSTPORT=$SERVICE_HOST:9292
- ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client
+ ENABLED_SERVICES=n-cpu,q-agt,c-vol,placement-client
NOVA_VNC_ENABLED=True
NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
-**Note:** the ``n-api-meta`` service is a version of the api server
-that only serves the metadata service. It's needed because the
-computes created won't have a routing path to the metadata service on
-the controller.
-
Fire up OpenStack:
::
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 6694022..8b8acde 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,8 +38,7 @@
Start with a clean and minimal install of a Linux system. DevStack
attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 7, as well as Debian and
-OpenSUSE.
+latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE.
If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the
most tested, and will probably go the smoothest.
@@ -151,6 +150,13 @@
with devstack, and help us by :doc:`contributing to the project
<hacking>`.
+If you are a new contributor to devstack please refer: :doc:`contributor/contributing`
+
+.. toctree::
+ :hidden:
+
+ contributor/contributing
+
Contents
++++++++
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index 74010cd..e65c7ef 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -40,7 +40,7 @@
Locally Accessible Guests
=========================
-If you want to make you guests accessible from other machines on your
+If you want to make your guests accessible from other machines on your
network, we have to connect ``br-ex`` to a physical interface.
Dedicated Guest Interface
@@ -81,7 +81,7 @@
[[local|localrc]]
PUBLIC_INTERFACE=eth0
HOST_IP=10.42.0.52
- FLOATING_RANGE=10.42.0.52/24
+ FLOATING_RANGE=10.42.0.0/24
PUBLIC_NETWORK_GATEWAY=10.42.0.1
Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 5cbe4ed..4e7c2d7 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -31,16 +31,16 @@
openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
openstack/cinderlib `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
openstack/cloudkitty `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
-openstack/congress `https://opendev.org/openstack/congress <https://opendev.org/openstack/congress>`__
openstack/cyborg `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
openstack/designate `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 <https://opendev.org/openstack/devstack-plugin-amqp1>`__
openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph <https://opendev.org/openstack/devstack-plugin-ceph>`__
openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container <https://opendev.org/openstack/devstack-plugin-container>`__
openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
+openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
+openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika <https://opendev.org/openstack/devstack-plugin-pika>`__
openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq <https://opendev.org/openstack/devstack-plugin-zmq>`__
-openstack/dragonflow `https://opendev.org/openstack/dragonflow <https://opendev.org/openstack/dragonflow>`__
openstack/ec2-api `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
openstack/freezer `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
openstack/freezer-api `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
@@ -50,6 +50,7 @@
openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard <https://opendev.org/openstack/heat-dashboard>`__
openstack/ironic `https://opendev.org/openstack/ironic <https://opendev.org/openstack/ironic>`__
openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector <https://opendev.org/openstack/ironic-inspector>`__
+openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter <https://opendev.org/openstack/ironic-prometheus-exporter>`__
openstack/ironic-ui `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
openstack/karbor `https://opendev.org/openstack/karbor <https://opendev.org/openstack/karbor>`__
openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard <https://opendev.org/openstack/karbor-dashboard>`__
@@ -75,35 +76,31 @@
openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
-openstack/networking-calico `https://opendev.org/openstack/networking-calico <https://opendev.org/openstack/networking-calico>`__
openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw <https://opendev.org/openstack/networking-l2gw>`__
openstack/networking-midonet `https://opendev.org/openstack/networking-midonet <https://opendev.org/openstack/networking-midonet>`__
openstack/networking-odl `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
-openstack/networking-onos `https://opendev.org/openstack/networking-onos <https://opendev.org/openstack/networking-onos>`__
-openstack/networking-ovn `https://opendev.org/openstack/networking-ovn <https://opendev.org/openstack/networking-ovn>`__
openstack/networking-powervm `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
openstack/networking-sfc `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
openstack/neutron `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
-openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
-openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
openstack/nova-powervm `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
openstack/octavia `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
+openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
openstack/openstacksdk `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze <https://opendev.org/openstack/os-loganalyze>`__
openstack/osprofiler `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
openstack/panko `https://opendev.org/openstack/panko <https://opendev.org/openstack/panko>`__
openstack/patrole `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
openstack/qinling `https://opendev.org/openstack/qinling <https://opendev.org/openstack/qinling>`__
openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard <https://opendev.org/openstack/qinling-dashboard>`__
-openstack/rally `https://opendev.org/openstack/rally <https://opendev.org/openstack/rally>`__
openstack/rally-openstack `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
openstack/sahara `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
@@ -115,7 +112,6 @@
openstack/storlets `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
openstack/tacker `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
openstack/telemetry-tempest-plugin `https://opendev.org/openstack/telemetry-tempest-plugin <https://opendev.org/openstack/telemetry-tempest-plugin>`__
-openstack/tricircle `https://opendev.org/openstack/tricircle <https://opendev.org/openstack/tricircle>`__
openstack/trove `https://opendev.org/openstack/trove <https://opendev.org/openstack/trove>`__
openstack/trove-dashboard `https://opendev.org/openstack/trove-dashboard <https://opendev.org/openstack/trove-dashboard>`__
openstack/vitrage `https://opendev.org/openstack/vitrage <https://opendev.org/openstack/vitrage>`__
@@ -123,6 +119,7 @@
openstack/vitrage-tempest-plugin `https://opendev.org/openstack/vitrage-tempest-plugin <https://opendev.org/openstack/vitrage-tempest-plugin>`__
openstack/watcher `https://opendev.org/openstack/watcher <https://opendev.org/openstack/watcher>`__
openstack/watcher-dashboard `https://opendev.org/openstack/watcher-dashboard <https://opendev.org/openstack/watcher-dashboard>`__
+openstack/whitebox-tempest-plugin `https://opendev.org/openstack/whitebox-tempest-plugin <https://opendev.org/openstack/whitebox-tempest-plugin>`__
openstack/zaqar `https://opendev.org/openstack/zaqar <https://opendev.org/openstack/zaqar>`__
openstack/zaqar-ui `https://opendev.org/openstack/zaqar-ui <https://opendev.org/openstack/zaqar-ui>`__
openstack/zun `https://opendev.org/openstack/zun <https://opendev.org/openstack/zun>`__
@@ -135,19 +132,17 @@
starlingx/metal `https://opendev.org/starlingx/metal <https://opendev.org/starlingx/metal>`__
starlingx/nfv `https://opendev.org/starlingx/nfv <https://opendev.org/starlingx/nfv>`__
starlingx/update `https://opendev.org/starlingx/update <https://opendev.org/starlingx/update>`__
+vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator <https://opendev.org/vexxhost/openstack-operator>`__
x/almanach `https://opendev.org/x/almanach <https://opendev.org/x/almanach>`__
x/apmec `https://opendev.org/x/apmec <https://opendev.org/x/apmec>`__
x/bilean `https://opendev.org/x/bilean <https://opendev.org/x/bilean>`__
x/broadview-collector `https://opendev.org/x/broadview-collector <https://opendev.org/x/broadview-collector>`__
x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins <https://opendev.org/x/collectd-openstack-plugins>`__
x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos <https://opendev.org/x/devstack-plugin-additional-pkg-repos>`__
-x/devstack-plugin-bdd `https://opendev.org/x/devstack-plugin-bdd <https://opendev.org/x/devstack-plugin-bdd>`__
x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin-glusterfs <https://opendev.org/x/devstack-plugin-glusterfs>`__
x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs <https://opendev.org/x/devstack-plugin-hdfs>`__
x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu <https://opendev.org/x/devstack-plugin-libvirt-qemu>`__
x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb <https://opendev.org/x/devstack-plugin-mariadb>`__
-x/devstack-plugin-nfs `https://opendev.org/x/devstack-plugin-nfs <https://opendev.org/x/devstack-plugin-nfs>`__
-x/devstack-plugin-sheepdog `https://opendev.org/x/devstack-plugin-sheepdog <https://opendev.org/x/devstack-plugin-sheepdog>`__
x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax <https://opendev.org/x/devstack-plugin-vmax>`__
x/drbd-devstack `https://opendev.org/x/drbd-devstack <https://opendev.org/x/drbd-devstack>`__
x/fenix `https://opendev.org/x/fenix <https://opendev.org/x/fenix>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index a18a786..7d70d74 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -241,7 +241,7 @@
on Ubuntu, Debian or Linux Mint.
- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
- on Red Hat, Fedora, CentOS or XenServer.
+ on Red Hat, Fedora, or CentOS.
- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
running on SUSE Linux or openSUSE.
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 15b3f75..7853520 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -196,30 +196,6 @@
.. _`remote-pdb`: https://pypi.org/project/remote-pdb/
-Known Issues
-============
-
-Be careful about systemd python libraries. There are 3 of them on
-pypi, and they are all very different. They unfortunately all install
-into the ``systemd`` namespace, which can cause some issues.
-
-- ``systemd-python`` - this is the upstream maintained library, it has
- a version number like systemd itself (currently ``234``). This is
- the one you want.
-- ``systemd`` - a python 3 only library, not what you want.
-- ``python-systemd`` - another library you don't want. Installing it
- on a system will break ansible's ability to run.
-
-
-If we were using user units, the ``[Service]`` - ``Group=`` parameter
-doesn't seem to work with user units, even though the documentation
-says that it should. This means that we will need to do an explicit
-``/usr/bin/sg``. This has the downside of making the SYSLOG_IDENTIFIER
-be ``sg``. We can explicitly set that with ``SyslogIdentifier=``, but
-it's really unfortunate that we're going to need this work
-around. This is currently not a problem because we're only using
-system units.
-
Future Work
===========
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
index 66f8251..c43603e 100644
--- a/doc/source/zuul_ci_jobs_migration.rst
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -195,12 +195,6 @@
- A bridge called br-infra is set up for all jobs that inherit
from multinode with a dedicated `bridge role
<https://zuul-ci.org/docs/zuul-jobs/general-roles.html#role-multi-node-bridge>`_.
- * - DEVSTACK_GATE_FEATURE_MATRIX
- - devstack-gate
- - ``test_matrix_features`` variable of the test-matrix role in
- devstack-gate. This is a temporary solution, feature matrix
- will go away. In the future services will be defined in jobs
- only.
* - DEVSTACK_CINDER_VOLUME_CLEAR
- devstack
- *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the
@@ -302,7 +296,10 @@
- This will probably be implemented on ironic side.
* - DEVSTACK_GATE_POSTGRES
- Legacy
- - This has no effect in d-g.
+ - This flag exists in d-g but the only thing that it does is
+ capture postgres logs. This is already supported by the roles
+ in post, so the flag is useless in the new jobs. postgres
+ itself can be enabled via the devstack_service job variable.
* - DEVSTACK_GATE_ZEROMQ
- Legacy
- This has no effect in d-g.
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 15ecfe3..06c73ec 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -6,7 +6,7 @@
source $TOP_DIR/lib/tempest
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
echo_summary "Installing Tempest"
- install_tempest
+ async_runfunc install_tempest
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# Tempest config must come after layer 2 services are running
:
@@ -17,6 +17,7 @@
# local.conf Tempest option overrides
:
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
+ async_wait install_tempest
echo_summary "Initializing Tempest"
configure_tempest
echo_summary "Installing Tempest Plugins"
diff --git a/files/debs/cinder b/files/debs/cinder
index c1b79fd..5d390e2 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -1,6 +1,4 @@
lvm2
-open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:precise
qemu-utils
tgt # NOPRIME
thin-provisioning-tools
diff --git a/files/debs/dstat b/files/debs/dstat
index 0d9da44..2b643b8 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1,2 +1 @@
dstat
-python-psutil
diff --git a/files/debs/general b/files/debs/general
index df872a0..7e481b4 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,11 +1,11 @@
apache2
apache2-dev
bc
-bridge-utils
bsdmainutils
curl
default-jre-headless # NOPRIME
g++
+gawk
gcc
gettext # used for compiling message catalogs
git
@@ -27,9 +27,10 @@
openssl
pkg-config
psmisc
-python2.7
-python-dev
-python-gdbm # needed for testr
+python3-dev
+python3-pip
+python3-systemd
+python3-venv
tar
tcpdump
unzip
diff --git a/files/debs/keystone b/files/debs/keystone
index fd0317b..1cfa6ff 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -2,5 +2,5 @@
libldap2-dev
libsasl2-dev
memcached
-python-mysqldb
+python3-mysqldb
sqlite3
diff --git a/files/debs/ldap b/files/debs/ldap
index aa3a934..54896bb 100644
--- a/files/debs/ldap
+++ b/files/debs/ldap
@@ -1,3 +1,3 @@
ldap-utils
-python-ldap
+python3-ldap
slapd
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 636644f..54d6fa3 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -5,7 +5,7 @@
lvm2 # NOPRIME
netcat-openbsd
open-iscsi
-python-guestfs # NOPRIME
+python3-guestfs # NOPRIME
qemu-utils
sg3-utils
sysfsutils
diff --git a/files/debs/neutron-common b/files/debs/neutron-common
index e30f678..e548396 100644
--- a/files/debs/neutron-common
+++ b/files/debs/neutron-common
@@ -1,6 +1,6 @@
acl
dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:precise
+dnsmasq-utils # for dhcp_release
ebtables
haproxy # to serve as metadata proxy inside router/dhcp namespaces
iptables
@@ -9,7 +9,7 @@
libmysqlclient-dev
mysql-server #NOPRIME
postgresql-server-dev-all
-python-mysqldb
+python3-mysqldb
rabbitmq-server # NOPRIME
radvd # NOPRIME
sqlite3
diff --git a/files/debs/nova b/files/debs/nova
index 5e14aec..e194414 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -3,22 +3,19 @@
dnsmasq-base
dnsmasq-utils # for dhcp_release
ebtables
-gawk
genisoimage # required for config_drive
iptables
iputils-arping
kpartx
libjs-jquery-tablesorter # Needed for coverage html reports
libmysqlclient-dev
-libvirt-bin # dist:xenial NOPRIME
-libvirt-clients # not:xenial NOPRIME
-libvirt-daemon-system # not:xenial NOPRIME
+libvirt-clients # NOPRIME
+libvirt-daemon-system # NOPRIME
libvirt-dev # NOPRIME
mysql-server # NOPRIME
parted
pm-utils
-python-mysqldb
-qemu # dist:wheezy,jessie NOPRIME
+python3-mysqldb
qemu-kvm # NOPRIME
rabbitmq-server # NOPRIME
socat # used by ajaxterm
diff --git a/files/debs/os-brick b/files/debs/os-brick
new file mode 100644
index 0000000..4148b0c
--- /dev/null
+++ b/files/debs/os-brick
@@ -0,0 +1,3 @@
+lsscsi
+open-iscsi
+open-iscsi-utils # Deprecated since quantal dist:precise
diff --git a/files/debs/ovn b/files/debs/ovn
new file mode 100644
index 0000000..81eea5e
--- /dev/null
+++ b/files/debs/ovn
@@ -0,0 +1,3 @@
+ovn-central
+ovn-controller-vtep
+ovn-host
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index 189a232..b39cc79 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,4 +1,3 @@
lvm2
-open-iscsi
qemu-tools
tgt # NOPRIME
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
index 0d9da44..2b643b8 100644
--- a/files/rpms-suse/dstat
+++ b/files/rpms-suse/dstat
@@ -1,2 +1 @@
dstat
-python-psutil
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index b870d72..f636110 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,9 +1,9 @@
apache2
apache2-devel
bc
-bridge-utils
ca-certificates-mozilla
curl
+gawk
gcc
gcc-c++
git-core
@@ -21,10 +21,10 @@
pcre-devel # python-pcre
postgresql-devel # psycopg2
psmisc
+python3-systemd
python-cmd2 # dist:opensuse-12.3
python-devel # pyOpenSSL
python-xml
-systemd-devel # for systemd-python
tar
tcpdump
unzip
diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common
index d1cc73f..e3799a9 100644
--- a/files/rpms-suse/neutron-common
+++ b/files/rpms-suse/neutron-common
@@ -5,7 +5,6 @@
haproxy # to serve as metadata proxy inside router/dhcp namespaces
iptables
iputils
-mariadb # NOPRIME
rabbitmq-server # NOPRIME
radvd # NOPRIME
sqlite3
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 1d58121..1cc2f62 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -4,14 +4,12 @@
dnsmasq
dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
ebtables
-gawk
iptables
iputils
kpartx
kvm # NOPRIME
libvirt # NOPRIME
libvirt-python # NOPRIME
-mariadb # NOPRIME
# mkisofs is required for config_drive
mkisofs # not:sle12
parted
diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick
new file mode 100644
index 0000000..67b33a9
--- /dev/null
+++ b/files/rpms-suse/os-brick
@@ -0,0 +1,2 @@
+lsscsi
+open-iscsi
diff --git a/files/rpms/cinder b/files/rpms/cinder
index e6b33dc..375f93e 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,3 @@
-iscsi-initiator-utils
lvm2
qemu-img
-scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29 NOPRIME
-targetcli # dist:rhel7,f25,f26,f27,f28,f29 NOPRIME
+targetcli
diff --git a/files/rpms/dstat b/files/rpms/dstat
index b058c27..6524bed 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1,3 +1 @@
-dstat # not:f29
-pcp-system-tools # dist:f29
-python-psutil
+pcp-system-tools
diff --git a/files/rpms/general b/files/rpms/general
index 5bf1e9a..33da0a5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,7 +1,7 @@
bc
-bridge-utils
curl
dbus
+gawk
gcc
gcc-c++
gettext # used for compiling message catalogs
@@ -9,15 +9,13 @@
graphviz # needed only for docs
httpd
httpd-devel
-iptables-services # NOPRIME f25,f26,f27,f28,f29
-java-1.7.0-openjdk-headless # NOPRIME rhel7
-java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29
+iptables-services
+java-1.8.0-openjdk-headless
libffi-devel
libjpeg-turbo-devel # Pillow 3.0.0
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
-mariadb-devel # MySQL-python
net-tools
openssh-server
openssl
@@ -26,10 +24,10 @@
pkgconfig
postgresql-devel # psycopg2
psmisc
-pyOpenSSL # version in pip uses too much memory
-python-devel
+python3-devel
+python3-pip
+python3-systemd
redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # for systemd-python
tar
tcpdump
unzip
diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common
index 0cc8d11..fe25f57 100644
--- a/files/rpms/neutron-common
+++ b/files/rpms/neutron-common
@@ -5,8 +5,6 @@
haproxy # to serve as metadata proxy inside router/dhcp namespaces
iptables
iputils
-mysql-devel
-mysql-server # NOPRIME
openvswitch # NOPRIME
rabbitmq-server # NOPRIME
radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 639d793..8ea8ccc 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,16 +3,11 @@
dnsmasq # for q-dhcp
dnsmasq-utils # for dhcp_release
ebtables
-gawk
genisoimage # required for config_drive
iptables
iputils
-kernel-modules # dist:f25,f26,f27,f28,f29
+kernel-modules
kpartx
-libxml2-python
-m2crypto
-mysql-devel
-mysql-server # NOPRIME
parted
polkit
rabbitmq-server # NOPRIME
diff --git a/files/rpms/os-brick b/files/rpms/os-brick
new file mode 100644
index 0000000..14ff870
--- /dev/null
+++ b/files/rpms/os-brick
@@ -0,0 +1,2 @@
+iscsi-initiator-utils
+lsscsi
diff --git a/files/rpms/ovn b/files/rpms/ovn
new file mode 100644
index 0000000..698e57b
--- /dev/null
+++ b/files/rpms/ovn
@@ -0,0 +1,3 @@
+ovn-central
+ovn-host
+ovn-vtep
diff --git a/files/rpms/swift b/files/rpms/swift
index be524d1..376c6f3 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,8 +1,7 @@
curl
liberasurecode-devel
memcached
-pyxattr
-rsync-daemon # dist:f25,f26,f27,f28,f29
+rsync-daemon
sqlite
xfsprogs
xinetd
diff --git a/functions b/functions
index 8eeb032..ccca5cd 100644
--- a/functions
+++ b/functions
@@ -21,6 +21,7 @@
source ${FUNC_DIR}/inc/meta-config
source ${FUNC_DIR}/inc/python
source ${FUNC_DIR}/inc/rootwrap
+source ${FUNC_DIR}/inc/async
# Save trace setting
_XTRACE_FUNCTIONS=$(set +o | grep xtrace)
@@ -77,6 +78,48 @@
fi
}
+# Generate image property arguments for OSC
+#
+# Arguments: properties, one per, like propname=value
+#
+# Result is --property propname1=value1 --property propname2=value2
+function _image_properties_to_arg {
+ local result=""
+ for property in $*; do
+ result+=" --property $property"
+ done
+ echo $result
+}
+
+# Upload an image to glance using the configured mechanism
+#
+# Arguments:
+# image name
+# container format
+# disk format
+# path to image file
+# optional properties (format of propname=value)
+#
+function _upload_image {
+ local image_name="$1"
+ shift
+ local container="$1"
+ shift
+ local disk="$1"
+ shift
+ local image="$1"
+ shift
+ local properties
+ local useimport
+
+ properties=$(_image_properties_to_arg $*)
+
+ if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+ useimport="--import"
+ fi
+
+ openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
+}
# Retrieve an image from a URL and upload into Glance.
# Uses the following variables:
@@ -118,7 +161,7 @@
# OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
if [[ "$image_url" =~ 'openvz' ]]; then
image_name="${image_fname%.tar.gz}"
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
+ _upload_image "$image_name" ami ami "$image"
return
fi
@@ -232,42 +275,8 @@
vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
- return
- fi
+ _upload_image "$image_name" bare vmdk "$image" vmware_disktype="$vmdk_disktype" vmware_adaptertype="$vmdk_adapter_type" hw_vif_model="$vmdk_net_adapter"
- # XenServer-vhd-ovf-format images are provided as .vhd.tgz
- # and should not be decompressed prior to loading
- if [[ "$image_url" =~ '.vhd.tgz' ]]; then
- image_name="${image_fname%.vhd.tgz}"
- local force_vm_mode=""
- if [[ "$image_name" =~ 'cirros' ]]; then
- # Cirros VHD image currently only boots in PV mode.
- # Nova defaults to PV for all VHD images, but
- # the glance setting is needed for booting
- # directly from volume.
- force_vm_mode="--property vm_mode=xen"
- fi
- openstack \
- --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
- image create \
- "$image_name" --public \
- --container-format=ovf --disk-format=vhd \
- $force_vm_mode < "${image}"
- return
- fi
-
- # .xen-raw.tgz suggests a Xen capable raw image inside a tgz.
- # and should not be decompressed prior to loading.
- # Setting metadata, so PV mode is used.
- if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
- image_name="${image_fname%.xen-raw.tgz}"
- openstack \
- --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
- image create \
- "$image_name" --public \
- --container-format=tgz --disk-format=raw \
- --property vm_mode=xen < "${image}"
return
fi
@@ -278,12 +287,7 @@
die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image"
fi
- openstack \
- --os-cloud=devstack-admin --os-region-name="$REGION_NAME" \
- image create \
- "$image_name" --public \
- --container-format=bare --disk-format=ploop \
- --property vm_mode=$vm_mode < "${image}"
+ _upload_image "$image_name" bare ploop "$image" vm_mode=$vm_mode
return
fi
@@ -293,6 +297,15 @@
local container_format=""
local unpack=""
local img_property=""
+
+ # NOTE(danms): If we're on libvirt/qemu or libvirt/kvm, set the hw_rng_model
+ # to libvirt in the image properties.
+ if [[ "$VIRT_DRIVER" == "libvirt" ]]; then
+ if [[ "$LIBVIRT_TYPE" == "qemu" || "$LIBVIRT_TYPE" == "kvm" ]]; then
+ img_property="hw_rng_model=virtio"
+ fi
+ fi
+
case "$image_fname" in
*.tar.gz|*.tgz)
# Extract ami and aki files
@@ -341,6 +354,12 @@
disk_format=qcow2
container_format=bare
;;
+ *.qcow2.xz)
+ image_name=$(basename "$image" ".qcow2.xz")
+ disk_format=qcow2
+ container_format=bare
+ unpack=unxz
+ ;;
*.raw)
image_name=$(basename "$image" ".raw")
disk_format=raw
@@ -364,20 +383,30 @@
esac
if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then
- img_property="--property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
+ img_property="$img_property hw_cdrom_bus=scsi os_command_line=console=hvc0"
fi
if is_arch "aarch64"; then
- img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'"
+ img_property="$img_property hw_machine_type=virt hw_cdrom_bus=scsi hw_scsi_model=virtio-scsi os_command_line='console=ttyAMA0'"
fi
if [ "$container_format" = "bare" ]; then
if [ "$unpack" = "zcat" ]; then
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+ _upload_image "$image_name" $container_format $disk_format <(zcat --force "$image") $img_property
elif [ "$unpack" = "bunzip2" ]; then
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}")
+ _upload_image "$image_name" $container_format $disk_format <(bunzip2 -cdk "$image") $img_property
+ elif [ "$unpack" = "unxz" ]; then
+ # NOTE(brtknr): unxz the file first and cleanup afterwards to
+ # prevent timeout while Glance tries to upload image (e.g. to Swift).
+ local tmp_dir
+ local image_path
+ tmp_dir=$(mktemp -d)
+ image_path="$tmp_dir/$image_name"
+ unxz -cv "${image}" > "$image_path"
+ _upload_image "$image_name" $container_format $disk_format "$image_path" $img_property
+ rm -rf $tmp_dir
else
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
+ _upload_image "$image_name" $container_format $disk_format "$image" $img_property
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -385,12 +414,12 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
fi
- openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+ _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
fi
}
@@ -400,8 +429,7 @@
# initialized yet, just save the configuration selection and call back later
# to validate it.
#
-# ``$1`` - the name of the database backend to use (only mysql is currently
-# supported)
+# ``$1`` - the name of the database backend to use (mysql, postgresql, ...)
function use_database {
if [[ -z "$DATABASE_BACKENDS" ]]; then
# No backends registered means this is likely called from ``localrc``
@@ -636,40 +664,29 @@
# This sets up defaults we like in devstack for logging for tracking
# down issues, and makes sure everything is done the same between
# projects.
+# NOTE(jh): Historically this function switched between three different
+# functions: setup_systemd_logging, setup_colorized_logging and
+# setup_standard_logging_identity. Since we always run with systemd now,
+# this could be cleaned up, but the other functions may still be in use
+# by plugins. Since deprecations haven't worked in the past, we'll just
+# leave them in place.
function setup_logging {
- local conf_file=$1
- local other_cond=${2:-"False"}
- if [[ "$USE_SYSTEMD" == "True" ]]; then
- setup_systemd_logging $conf_file
- elif [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then
- setup_colorized_logging $conf_file
- else
- setup_standard_logging_identity $conf_file
- fi
+ setup_systemd_logging $1
}
# This function sets log formatting options for colorizing log
# output to stdout. It is meant to be called by lib modules.
-# The last two parameters are optional and can be used to specify
-# non-default value for project and user format variables.
-# Defaults are respectively 'project_name' and 'user_name'
-#
-# setup_colorized_logging something.conf SOMESECTION
function setup_colorized_logging {
local conf_file=$1
- local conf_section="DEFAULT"
- local project_var="project_name"
- local user_var="user_name"
# Add color to logging output
- iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%("$project_var")s %("$user_var")s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
- iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
+ iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
+ iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
}
function setup_systemd_logging {
local conf_file=$1
- local conf_section="DEFAULT"
# NOTE(sdague): this is a nice to have, and means we're using the
# native systemd path, which provides for things like search on
# request-id. However, there may be an eventlet interaction here,
@@ -677,16 +694,16 @@
USE_JOURNAL=$(trueorfalse False USE_JOURNAL)
local pidstr=""
if [[ "$USE_JOURNAL" == "True" ]]; then
- iniset $conf_file $conf_section use_journal "True"
+ iniset $conf_file DEFAULT use_journal "True"
# if we are using the journal directly, our process id is already correct
else
pidstr="(pid=%(process)d) "
fi
- iniset $conf_file $conf_section logging_debug_format_suffix "[00;33m{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}[00m"
+ iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33m{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}[00m"
- iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
+ iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
}
function setup_standard_logging_identity {
@@ -710,23 +727,22 @@
fi
-# create_disk - Create backing disk
+# create_disk - Create, configure, and mount a backing disk
function create_disk {
local node_number
local disk_image=${1}
local storage_data_dir=${2}
local loopback_disk_size=${3}
+ local key
- # Create a loopback disk and format it to XFS.
- if [[ -e ${disk_image} ]]; then
- if egrep -q ${storage_data_dir} /proc/mounts; then
- sudo umount ${storage_data_dir}
- sudo rm -f ${disk_image}
- fi
- fi
+ key=$(echo $disk_image | sed 's#/.##')
+ key="devstack-$key"
- sudo mkdir -p ${storage_data_dir}/drives/images
+ destroy_disk $disk_image $storage_data_dir
+ # Create an empty file of the correct size (and ensure the
+ # directory structure up to that path exists)
+ sudo mkdir -p $(dirname ${disk_image})
sudo truncate -s ${loopback_disk_size} ${disk_image}
# Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
@@ -736,11 +752,31 @@
# Swift and Ceph.
sudo mkfs.xfs -f -i size=1024 ${disk_image}
- # Mount the disk with mount options to make it as efficient as possible
- if ! egrep -q ${storage_data_dir} /proc/mounts; then
- sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
- ${disk_image} ${storage_data_dir}
+ # Install a new loopback fstab entry for this disk image, and mount it
+ echo "$disk_image $storage_data_dir xfs loop,noatime,nodiratime,logbufs=8,comment=$key 0 0" | sudo tee -a /etc/fstab
+ sudo mkdir -p $storage_data_dir
+ sudo mount -v $storage_data_dir
+}
+
+# Unmount, de-configure, and destroy a backing disk
+function destroy_disk {
+ local disk_image=$1
+ local storage_data_dir=$2
+ local key
+
+ key=$(echo $disk_image | sed 's#/.##')
+ key="devstack-$key"
+
+ # Unmount the target, if mounted
+ if egrep -q $storage_data_dir /proc/mounts; then
+ sudo umount $storage_data_dir
fi
+
+ # Clear any fstab rules
+ sudo sed -i '/.*comment=$key.*/ d' /etc/fstab
+
+ # Delete the file
+ sudo rm -f $disk_image
}
diff --git a/functions-common b/functions-common
index a13d611..340da75 100644
--- a/functions-common
+++ b/functions-common
@@ -27,7 +27,6 @@
# - ``RECLONE``
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
-# - ``TRACK_DEPENDS``
# - ``http_proxy``, ``https_proxy``, ``no_proxy``
#
@@ -44,12 +43,11 @@
declare -A -g GITBRANCH
declare -A -g GITDIR
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
KILL_PATH="$(which kill)"
# Save these variables to .stackenv
STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
- KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \
+ KEYSTONE_SERVICE_URI \
LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
HOST_IPV6 SERVICE_IP_VERSION"
@@ -131,6 +129,11 @@
--os-password $ADMIN_PASSWORD \
--os-system-scope all
+ cat >> $CLOUDS_YAML <<EOF
+functional:
+ image_name: $DEFAULT_IMAGE_NAME
+EOF
+
# CLean up any old clouds.yaml files we had laying around
rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml
}
@@ -331,9 +334,6 @@
sudo zypper -n install lsb-release
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
sudo dnf install -y redhat-lsb-core
- elif [[ -x $(command -v yum 2>/dev/null) ]]; then
- # all rh patforms (fedora, centos, rhel) have this pkg
- sudo yum install -y redhat-lsb-core
else
die $LINENO "Unable to find or auto-install lsb_release"
fi
@@ -397,8 +397,6 @@
# Drop the . release as we assume it's compatible
# XXX re-evaluate when we get RHEL10
DISTRO="rhel${os_RELEASE::1}"
- elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
- DISTRO="xs${os_RELEASE%.*}"
else
# We can't make a good choice here. Setting a sensible DISTRO
# is part of the problem, but not the major issue -- we really
@@ -451,8 +449,9 @@
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
- [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \
- [ "$os_VENDOR" = "Virtuozzo" ]
+ [ "$os_VENDOR" = "RedHatEnterprise" ] || \
+ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
+ [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
}
@@ -1219,10 +1218,16 @@
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
fi
+ if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+ file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+ fi
elif [[ $service == c-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then
file_to_parse="${file_to_parse} ${package_dir}/cinder"
fi
+ if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+ file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+ fi
elif [[ $service == s-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/swift ]]; then
file_to_parse="${file_to_parse} ${package_dir}/swift"
@@ -1231,6 +1236,9 @@
if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
file_to_parse="${file_to_parse} ${package_dir}/nova"
fi
+ if [[ ! $file_to_parse =~ $package_dir/os-brick ]]; then
+ file_to_parse="${file_to_parse} ${package_dir}/os-brick"
+ fi
elif [[ $service == g-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
@@ -1363,7 +1371,7 @@
if is_ubuntu; then
apt_get purge "$@"
elif is_fedora; then
- sudo ${YUM:-yum} remove -y "$@" ||:
+ sudo dnf remove -y "$@" ||:
elif is_suse; then
sudo zypper remove -y "$@" ||:
else
@@ -1371,8 +1379,11 @@
fi
}
-# Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
+# Wrapper for ``dnf`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# The name is kept for backwards compatibility with external
+# callers, despite none of our supported platforms using yum
+# any more.
# yum_install package [package ...]
function yum_install {
local result parse_yum_result
@@ -1380,44 +1391,8 @@
[[ "$OFFLINE" = "True" ]] && return
time_start "yum_install"
-
- # This is a bit tricky, because yum -y assumes missing or failed
- # packages are OK (see [1]). We want devstack to stop if we are
- # installing missing packages.
- #
- # Thus we manually match on the output (stack.sh runs in a fixed
- # locale, so lang shouldn't change).
- #
- # If yum returns !0, we echo the result as "YUM_FAILED" and return
- # that from the awk (we're subverting -e with this trick).
- # Otherwise we use awk to look for failure strings and return "2"
- # to indicate a terminal failure.
- #
- # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
- parse_yum_result=' \
- BEGIN { result=0 } \
- /^YUM_FAILED/ { result=$2 } \
- /^No package/ { result=2 } \
- /^Failed:/ { result=2 } \
- //{ print } \
- END { exit result }'
- (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
- | awk "$parse_yum_result" && result=$? || result=$?
-
+ sudo_with_proxies dnf install -y "$@"
time_stop "yum_install"
-
- # if we return 1, then the wrapper functions will run an update
- # and try installing the package again as a defense against bad
- # mirrors. This can hide failures, especially when we have
- # packages that are in the "Failed:" section because their rpm
- # install scripts failed to run correctly (in this case, the
- # package looks installed, so when the retry happens we just think
- # the package is OK, and incorrectly continue on).
- if [ "$result" == 2 ]; then
- die "Detected fatal package install failure"
- fi
-
- return "$result"
}
# zypper wrapper to set arguments correctly
@@ -1445,7 +1420,8 @@
local pkgs
if [[ ! -f $file ]]; then
- die $LINENO "Can not find bindep file: $file"
+ warn $LINENO "Can not find bindep file: $file"
+ return
fi
# converting here makes it much easier to work with passing
@@ -1631,10 +1607,6 @@
}
-function tail_log {
- deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens"
-}
-
# Plugin Functions
# =================
@@ -2075,11 +2047,7 @@
return 0
fi
- if [[ $TRACK_DEPENDS = True ]]; then
- sudo_cmd="env"
- else
- sudo_cmd="sudo"
- fi
+ sudo_cmd="sudo"
$xtrace
$sudo_cmd $@
@@ -2441,6 +2409,13 @@
$xtrace
}
+function clean_pyc_files {
+ # Clean up all *.pyc files
+ if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
+ sudo find $DEST -name "*.pyc" -delete
+ fi
+}
+
# Restore xtrace
$_XTRACE_FUNCTIONS_COMMON
diff --git a/inc/async b/inc/async
new file mode 100644
index 0000000..56338f5
--- /dev/null
+++ b/inc/async
@@ -0,0 +1,256 @@
+#!/bin/bash
+#
+# Symbolic asynchronous tasks for devstack
+#
+# Usage:
+#
+# async_runfunc my_shell_func foo bar baz
+#
+# ... do other stuff ...
+#
+# async_wait my_shell_func
+#
+
+DEVSTACK_PARALLEL=$(trueorfalse True DEVSTACK_PARALLEL)
+_ASYNC_BG_TIME=0
+
+# Keep track of how much total time was spent in background tasks
+# Takes a job runtime in ms.
+function _async_incr_bg_time {
+ local elapsed_ms="$1"
+ _ASYNC_BG_TIME=$(($_ASYNC_BG_TIME + $elapsed_ms))
+}
+
+# Get the PID of a named future to wait on
+function async_pidof {
+ local name="$1"
+ local inifile="${DEST}/async/${name}.ini"
+
+ if [ -f "$inifile" ]; then
+ iniget $inifile job pid
+ else
+ echo 'UNKNOWN'
+ return 1
+ fi
+}
+
+# Log a message about a job. If the message contains "%command" then the
+# full command line of the job will be substituted in the output
+function async_log {
+ local name="$1"
+ shift
+ local message="$*"
+ local inifile=${DEST}/async/${name}.ini
+ local pid
+ local command
+
+ pid=$(iniget $inifile job pid)
+ command=$(iniget $inifile job command | tr '#' '-')
+ message=$(echo "$message" | sed "s#%command#$command#g")
+
+ echo "[$BASHPID Async ${name}:${pid}]: $message"
+}
+
+# Inner function that actually runs the requested task. We wrap it like this
+# just so we can emit a finish message as soon as the work is done, to make
+# it easier to find the tracking just before an error.
+function async_inner {
+ local name="$1"
+ local rc
+ local fifo="${DEST}/async/${name}.fifo"
+ shift
+ set -o xtrace
+ if $* >${DEST}/async/${name}.log 2>&1; then
+ rc=0
+ set +o xtrace
+ async_log "$name" "finished successfully"
+ else
+ rc=$?
+ set +o xtrace
+ async_log "$name" "FAILED with rc $rc"
+ fi
+ iniset ${DEST}/async/${name}.ini job end_time $(date "+%s%3N")
+ # Block on the fifo until we are signaled to exit by the main process
+ cat $fifo
+ return $rc
+}
+
+# Run something async. Takes a symbolic name and a list of arguments of
+# what to run. Ideally this would be rarely used and async_runfunc() would
+# be used everywhere for readability.
+#
+# This spawns the work in a background worker, records a "future" to be
+# collected by a later call to async_wait()
+function async_run {
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
+ local name="$1"
+ shift
+ local inifile=${DEST}/async/${name}.ini
+ local fifo=${DEST}/async/${name}.fifo
+
+ touch $inifile
+ iniset $inifile job command "$*"
+ iniset $inifile job start_time $(date +%s%3N)
+
+ if [[ "$DEVSTACK_PARALLEL" = "True" ]]; then
+ mkfifo $fifo
+ async_inner $name $* &
+ iniset $inifile job pid $!
+ async_log "$name" "running: %command"
+ $xtrace
+ else
+ iniset $inifile job pid "self"
+ async_log "$name" "Running synchronously: %command"
+ $xtrace
+ $*
+ return $?
+ fi
+}
+
+# Shortcut for running a shell function async. Uses the function name as the
+# async name.
+function async_runfunc {
+ async_run $1 $*
+}
+
+# Dump some information to help debug a failed wait
+function async_wait_dump {
+ local failpid=$1
+
+ echo "=== Wait failure dump from $BASHPID ==="
+ echo "Processes:"
+ ps -f
+ echo "Waiting jobs:"
+ for name in $(ls ${DEST}/async/*.ini); do
+ echo "Job $name :"
+ cat "$name"
+ done
+ echo "Failed PID status:"
+ sudo cat /proc/$failpid/status
+ sudo cat /proc/$failpid/cmdline
+ echo "=== End wait failure dump ==="
+}
+
+# Wait for an async future to complete. May return immediately if already
+# complete, or if the future has already been waited on (avoid this). May
+# block until the future completes.
+function async_wait {
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
+ local pid rc running inifile runtime fifo
+ rc=0
+ for name in $*; do
+ running=$(ls ${DEST}/async/*.ini 2>/dev/null | wc -l)
+ inifile="${DEST}/async/${name}.ini"
+ fifo="${DEST}/async/${name}.fifo"
+
+ if pid=$(async_pidof "$name"); then
+ async_log "$name" "Waiting for completion of %command" \
+ "running on PID $pid ($running other jobs running)"
+ time_start async_wait
+ if [[ "$pid" != "self" ]]; then
+ # Signal the child to go ahead and exit since we are about to
+ # wait for it to collect its status.
+ async_log "$name" "Signaling child to exit"
+ echo WAKEUP > $fifo
+ async_log "$name" "Signaled"
+ # Do not actually call wait if we ran synchronously
+ if wait $pid; then
+ rc=0
+ else
+ rc=$?
+ fi
+ cat ${DEST}/async/${name}.log
+ rm -f $fifo
+ fi
+ time_stop async_wait
+ local start_time
+ local end_time
+ start_time=$(iniget $inifile job start_time)
+ end_time=$(iniget $inifile job end_time)
+ _async_incr_bg_time $(($end_time - $start_time))
+ runtime=$((($end_time - $start_time) / 1000))
+ async_log "$name" "finished %command with result" \
+ "$rc in $runtime seconds"
+ rm -f $inifile
+ if [ $rc -ne 0 ]; then
+ async_wait_dump $pid
+ echo Stopping async wait due to error: $*
+ break
+ fi
+ else
+ # This could probably be removed - it is really just here
+ # to help notice if you wait for something by the wrong
+ # name, but it also shows up for things we didn't start
+ # because they were not enabled.
+ echo Not waiting for async task $name that we never started or \
+ has already been waited for
+ fi
+ done
+
+ $xtrace
+ return $rc
+}
+
+# Check for uncollected futures and wait on them
+function async_cleanup {
+ local name
+
+ if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+ return 0
+ fi
+
+ for inifile in $(find ${DEST}/async -name '*.ini'); do
+ name=$(basename $inifile .ini)
+ echo "WARNING: uncollected async future $name"
+ async_wait $name || true
+ done
+}
+
+# Make sure our async dir is created and clean
+function async_init {
+ local async_dir=${DEST}/async
+
+ # Clean any residue if present from previous runs
+ rm -Rf $async_dir
+
+ # Make sure we have a state directory
+ mkdir -p $async_dir
+}
+
+function async_print_timing {
+ local bg_time_minus_wait
+ local elapsed_time
+ local serial_time
+ local speedup
+
+ if [[ "$DEVSTACK_PARALLEL" != "True" ]]; then
+ return 0
+ fi
+
+ # The logic here is: All the background task time would be
+ # serialized if we did not do them in the background. So we can
+ # add that to the elapsed time for the whole run. However, time we
+ # spend waiting for async things to finish adds to the elapsed
+ # time, but is time where we're not doing anything useful. Thus,
+# we subtract that from the would-be-serialized time.
+
+ bg_time_minus_wait=$((\
+ ($_ASYNC_BG_TIME - ${_TIME_TOTAL[async_wait]}) / 1000))
+ elapsed_time=$(($(date "+%s") - $_TIME_BEGIN))
+ serial_time=$(($elapsed_time + $bg_time_minus_wait))
+
+ echo
+ echo "================="
+ echo " Async summary"
+ echo "================="
+ echo " Time spent in the background minus waits: $bg_time_minus_wait sec"
+ echo " Elapsed time: $elapsed_time sec"
+ echo " Time if we did everything serially: $serial_time sec"
+ echo " Speedup: " $(echo | awk "{print $serial_time / $elapsed_time}")
+}
diff --git a/inc/python b/inc/python
index ea8ff67..8941fd0 100644
--- a/inc/python
+++ b/inc/python
@@ -21,6 +21,14 @@
# project. A null value installs to the system Python directories.
declare -A -g PROJECT_VENV
+# Utility Functions
+# =================
+
+# Joins bash array of extras with commas as expected by other functions
+function join_extras {
+ local IFS=","
+ echo "$*"
+}
# Python Functions
# ================
@@ -54,7 +62,7 @@
$xtrace
local PYTHON_PATH=/usr/local/bin
- ( is_fedora && ! python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin
+ is_suse && PYTHON_PATH=/usr/bin
echo $PYTHON_PATH
}
@@ -80,65 +88,13 @@
function pip_install_gr_extras {
local name=$1
local extras=$2
- local clean_name
- clean_name=$(get_from_global_requirements $name)
- pip_install $clean_name[$extras]
-}
-
-# python3_enabled_for() assumes the service(s) specified as arguments are
-# enabled for python 3 unless explicitly disabled. See python3_disabled_for().
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# python3_enabled_for dir [dir ...]
-function python3_enabled_for {
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
-
- local enabled=1
- local dirs=$@
- local dir
- for dir in ${dirs}; do
- if ! python3_disabled_for "${dir}"; then
- enabled=0
- fi
- done
-
- $xtrace
- return $enabled
-}
-
-# python3_disabled_for() checks if the service(s) specified as arguments are
-# disabled by the user in ``DISABLED_PYTHON3_PACKAGES``.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# Uses global ``DISABLED_PYTHON3_PACKAGES``
-# python3_disabled_for dir [dir ...]
-function python3_disabled_for {
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
-
- local enabled=1
- local dirs=$@
- local dir
- for dir in ${dirs}; do
- [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
- done
-
- $xtrace
- return $enabled
+ local version_constraints
+ version_constraints=$(get_version_constraints_from_global_requirements $name)
+ pip_install $name[$extras]$version_constraints
}
# enable_python3_package() -- no-op for backwards compatibility
#
-# For example:
-# enable_python3_package nova
-#
# enable_python3_package dir [dir ...]
function enable_python3_package {
local xtrace
@@ -150,32 +106,22 @@
$xtrace
}
-# disable_python3_package() adds the services passed as argument to
-# the ``DISABLED_PYTHON3_PACKAGES`` list.
+# disable_python3_package() -- no-op for backwards compatibility
#
-# For example:
-# disable_python3_package swift
-#
-# Uses global ``DISABLED_PYTHON3_PACKAGES``
# disable_python3_package dir [dir ...]
function disable_python3_package {
local xtrace
xtrace=$(set +o | grep xtrace)
set +o xtrace
- local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}"
- local dir
- for dir in $@; do
- disabled_svcs+=",$dir"
- done
- DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs")
+ echo "It is no longer possible to call disable_python3_package()."
$xtrace
}
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``PIP_UPGRADE``, ``*_proxy``,
# Usage:
# pip_install pip_arguments
function pip_install {
@@ -219,64 +165,27 @@
# this works (for now...)
local package_dir=${!#%\[*\]}
- if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
- # TRACK_DEPENDS=True installation creates a circular dependency when
- # we attempt to install virtualenv into a virtualenv, so we must global
- # that installation.
- source $DEST/.venv/bin/activate
- local cmd_pip=$DEST/.venv/bin/pip
+ if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+ local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
- local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
- local sudo_pip="env"
- else
- local cmd_pip
- cmd_pip=$(get_pip_command $PYTHON2_VERSION)
- local sudo_pip="sudo -H"
- if python3_enabled; then
- # Special case some services that have experimental
- # support for python3 in progress, but don't claim support
- # in their classifier
- echo "Check python version for : $package_dir"
- if python3_disabled_for ${package_dir##*/}; then
- echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
- else
- # For everything that is not explicitly blacklisted with
- # DISABLED_PYTHON3_PACKAGES, assume it supports python3
- # and we will let pip sort out the install, regardless of
- # the package being local or remote.
- echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior"
- sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
- cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- fi
- fi
- fi
+ local cmd_pip="python$PYTHON3_VERSION -m pip"
+ # See
+ # https://github.com/pypa/setuptools/issues/2232
+ # http://lists.openstack.org/pipermail/openstack-discuss/2020-August/016905.html
+ # this makes setuptools >=50 use the platform distutils.
+ # We only want to do this on global pip installs, not if
+ # installing in a virtualenv
+ local sudo_pip="sudo -H LC_ALL=en_US.UTF-8 SETUPTOOLS_USE_DISTUTILS=stdlib "
+ echo "Using python $PYTHON3_VERSION to install $package_dir"
fi
cmd_pip="$cmd_pip install"
# Always apply constraints
cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
- # FIXME(dhellmann): Need to force multiple versions of pip for
- # packages like setuptools?
- local pip_version
- pip_version=$(python -c "import pip; \
- print(pip.__version__.split('.')[0])")
- if (( pip_version<6 )); then
- die $LINENO "Currently installed pip version ${pip_version} does not" \
- "meet minimum requirements (>=6)."
- fi
-
$xtrace
- # Also install test requirements
- local install_test_reqs=""
- local test_req="${package_dir}/test-requirements.txt"
- if [[ -e "$test_req" ]]; then
- install_test_reqs="-r $test_req"
- fi
-
# adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
# the same behaviour of setuptools before version 25.0.0.
# related issue: https://github.com/pypa/pip/issues/3874
@@ -286,7 +195,7 @@
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
- $cmd_pip $upgrade $install_test_reqs \
+ $cmd_pip $upgrade \
$@
result=$?
@@ -303,9 +212,8 @@
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- local cmd_pip
- cmd_pip=$(get_pip_command $PYTHON2_VERSION)
- local sudo_pip="sudo -H"
+ local cmd_pip="python$PYTHON3_VERSION -m pip"
+ local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
fi
# don't error if we can't uninstall, it might not be there
$sudo_pip $cmd_pip uninstall -y $name || /bin/true
@@ -323,6 +231,19 @@
echo $required_pkg
}
+# get only version constraints of a package from global requirements file
+# get_version_constraints_from_global_requirements <package>
+function get_version_constraints_from_global_requirements {
+ local package=$1
+ local required_pkg_version_constraint
+ # drop the package name from output (\K)
+ required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+ if [[ $required_pkg_version_constraint == "" ]]; then
+ die $LINENO "Can't find package $package in requirements"
+ fi
+ echo $required_pkg_version_constraint
+}
+
# should we use this library from their git repo, or should we let it
# get pulled in via pip dependencies.
function use_library_from_git {
@@ -371,7 +292,7 @@
#
# use this for non namespaced libraries
#
-# setup_dev_lib [-bindep] <name>
+# setup_dev_lib [-bindep] <name> [<extras>]
function setup_dev_lib {
local bindep
if [[ $1 == -bindep* ]]; then
@@ -380,17 +301,8 @@
fi
local name=$1
local dir=${GITDIR[$name]}
- if python3_enabled; then
- # Turn off Python 3 mode and install the package again,
- # forcing a Python 2 installation. This ensures that all libs
- # being used for development are installed under both versions
- # of Python.
- echo "Installing $name again without Python 3 enabled"
- USE_PYTHON3=False
- setup_develop $bindep $dir
- USE_PYTHON3=True
- fi
- setup_develop $bindep $dir
+ local extras=$2
+ setup_develop $bindep $dir $extras
}
# this should be used if you want to install globally, all libraries should
@@ -538,12 +450,15 @@
}
# Report whether python 3 should be used
+# TODO(frickler): drop this once all legacy uses are removed
function python3_enabled {
- if [[ $USE_PYTHON3 == "True" ]]; then
- return 0
- else
- return 1
- fi
+ return 0
+}
+
+# Provide requested python version and sets PYTHON variable
+function install_python {
+ install_python3
+ export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null)
}
# Install python3 packages
@@ -552,6 +467,12 @@
apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
elif is_suse; then
install_package python3-devel python3-dbm
+ elif is_fedora; then
+ if [ "$os_VENDOR" = "Fedora" ]; then
+ install_package python${PYTHON3_VERSION//.}
+ else
+ install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel
+ fi
fi
}
diff --git a/lib/apache b/lib/apache
index 84cec73..870a65a 100644
--- a/lib/apache
+++ b/lib/apache
@@ -82,26 +82,51 @@
apxs="apxs"
fi
- # Ubuntu xenial is back level on uwsgi so the proxy doesn't
- # actually work. Hence we have to build from source for now.
+ # This varies based on packaged/installed. If we've
+ # pip_installed, then the pip setup will only build a "python"
+ # module that will be either python2 or python3 depending on what
+ # it was built with.
#
- # Centos 7 actually has the module in epel, but there was a big
- # push to disable epel by default. As such, compile from source
- # there as well.
+ # For package installs, the distro ships both plugins and you need
+ # to select the right one ... it will not be autodetected.
+ UWSGI_PYTHON_PLUGIN=python3
- local dir
- dir=$(mktemp -d)
- pushd $dir
- pip_install uwsgi
- pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
- local uwsgi
- uwsgi=$(ls uwsgi*)
- tar xvf $uwsgi
- cd uwsgi*/apache2
- sudo $apxs -i -c mod_proxy_uwsgi.c
- popd
- # delete the temp directory
- sudo rm -rf $dir
+ if is_ubuntu; then
+ local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
+ if [[ "$DISTRO" == 'bionic' ]]; then
+ pkg_list="${pkg_list} uwsgi-plugin-python"
+ fi
+ install_package ${pkg_list}
+ elif is_fedora; then
+ # Note httpd comes with mod_proxy_uwsgi and it is loaded by
+ # default; the mod_proxy_uwsgi package actually conflicts now.
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1574335
+ #
+ # Thus there is nothing else to do after this install
+ install_package uwsgi \
+ uwsgi-plugin-python3
+ elif [[ $os_VENDOR =~ openSUSE ]]; then
+ install_package uwsgi \
+ uwsgi-python3 \
+ apache2-mod_uwsgi
+ else
+ # Compile uwsgi from source.
+ local dir
+ dir=$(mktemp -d)
+ pushd $dir
+ pip_install uwsgi
+ pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
+ local uwsgi
+ uwsgi=$(ls uwsgi*)
+ tar xvf $uwsgi
+ cd uwsgi*/apache2
+ sudo $apxs -i -c mod_proxy_uwsgi.c
+ popd
+ # delete the temp directory
+ sudo rm -rf $dir
+ UWSGI_PYTHON_PLUGIN=python
+ fi
if is_ubuntu || is_suse ; then
# we've got to enable proxy and proxy_uwsgi for this to work
@@ -121,17 +146,13 @@
if is_ubuntu; then
# Install apache2, which is NOPRIME'd
install_package apache2
- if python3_enabled; then
- if is_package_installed libapache2-mod-wsgi; then
- uninstall_package libapache2-mod-wsgi
- fi
- install_package libapache2-mod-wsgi-py3
- else
- install_package libapache2-mod-wsgi
+ if is_package_installed libapache2-mod-wsgi; then
+ uninstall_package libapache2-mod-wsgi
fi
+ install_package libapache2-mod-wsgi-py3
elif is_fedora; then
sudo rm -f /etc/httpd/conf.d/000-*
- install_package httpd mod_wsgi
+ install_package httpd python3-mod_wsgi
# For consistency with Ubuntu, switch to the worker mpm, as
# the default is event
sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
@@ -265,7 +286,7 @@
# configured after graceful shutdown
iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins python
+ iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -318,7 +339,7 @@
iniset "$file" uwsgi die-on-term true
iniset "$file" uwsgi exit-on-reload false
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins python
+ iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -351,6 +372,25 @@
restart_apache_server
}
+# Write a straight-through proxy for a service that runs locally and just needs
+# to be reachable via the main http proxy at $loc
+function write_local_proxy_http_config {
+ local name=$1
+ local url=$2
+ local loc=$3
+ local apache_conf
+ apache_conf=$(apache_site_config_for $name)
+
+ enable_apache_mod proxy
+ enable_apache_mod proxy_http
+
+ echo "KeepAlive Off" | sudo tee $apache_conf
+ echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
+ echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf
+ enable_apache_site $name
+ restart_apache_server
+}
+
function remove_uwsgi_config {
local file=$1
local wsgi=$2
diff --git a/lib/cinder b/lib/cinder
index 2e6e97a..cfa3693 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -31,6 +31,7 @@
CINDER_DRIVER=${CINDER_DRIVER:-default}
CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins
CINDER_BACKENDS=$TOP_DIR/lib/cinder_backends
+CINDER_BACKUPS=$TOP_DIR/lib/cinder_backups
# grab plugin config if specified via cinder_driver
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
@@ -87,17 +88,32 @@
CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
-# Centos7 and OpenSUSE switched to using LIO and that's all that's supported,
-# although the tgt bits are in EPEL and OpenSUSE we don't want that for CI
+# Default to lioadm
+CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+
+# Bionic needs to default to tgtadm until support is dropped within devstack
+# as the rtslib-fb-targetctl service doesn't start after installing lioadm.
+if is_ubuntu && [[ "$DISTRO" == "bionic" ]]; then
+ CINDER_ISCSI_HELPER=tgtadm
+fi
+
+# EL and SUSE should only use lioadm
if is_fedora || is_suse; then
- CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
die "lioadm is the only valid Cinder target_helper config on this platform"
fi
-else
- CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
fi
+# For backward compatibility
+# Before CINDER_BACKUP_DRIVER was introduced, ceph backup driver was configured
+# along with ceph backend driver.
+if [[ -z "${CINDER_BACKUP_DRIVER}" && "$CINDER_ENABLED_BACKENDS" =~ "ceph" ]]; then
+ CINDER_BACKUP_DRIVER=ceph
+fi
+
+# Supported backup drivers are in lib/cinder_backups
+CINDER_BACKUP_DRIVER=${CINDER_BACKUP_DRIVER:-swift}
+
# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi
# reference should be cleaned up to more accurately refer to uwsgi.
CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True}
@@ -113,6 +129,15 @@
done
fi
+# Source the backup driver
+if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+ if [[ -r $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER ]]; then
+ source $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER
+ else
+ die "cinder backup driver $CINDER_BACKUP_DRIVER is not supported"
+ fi
+fi
+
# Environment variables to configure the image-volume cache
CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
@@ -189,6 +214,12 @@
done
fi
+ if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+ if type cleanup_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+ cleanup_cinder_backup_$CINDER_BACKUP_DRIVER
+ fi
+ fi
+
stop_process "c-api"
remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
}
@@ -236,6 +267,11 @@
iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
+ # Avoid RPC timeouts in slow CI and test environments by doubling the
+ # default response timeout set by RPC clients. See bug #1873234 for more
+ # details and example failures.
+ iniset $CINDER_CONF DEFAULT rpc_response_timeout 120
+
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
local enabled_backends=""
local default_name=""
@@ -258,13 +294,12 @@
configure_cinder_image_volume_cache
fi
- if is_service_enabled c-bak; then
- # NOTE(mriedem): The default backup driver uses swift and if we're
- # on a subnode we might not know if swift is enabled, but chances are
- # good that it is on the controller so configure the backup service
- # to use it. If we want to configure the backup service to use
- # a non-swift driver, we'll likely need environment variables.
- iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+ if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+ if type configure_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+ configure_cinder_backup_$CINDER_BACKUP_DRIVER
+ else
+ die "configure_cinder_backup_$CINDER_BACKUP_DRIVER doesn't exist in $CINDER_BACKUPS/$CINDER_BACKUP_DRIVER"
+ fi
fi
if is_service_enabled ceilometer; then
@@ -308,10 +343,6 @@
iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE
fi
- if [ "$GLANCE_V1_ENABLED" != "True" ]; then
- iniset $CINDER_CONF DEFAULT glance_api_version 2
- fi
-
# Set nova credentials (used for os-assisted-snapshots)
configure_keystone_authtoken_middleware $CINDER_CONF nova nova
iniset $CINDER_CONF nova region_name "$REGION_NAME"
@@ -406,6 +437,12 @@
done
fi
+ if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
+ if type init_cinder_backup_$CINDER_BACKUP_DRIVER >/dev/null 2>&1; then
+ init_cinder_backup_$CINDER_BACKUP_DRIVER
+ fi
+ fi
+
mkdir -p $CINDER_STATE_PATH/volumes
}
@@ -416,7 +453,7 @@
if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
install_package tgt
elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
- if [[ ${DISTRO} == "bionic" ]]; then
+ if is_ubuntu; then
# TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
sudo mkdir -p /etc/target
@@ -489,7 +526,7 @@
start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
fi
else
- run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
+ run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
fi
fi
@@ -535,6 +572,14 @@
OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name}
fi
done
+
+ # Increase quota for the service project if glance is using cinder,
+ # since it's likely to occasionally go above the default 10 in parallel
+ # test execution.
+ if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+ openstack --os-region-name="$REGION_NAME" \
+ quota set --volumes 50 "$SERVICE_PROJECT_NAME"
+ fi
fi
}
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 33c9706..0b46573 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -6,12 +6,6 @@
# Enable with:
#
# CINDER_ENABLED_BACKENDS+=,ceph:ceph
-#
-# Optional parameters:
-# CINDER_BAK_CEPH_POOL=<pool-name>
-# CINDER_BAK_CEPH_USER=<user>
-# CINDER_BAK_CEPH_POOL_PG=<pg-num>
-# CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
# Dependencies:
#
@@ -29,11 +23,6 @@
# Defaults
# --------
-CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
-CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
-CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
-CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
-
# Entry Points
# ------------
@@ -52,27 +41,6 @@
iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
iniset $CINDER_CONF DEFAULT glance_api_version 2
-
- if is_service_enabled c-bak; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
- if [ "$REMOTE_CEPH" = "False" ]; then
- # Configure Cinder backup service options, ceph pool, ceph user and ceph key
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
- if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
- fi
- fi
- sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
- sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-
- iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
- iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
- iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
- iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
- iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
- iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
- iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
- fi
}
# Restore xtrace
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
new file mode 100644
index 0000000..e4003c0
--- /dev/null
+++ b/lib/cinder_backups/ceph
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# lib/cinder_backups/ceph
+# Configure the ceph backup driver
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=ceph
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_CEPH=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+function configure_cinder_backup_ceph {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+ if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+ iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+ iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+ iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+}
+
+# init_cinder_backup_ceph: nothing to do
+# cleanup_cinder_backup_ceph: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift
new file mode 100644
index 0000000..6fb2486
--- /dev/null
+++ b/lib/cinder_backups/s3_swift
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# lib/cinder_backups/s3_swift
+# Configure the s3 backup driver with swift s3api
+#
+# TODO: create lib/cinder_backups/s3 for external s3 compatible storage
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=s3_swift
+# enable_service s3api s-proxy s-object s-container s-account
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_cinder_backup_s3_swift {
+ # This configuration requires swift and s3api. If we're
+ # on a subnode we might not know if they are enabled
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT"
+}
+
+function init_cinder_backup_s3_swift {
+ openstack ec2 credential create
+ iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credential list -c Access -f value)"
+ iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credential list -c Secret -f value)"
+ if is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE"
+ fi
+}
+
+# cleanup_cinder_backup_s3_swift: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_S3_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
new file mode 100644
index 0000000..d7c977e
--- /dev/null
+++ b/lib/cinder_backups/swift
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# lib/cinder_backups/swift
+# Configure the swift backup driver
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=swift
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+
+function configure_cinder_backup_swift {
+ # NOTE(mriedem): The default backup driver uses swift and if we're
+ # on a subnode we might not know if swift is enabled, but chances are
+ # good that it is on the controller so configure the backup service
+ # to use it.
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+}
+
+# init_cinder_backup_swift: nothing to do
+# cleanup_cinder_backup_swift: nothing to do
+
+
+# Restore xtrace
+$_XTRACE_CINDER_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS
deleted file mode 100644
index 92135e7..0000000
--- a/lib/cinder_plugins/XenAPINFS
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/XenAPINFS
-# Configure the XenAPINFS driver
-
-# Enable with:
-#
-# CINDER_DRIVER=XenAPINFS
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
- iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver"
- iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
- iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
- iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
- iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
- iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_XENAPINFS
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog
deleted file mode 100644
index 558de46..0000000
--- a/lib/cinder_plugins/sheepdog
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/sheepdog
-# Configure the sheepdog driver
-
-# Enable with:
-#
-# CINDER_DRIVER=sheepdog
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
- iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_SHEEPDOG
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 4d0f5f3..d4969d7 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -15,15 +15,17 @@
register_database mysql
-MYSQL_SERVICE_NAME=mysql
-if is_fedora && ! is_oraclelinux; then
- MYSQL_SERVICE_NAME=mariadb
-elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
- # Older mariadb packages on SLES 12 provided mysql.service. The
- # newer ones on SLES 12 and 15 use mariadb.service; they also
- # provide a mysql.service symlink for backwards-compatibility, but
- # let's not rely on that.
- MYSQL_SERVICE_NAME=mariadb
+if [[ -z "$MYSQL_SERVICE_NAME" ]]; then
+ MYSQL_SERVICE_NAME=mysql
+ if is_fedora && ! is_oraclelinux; then
+ MYSQL_SERVICE_NAME=mariadb
+ elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
+ # Older mariadb packages on SLES 12 provided mysql.service. The
+ # newer ones on SLES 12 and 15 use mariadb.service; they also
+ # provide a mysql.service symlink for backwards-compatibility, but
+ # let's not rely on that.
+ MYSQL_SERVICE_NAME=mariadb
+ fi
fi
# Functions
@@ -92,8 +94,25 @@
# because the package might have been installed already.
sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+ # In case of Mariadb, giving hostname in arguments causes permission
+ # problems as it expects connection through socket
+ if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ local cmd_args="-uroot -p$DATABASE_PASSWORD "
+ else
+ local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 "
+ fi
+
+ # In mariadb e.g. on Ubuntu socket plugin is used for authentication
+ # as root so it works only as sudo. To restore old "mysql like" behaviour,
+ # we need to change auth plugin for root user
+ if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+ sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+ sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+ fi
+ # Create DB user if it does not already exist
+ sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
# Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
- sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+ sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
# Now update ``my.cnf`` for some local needs and restart the mysql service
@@ -103,8 +122,6 @@
iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
iniset -sudo $my_conf mysqld default-storage-engine InnoDB
iniset -sudo $my_conf mysqld max_connections 1024
- iniset -sudo $my_conf mysqld query_cache_type OFF
- iniset -sudo $my_conf mysqld query_cache_size 0
if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
echo_summary "Enabling MySQL query logging"
@@ -148,18 +165,24 @@
[client]
user=$DATABASE_USER
password=$DATABASE_PASSWORD
-host=$MYSQL_HOST
EOF
+
+ if ! is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then
+ echo "host=$MYSQL_HOST" >> $HOME/.my.cnf
+ fi
chmod 0600 $HOME/.my.cnf
fi
# Install mysql-server
if is_oraclelinux; then
install_package mysql-community-server
- elif is_fedora || is_suse; then
+ elif is_fedora; then
+ install_package mariadb-server mariadb-devel
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_suse; then
install_package mariadb-server
sudo systemctl enable $MYSQL_SERVICE_NAME
elif is_ubuntu; then
- install_package mysql-server
+ install_package $MYSQL_SERVICE_NAME-server
else
exit_distro_not_supported "mysql installation"
fi
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
new file mode 100644
index 0000000..618834b
--- /dev/null
+++ b/lib/databases/postgresql
@@ -0,0 +1,137 @@
+#!/bin/bash
+#
+# lib/databases/postgresql
+# Functions to control the configuration and operation of the **PostgreSQL** database backend
+
+# Dependencies:
+#
+# - DATABASE_{HOST,USER,PASSWORD} must be defined
+
+# Save trace setting
+_XTRACE_PG=$(set +o | grep xtrace)
+set +o xtrace
+
+
+MAX_DB_CONNECTIONS=${MAX_DB_CONNECTIONS:-200}
+
+
+register_database postgresql
+
+
+# Functions
+# ---------
+
+function get_database_type_postgresql {
+ echo postgresql
+}
+
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database_postgresql {
+ stop_service postgresql
+ if is_ubuntu; then
+ # Get ruthless with postgresql
+ apt_get purge -y postgresql*
+ return
+ elif is_fedora || is_suse; then
+ uninstall_package postgresql-server
+ else
+ return
+ fi
+}
+
+function recreate_database_postgresql {
+ local db=$1
+ # Avoid unsightly error when calling dropdb when the database doesn't exist
+ psql -h$DATABASE_HOST -U$DATABASE_USER -dtemplate1 -c "DROP DATABASE IF EXISTS $db"
+ createdb -h $DATABASE_HOST -U$DATABASE_USER -l C -T template0 -E utf8 $db
+}
+
+function configure_database_postgresql {
+ local pg_conf pg_dir pg_hba check_role version
+ echo_summary "Configuring and starting PostgreSQL"
+ if is_fedora; then
+ pg_hba=/var/lib/pgsql/data/pg_hba.conf
+ pg_conf=/var/lib/pgsql/data/postgresql.conf
+ if ! sudo [ -e $pg_hba ]; then
+ sudo postgresql-setup initdb
+ fi
+ elif is_ubuntu; then
+ version=`psql --version | cut -d ' ' -f3 | cut -d. -f1-2`
+ if vercmp $version '>=' 9.3; then
+ if [ -z "`pg_lsclusters -h`" ]; then
+ echo 'No PostgreSQL clusters exist; will create one'
+ sudo pg_createcluster $version main --start
+ fi
+ fi
+ pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
+ pg_hba=$pg_dir/pg_hba.conf
+ pg_conf=$pg_dir/postgresql.conf
+ elif is_suse; then
+ pg_hba=/var/lib/pgsql/data/pg_hba.conf
+ pg_conf=/var/lib/pgsql/data/postgresql.conf
+ # initdb is called when postgresql is first started
+ sudo [ -e $pg_hba ] || start_service postgresql
+ else
+ exit_distro_not_supported "postgresql configuration"
+ fi
+ # Listen on all addresses
+ sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf
+ # Set max_connections
+ sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf
+ # Do password auth from all IPv4 clients
+ sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba
+ # Do password auth for all IPv6 clients
+ sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba
+ restart_service postgresql
+
+ # Create the role if it's not here or else alter it.
+ check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'")
+ if [[ ${check_role} == *HERE ]];then
+ sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
+ else
+ sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
+ fi
+}
+
+function install_database_postgresql {
+ echo_summary "Installing postgresql"
+ deprecated "Use of postgresql in devstack is deprecated, and will be removed during the Pike cycle"
+ local pgpass=$HOME/.pgpass
+ if [[ ! -e $pgpass ]]; then
+ cat <<EOF > $pgpass
+*:*:*:$DATABASE_USER:$DATABASE_PASSWORD
+EOF
+ chmod 0600 $pgpass
+ else
+ sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass
+ fi
+ if is_ubuntu; then
+ install_package postgresql
+ elif is_fedora || is_suse; then
+ install_package postgresql-server
+ if is_fedora; then
+ sudo systemctl enable postgresql
+ fi
+ else
+ exit_distro_not_supported "postgresql installation"
+ fi
+}
+
+function install_database_python_postgresql {
+ # Install Python client module
+ pip_install_gr psycopg2
+ ADDITIONAL_VENV_PACKAGES+=",psycopg2"
+}
+
+function database_connection_url_postgresql {
+ local db=$1
+ echo "$BASE_SQL_CONN/$db?client_encoding=utf8"
+}
+
+
+# Restore xtrace
+$_XTRACE_PG
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/dstat b/lib/dstat
index fe38d75..eb03ae0 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -9,6 +9,7 @@
# ``stack.sh`` calls the entry points in this order:
#
+# - install_dstat
# - start_dstat
# - stop_dstat
@@ -16,6 +17,14 @@
_XTRACE_DSTAT=$(set +o | grep xtrace)
set +o xtrace
+# install_dstat() - Install prerequisites for dstat services
+function install_dstat {
+ if is_service_enabled memory_tracker; then
+ # Install python libraries required by tools/mlock_report.py
+ pip_install_gr psutil
+ fi
+}
+
# start_dstat() - Start running processes
function start_dstat {
# A better kind of sysstat, with the top process per time slice
@@ -26,10 +35,10 @@
# to your localrc
run_process memory_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root"
- # remove support for the old name when it's no longer used (sometime in Queens)
+ # TODO(jh): Fail when using the old service name otherwise consumers might
+ # never notice that it has been removed.
if is_service_enabled peakmem_tracker; then
- deprecated "Use of peakmem_tracker in devstack is deprecated, use memory_tracker instead"
- run_process peakmem_tracker "$TOP_DIR/tools/memory_tracker.sh" "" "root"
+ die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead"
fi
}
diff --git a/lib/glance b/lib/glance
index 54d3276..e789aff 100644
--- a/lib/glance
+++ b/lib/glance
@@ -41,22 +41,58 @@
GLANCE_BIN_DIR=$(get_python_exec_prefix)
fi
+# Cinder for Glance
+USE_CINDER_FOR_GLANCE=$(trueorfalse False USE_CINDER_FOR_GLANCE)
+# GLANCE_CINDER_DEFAULT_BACKEND should be one of the values
+# from CINDER_ENABLED_BACKENDS
+GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1}
+GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance
+# NOTE (abhishekk): For opensuse, data files are stored in a different directory
+if is_opensuse; then
+ GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance
+fi
+# Glance multi-store configuration
+# Boolean flag to enable multiple store configuration for glance
+GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES)
+
+# Comma separated list for configuring multiple file stores of glance,
+# for example; GLANCE_MULTIPLE_FILE_STORES = fast,cheap,slow
+GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast}
+
+# Default store/backend for glance, must be one of the store specified
+# in GLANCE_MULTIPLE_FILE_STORES option.
+GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
+
GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+
+# Full Glance functionality requires running in standalone mode. If we are
+# not in uwsgi mode, then we are standalone, otherwise allow separate control.
+if [[ "$WSGI_MODE" != "uwsgi" ]]; then
+ GLANCE_STANDALONE=True
+fi
+GLANCE_STANDALONE=${GLANCE_STANDALONE:-False}
+
+# File path for each store specified in GLANCE_MULTIPLE_FILE_STORES, the store
+# identifier will be appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES
+# has fast,cheap specified then filepath will be generated like $DATA_DIR/glance/fast
+# and $DATA_DIR/glance/cheap.
+GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance}
GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
+GLANCE_NFS_MOUNTPOINT=$GLANCE_IMAGE_DIR/mnt
GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks}
+GLANCE_STAGING_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_staging_store}
+GLANCE_TASKS_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/os_glance_tasks_store}
+
+GLANCE_USE_IMPORT_WORKFLOW=$(trueorfalse False GLANCE_USE_IMPORT_WORKFLOW)
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
-GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
-GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
-GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf
-GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False}
if is_service_enabled tls-proxy; then
GLANCE_SERVICE_PROTOCOL="https"
@@ -69,8 +105,6 @@
GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
-GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api
GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini
# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet
@@ -96,29 +130,147 @@
# cleanup_glance() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_glance {
- # delete image files (glance)
- sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR
+ # delete image files (glance) and all of the glance-remote temporary
+ # storage
+ sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR "${DATA_DIR}/glance-remote"
+
+ # Cleanup multiple stores directories
+ if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
+ local store file_dir
+ for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+ file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+ sudo rm -rf $file_dir
+ done
+
+ # Cleanup reserved stores directories
+ sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
+ fi
+}
+
+# Set multiple cinder store related config options for each of the cinder store
+#
+function configure_multiple_cinder_stores {
+
+ local be be_name be_type enabled_backends
+ for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
+ be_type=${be%%:*}
+ be_name=${be##*:}
+ enabled_backends+="${be_name}:cinder,"
+
+ set_common_cinder_store_params $be_name
+ iniset $GLANCE_API_CONF $be_name cinder_volume_type ${be_name}
+ if [[ "$be_type" == "nfs" ]]; then
+ mkdir -p "$GLANCE_NFS_MOUNTPOINT"
+ iniset $GLANCE_API_CONF $be_name cinder_mount_point_base "$GLANCE_NFS_MOUNTPOINT"
+ fi
+ done
+ iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1}
+ iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_CINDER_DEFAULT_BACKEND
+}
+
+# Set common cinder store options to given config section
+#
+# Arguments:
+# config_section
+#
+function set_common_cinder_store_params {
+ local config_section="$1"
+ iniset $GLANCE_API_CONF $config_section cinder_store_auth_address $KEYSTONE_SERVICE_URI_V3
+ iniset $GLANCE_API_CONF $config_section cinder_store_user_name glance
+ iniset $GLANCE_API_CONF $config_section cinder_store_password $SERVICE_PASSWORD
+ iniset $GLANCE_API_CONF $config_section cinder_store_project_name $SERVICE_PROJECT_NAME
+}
+
+# Configure multiple file stores options for each file store
+#
+# Arguments:
+#
+function configure_multiple_file_stores {
+ local store enabled_backends
+ enabled_backends=""
+ for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+ enabled_backends+="${store}:file,"
+ done
+ iniset $GLANCE_API_CONF DEFAULT enabled_backends ${enabled_backends::-1}
+
+ # Glance multiple store Store specific configs
+ iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND
+ local store
+ for store in $(echo $glance_multiple_file_stores | tr "," "\n"); do
+ iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+ done
+}
+
+# Set reserved stores for glance
+function configure_reserved_stores {
+ iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/"
+ iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/"
+}
+
+# Copy rootwrap file from glance_store/etc/glance to /etc/glance
+#
+# Arguments:
+# source_path Source path to copy rootwrap files from
+#
+function copy_rootwrap {
+ local source_path="$1"
+ # Make glance configuration directory if it does not exist
+ sudo install -d -o $STACK_USER $GLANCE_CONF_DIR
+ cp -r $source_path/rootwrap.* $GLANCE_CONF_DIR/
+}
+
+# Set glance_store related config options
+#
+# Arguments:
+# USE_CINDER_FOR_GLANCE
+# GLANCE_ENABLE_MULTIPLE_STORES
+#
+function configure_glance_store {
+ local use_cinder_for_glance="$1"
+ local glance_enable_multiple_stores="$2"
+ local be
+
+ if [[ "$glance_enable_multiple_stores" == "False" ]]; then
+ # Configure traditional glance_store
+ if [[ "$use_cinder_for_glance" == "True" ]]; then
+ # set common glance_store parameters
+ iniset $GLANCE_API_CONF glance_store stores "cinder,file,http"
+ iniset $GLANCE_API_CONF glance_store default_store cinder
+
+ # set cinder related store parameters
+ set_common_cinder_store_params glance_store
+ # set nfs mount_point dir
+ for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
+ local be_name=${be##*:}
+ if [[ "$be_name" == "nfs" ]]; then
+ mkdir -p $GLANCE_NFS_MOUNTPOINT
+ iniset $GLANCE_API_CONF glance_store cinder_mount_point_base $GLANCE_NFS_MOUNTPOINT
+ fi
+ done
+ fi
+ # Store specific configs
+ iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ else
+ if [[ "$use_cinder_for_glance" == "True" ]]; then
+ # Configure multiple cinder stores for glance
+ configure_multiple_cinder_stores
+ else
+ # Configure multiple file stores for glance
+ configure_multiple_file_stores
+ fi
+ # Configure reserved stores
+ configure_reserved_stores
+ fi
}
# configure_glance() - Set config files, create data dirs, etc
function configure_glance {
sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
- # Set non-default configuration options for registry
- iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
- iniset $GLANCE_REGISTRY_CONF DEFAULT workers $API_WORKERS
+ # Set non-default configuration options for the API server
local dburl
dburl=`database_connection_url glance`
- iniset $GLANCE_REGISTRY_CONF database connection $dburl
- iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
- iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
- configure_keystone_authtoken_middleware $GLANCE_REGISTRY_CONF glance
- iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2
- iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
- iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
- # Set non-default configuration options for the API server
iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_API_CONF database connection $dburl
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
@@ -128,23 +280,12 @@
configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2
iniset_rpc_backend glance $GLANCE_API_CONF
- if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
- iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
- fi
if [ "$VIRT_DRIVER" = 'libvirt' ] && [ "$LIBVIRT_TYPE" = 'parallels' ]; then
iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso,ploop"
fi
- # NOTE(flaper87): To uncomment as soon as all services consuming Glance are
- # able to consume V2 entirely.
- if [ "$GLANCE_V1_ENABLED" != "True" ]; then
- iniset $GLANCE_API_CONF DEFAULT enable_v1_api False
- fi
-
- # Store specific configs
- iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
- iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
+ # Configure glance_store
+ configure_glance_store $USE_CINDER_FOR_GLANCE $GLANCE_ENABLE_MULTIPLE_STORES
# CORS feature support - to allow calls from Horizon by default
if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
@@ -153,30 +294,27 @@
iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST"
fi
- # Store the images in swift if enabled.
- if is_service_enabled s-proxy; then
- iniset $GLANCE_API_CONF glance_store default_store swift
- iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
- if python3_enabled; then
- iniset $GLANCE_API_CONF glance_store swift_store_auth_insecure True
- fi
+ # No multiple stores for swift yet
+ if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "False" ]]; then
+ # Store the images in swift if enabled.
+ if is_service_enabled s-proxy; then
+ iniset $GLANCE_API_CONF glance_store default_store swift
+ iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
- iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
- iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
- iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
- iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
+ iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
+ iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
+ iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+ if is_service_enabled tls-proxy; then
+ iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE
+ fi
+ iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
- iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
- iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
- if python3_enabled; then
- # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag
- # or ability to specify the CACERT. So fallback to http:// url
- iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address ${KEYSTONE_SERVICE_URI/https/http}/v3
- else
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
fi
- iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
fi
# We need to tell glance what it's public endpoint is so that the version
@@ -185,32 +323,22 @@
if is_service_enabled tls-proxy; then
iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
- iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
-
- iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
- iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
- fi
-
- if is_service_enabled tls-proxy; then
- iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https
+ iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI
fi
# Format logging
setup_logging $GLANCE_API_CONF
- setup_logging $GLANCE_REGISTRY_CONF
- cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
# Set non-default configuration options for the glance-cache
iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
- iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI
+ iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
- iniset $GLANCE_CACHE_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
# Store specific confs
iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
@@ -220,7 +348,6 @@
iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin
iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject
- cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
@@ -233,10 +360,17 @@
iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
fi
- if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+ if [[ "$GLANCE_STANDALONE" == False ]]; then
write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image"
+ # Grab our uwsgi listen address and use that to fill out our
+ # worker_self_reference_url config
+ iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url \
+ $(awk '-F= ' '/^http-socket/ { print "http://"$2}' \
+ $GLANCE_UWSGI_CONF)
else
+ write_local_proxy_http_config glance "http://$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT_INT" "/image"
iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
+ iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
fi
}
@@ -301,11 +435,26 @@
# install_glance() - Collect source and prepare
function install_glance {
+ local glance_store_extras=()
+
+ if is_service_enabled cinder; then
+ glance_store_extras=("cinder" "${glance_store_extras[@]}")
+ fi
+
+ if is_service_enabled swift; then
+ glance_store_extras=("swift" "${glance_store_extras[@]}")
+ fi
+
# Install glance_store from git so we make sure we're testing
# the latest code.
if use_library_from_git "glance_store"; then
git_clone_by_name "glance_store"
- setup_dev_lib "glance_store"
+ setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}")
+ copy_rootwrap ${DEST}/glance_store/etc/glance
+ else
+ # we still need to pass extras
+ pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}")
+ copy_rootwrap $GLANCE_STORE_ROOTWRAP_BASE_DIR
fi
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
@@ -313,6 +462,67 @@
setup_develop $GLANCE_DIR
}
+# glance_remote_conf() - Return the path to an alternate config file for
+# the remote glance clone
+function glance_remote_conf {
+ echo $(dirname "${GLANCE_CONF_DIR}")/glance-remote/$(basename "$1")
+}
+
+# start_glance_remote_clone() - Clone the regular glance api worker
+function start_glance_remote_clone {
+ local glance_remote_conf_dir glance_remote_port remote_data
+ local glance_remote_uwsgi
+
+ glance_remote_conf_dir="$(glance_remote_conf "")"
+ glance_remote_port=$(get_random_port)
+ glance_remote_uwsgi="$(glance_remote_conf $GLANCE_UWSGI_CONF)"
+
+ # Clone the existing ready-to-go glance-api setup
+ sudo rm -Rf "$glance_remote_conf_dir"
+ sudo cp -r "$GLANCE_CONF_DIR" "$glance_remote_conf_dir"
+ sudo chown $STACK_USER -R "$glance_remote_conf_dir"
+
+ # Point this worker at different data dirs
+ remote_data="${DATA_DIR}/glance-remote"
+ mkdir -p $remote_data/os_glance_tasks_store \
+ "${remote_data}/os_glance_staging_store"
+ iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_staging_store \
+ filesystem_store_datadir "${remote_data}/os_glance_staging_store"
+ iniset $(glance_remote_conf "$GLANCE_API_CONF") os_glance_tasks_store \
+ filesystem_store_datadir "${remote_data}/os_glance_tasks_store"
+
+ # Change our uwsgi to our new port
+ sed -ri "s/^(http-socket.*):[0-9]+/\1:$glance_remote_port/" \
+ "$glance_remote_uwsgi"
+
+ # Update the self-reference url with our new port
+ iniset $(glance_remote_conf $GLANCE_API_CONF) DEFAULT \
+ worker_self_reference_url \
+ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+ "$glance_remote_uwsgi")
+
+ # We need to create the systemd service for the clone, but then
+ # change it to include an Environment line to point the WSGI app
+ # at the alternate config directory.
+ write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \
+ --procname-prefix \
+ glance-api-remote \
+ --ini $glance_remote_uwsgi" \
+ "" "$STACK_USER"
+ iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
+ "Service" "Environment" \
+ "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir"
+
+ # Reload and restart with the new config
+ $SYSTEMCTL daemon-reload
+ $SYSTEMCTL restart devstack@g-api-r
+
+ get_or_create_service glance_remote image_remote "Alternate glance"
+ get_or_create_endpoint image_remote $REGION_NAME \
+ $(awk '-F= ' '/^http-socket/ { print "http://"$2 }' \
+ $glance_remote_uwsgi)
+}
+
# start_glance() - Start running processes
function start_glance {
local service_protocol=$GLANCE_SERVICE_PROTOCOL
@@ -320,16 +530,19 @@
if [[ "$WSGI_MODE" != "uwsgi" ]]; then
start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
fi
- start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
fi
- run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
- if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
+ if [[ "$GLANCE_STANDALONE" == False ]]; then
+ run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
else
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
fi
+ if is_service_enabled g-api-r; then
+ echo "Starting the g-api-r clone service..."
+ start_glance_remote_clone
+ fi
+
echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then
die $LINENO "g-api did not start"
@@ -339,7 +552,7 @@
# stop_glance() - Stop running processes
function stop_glance {
stop_process g-api
- stop_process g-reg
+ stop_process g-api-r
}
# Restore xtrace
diff --git a/lib/infra b/lib/infra
index cf003cc..b983f2b 100644
--- a/lib/infra
+++ b/lib/infra
@@ -29,7 +29,7 @@
# install_infra() - Collect source and prepare
function install_infra {
local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
- [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
+ [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
# We don't care about testing git pbr in the requirements venv.
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
diff --git a/lib/keystone b/lib/keystone
index 9ceb829..66e867c 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -115,7 +115,7 @@
KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI
# V3 URIs
-KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3
+KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3
KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3
# Security compliance
@@ -318,25 +318,25 @@
local admin_role="admin"
local member_role="member"
- get_or_add_user_domain_role $admin_role $admin_user default
+ async_run ks-domain-role get_or_add_user_domain_role $admin_role $admin_user default
# Create service project/role
get_or_create_domain "$SERVICE_DOMAIN_NAME"
- get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
+ async_run ks-project get_or_create_project "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME"
# Service role, so service users do not have to be admins
- get_or_create_role service
+ async_run ks-service get_or_create_role service
# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
# The admin role in swift allows a user to act as an admin for their project,
# but ResellerAdmin is needed for a user to act as any project. The name of this
# role is also configurable in swift-proxy.conf
- get_or_create_role ResellerAdmin
+ async_run ks-reseller get_or_create_role ResellerAdmin
# another_role demonstrates that an arbitrary role may be created and used
# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
local another_role="anotherrole"
- get_or_create_role $another_role
+ async_run ks-anotherrole get_or_create_role $another_role
# invisible project - admin can't see this one
local invis_project
@@ -349,10 +349,12 @@
demo_user=$(get_or_create_user "demo" \
"$ADMIN_PASSWORD" "default" "demo@example.com")
- get_or_add_user_project_role $member_role $demo_user $demo_project
- get_or_add_user_project_role $admin_role $admin_user $demo_project
- get_or_add_user_project_role $another_role $demo_user $demo_project
- get_or_add_user_project_role $member_role $demo_user $invis_project
+ async_wait ks-{domain-role,domain,project,service,reseller,anotherrole}
+
+ async_run ks-demo-member get_or_add_user_project_role $member_role $demo_user $demo_project
+ async_run ks-demo-admin get_or_add_user_project_role $admin_role $admin_user $demo_project
+ async_run ks-demo-another get_or_add_user_project_role $another_role $demo_user $demo_project
+ async_run ks-demo-invis get_or_add_user_project_role $member_role $demo_user $invis_project
# alt_demo
local alt_demo_project
@@ -361,9 +363,9 @@
alt_demo_user=$(get_or_create_user "alt_demo" \
"$ADMIN_PASSWORD" "default" "alt_demo@example.com")
- get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
- get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
- get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
+ async_run ks-alt-member get_or_add_user_project_role $member_role $alt_demo_user $alt_demo_project
+ async_run ks-alt-admin get_or_add_user_project_role $admin_role $admin_user $alt_demo_project
+ async_run ks-alt-another get_or_add_user_project_role $another_role $alt_demo_user $alt_demo_project
# groups
local admin_group
@@ -373,11 +375,15 @@
non_admin_group=$(get_or_create_group "nonadmins" \
"default" "non-admin group")
- get_or_add_group_project_role $member_role $non_admin_group $demo_project
- get_or_add_group_project_role $another_role $non_admin_group $demo_project
- get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
- get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
- get_or_add_group_project_role $admin_role $admin_group $admin_project
+ async_run ks-group-memberdemo get_or_add_group_project_role $member_role $non_admin_group $demo_project
+ async_run ks-group-anotherdemo get_or_add_group_project_role $another_role $non_admin_group $demo_project
+ async_run ks-group-memberalt get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
+ async_run ks-group-anotheralt get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
+ async_run ks-group-admin get_or_add_group_project_role $admin_role $admin_group $admin_project
+
+ async_wait ks-demo-{member,admin,another,invis}
+ async_wait ks-alt-{member,admin,another}
+ async_wait ks-group-{memberdemo,anotherdemo,memberalt,anotheralt,admin}
if is_service_enabled ldap; then
create_ldap_domain
@@ -413,6 +419,7 @@
local section=${3:-keystone_authtoken}
iniset $conf_file $section auth_type password
+ iniset $conf_file $section interface public
iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
iniset $conf_file $section username $admin_user
iniset $conf_file $section password $SERVICE_PASSWORD
@@ -421,7 +428,7 @@
iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME"
iniset $conf_file $section cafile $SSL_BUNDLE_FILE
- iniset $conf_file $section memcached_servers localhost:11211
+ iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS
}
# configure_auth_token_middleware conf_file admin_user IGNORED [section]
@@ -504,8 +511,6 @@
if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
install_apache_wsgi
- elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then
- pip_install uwsgi
fi
}
@@ -523,7 +528,7 @@
enable_apache_site keystone
restart_apache_server
else # uwsgi
- run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
+ run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
fi
echo "Waiting for keystone to start..."
@@ -563,7 +568,6 @@
# - ``KEYSTONE_BIN_DIR``
# - ``ADMIN_PASSWORD``
# - ``IDENTITY_API_VERSION``
-# - ``KEYSTONE_AUTH_URI``
# - ``REGION_NAME``
# - ``KEYSTONE_SERVICE_PROTOCOL``
# - ``KEYSTONE_SERVICE_HOST``
diff --git a/lib/libraries b/lib/libraries
index b4f3c31..c7aa815 100644
--- a/lib/libraries
+++ b/lib/libraries
@@ -72,7 +72,7 @@
local name=$1
if use_library_from_git "$name"; then
git_clone_by_name "$name"
- setup_dev_lib "$name"
+ setup_dev_lib -bindep "$name"
fi
}
diff --git a/lib/lvm b/lib/lvm
index d9e78a0..b826c1b 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -124,13 +124,9 @@
local vg=$1
local size=$2
- # Start the lvmetad and tgtd services
- if is_fedora || is_suse; then
- # services is not started by default
- start_service lvm2-lvmetad
- if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
- start_service tgtd
- fi
+ # Start the tgtd service on Fedora and SUSE if tgtadm is used
+ if is_fedora || is_suse && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+ start_service tgtd
fi
# Start with a clean volume group
diff --git a/lib/neutron b/lib/neutron
index 888b5e8..885df97 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -354,7 +354,6 @@
# if not passed $NOVA_CONF is used.
function configure_neutron_nova_new {
local conf=${1:-$NOVA_CONF}
- iniset $conf DEFAULT use_neutron True
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
iniset $conf neutron username neutron
@@ -365,8 +364,6 @@
iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
iniset $conf neutron region_name "$REGION_NAME"
- iniset $conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
-
# optionally set options in nova_conf
neutron_plugin_create_nova_conf $conf
@@ -466,7 +463,7 @@
done
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index dbd6e2c..791ff18 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -58,8 +58,6 @@
# Neutron Network Configuration
# -----------------------------
-deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future"
-
if is_service_enabled tls-proxy; then
Q_PROTOCOL="https"
fi
@@ -228,15 +226,17 @@
# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
-default_route_dev=$(ip route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
# With the linuxbridge agent, if using VLANs for tenant networks,
# or if using flat or VLAN provider networks, set in ``localrc`` to
# the name of the network interface to use for the physical
# network.
#
# Example: ``LB_PHYSICAL_INTERFACE=eth1``
-LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev}
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+ default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+ die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+ LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
# When Neutron tunnels are enabled it is needed to specify the
# IP address of the end point in the local server. This IP is set
@@ -364,6 +364,11 @@
_configure_neutron_ceilometer_notifications
fi
+ if [[ $Q_AGENT == "ovn" ]]; then
+ configure_ovn
+ configure_ovn_plugin
+ fi
+
iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
# devstack is not a tool for running uber scale OpenStack
# clouds, therefore running without a dedicated RPC worker
@@ -373,9 +378,8 @@
function create_nova_conf_neutron {
local conf=${1:-$NOVA_CONF}
- iniset $conf DEFAULT use_neutron True
iniset $conf neutron auth_type "password"
- iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI"
+ iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
iniset $conf neutron username "$Q_ADMIN_USERNAME"
iniset $conf neutron password "$SERVICE_PASSWORD"
iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
@@ -384,11 +388,6 @@
iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $conf neutron region_name "$REGION_NAME"
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then
- LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
- iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
- fi
-
# optionally set options in nova_conf
neutron_plugin_create_nova_conf $conf
@@ -446,6 +445,10 @@
git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
setup_develop $NEUTRON_DIR
+
+ if [[ $Q_AGENT == "ovn" ]]; then
+ install_ovn
+ fi
}
# install_neutron_agent_packages() - Collect source and prepare
@@ -467,6 +470,22 @@
fi
}
+# Start running OVN processes
+function start_ovn_services {
+ if [[ $Q_AGENT == "ovn" ]]; then
+ init_ovn
+ start_ovn
+ if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
+ if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
+ echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
+ echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
+ else
+ create_public_bridge
+ fi
+ fi
+ fi
+}
+
# Start running processes
function start_neutron_service_and_check {
local service_port=$Q_PORT
@@ -483,7 +502,7 @@
# Start the Neutron service
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
enable_service neutron-api
- run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
@@ -583,6 +602,10 @@
function stop_mutnauq {
stop_mutnauq_other
stop_mutnauq_l2_agent
+
+ if [[ $Q_AGENT == "ovn" ]]; then
+ stop_ovn
+ fi
}
# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
@@ -630,7 +653,7 @@
IP_UP="sudo ip link set $to_intf up"
if [[ "$af" == "inet" ]]; then
IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
- ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP "
+ ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
fi
fi
@@ -676,6 +699,10 @@
for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
+
+ if [[ $Q_AGENT == "ovn" ]]; then
+ cleanup_ovn
+ fi
}
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index fa3f862..bdeaf0f 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -38,7 +38,7 @@
}
function neutron_plugin_install_agent_packages {
- install_package bridge-utils
+ :
}
function neutron_plugin_configure_dhcp_agent {
@@ -48,7 +48,7 @@
function neutron_plugin_configure_l3_agent {
local conf_file=$1
- sudo brctl addbr $PUBLIC_BRIDGE
+ sudo ip link add $PUBLIC_BRIDGE type bridge
set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
}
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 497b6c6..ae4b251 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -7,6 +7,12 @@
_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace)
set +o xtrace
+# Default openvswitch L2 agent
+Q_AGENT=${Q_AGENT:-openvswitch}
+if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
+ source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
+fi
+
# Enable this to simply and quickly enable tunneling with ML2.
# Select either 'gre', 'vxlan', or 'gre,vxlan'
Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"}
@@ -17,12 +23,6 @@
Q_TUNNEL_TYPES=gre
fi
-# Default openvswitch L2 agent
-Q_AGENT=${Q_AGENT:-openvswitch}
-if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
- source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
-fi
-
# List of MechanismDrivers to load
Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge}
# Default GRE TypeDriver options
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index f39c7c4..8c75e15 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -11,8 +11,6 @@
local conf="$1"
NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE
- LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
- iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
}
function neutron_plugin_install_agent_packages {
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index b65a258..7fed8bf 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -15,6 +15,10 @@
function neutron_plugin_install_agent_packages {
_neutron_ovs_base_install_agent_packages
+ if use_library_from_git "os-ken"; then
+ git_clone_by_name "os-ken"
+ setup_dev_lib "os-ken"
+ fi
}
function neutron_plugin_configure_dhcp_agent {
@@ -41,8 +45,10 @@
# Setup physical network bridge mappings. Override
# ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
# complex physical network configurations.
- if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
- OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+ if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+ if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then
+ OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+ fi
# Configure bridge manually with physical interface as port for multi-node
_neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
new file mode 100644
index 0000000..e4d0d75
--- /dev/null
+++ b/lib/neutron_plugins/ovn_agent
@@ -0,0 +1,824 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+# Global Sources
+# --------------
+
+# There are some ovs functions OVN depends on that must be sourced from
+# the ovs neutron plugins.
+source ${TOP_DIR}/lib/neutron_plugins/ovs_base
+source ${TOP_DIR}/lib/neutron_plugins/openvswitch_agent
+
+# Load devstack ovs base functions
+source $NEUTRON_DIR/devstack/lib/ovs
+
+
+# Defaults
+# --------
+
+Q_BUILD_OVS_FROM_GIT=$(trueorfalse True Q_BUILD_OVS_FROM_GIT)
+
+# Set variables for building OVN from source
+OVN_REPO=${OVN_REPO:-https://github.com/ovn-org/ovn.git}
+OVN_REPO_NAME=$(basename ${OVN_REPO} | cut -f1 -d'.')
+OVN_REPO_NAME=${OVN_REPO_NAME:-ovn}
+OVN_BRANCH=${OVN_BRANCH:-v20.06.1}
+# The commit removing OVN bits from the OVS tree, it is the commit that is not
+# present in OVN tree and is used to distinguish if OVN is part of OVS or not.
+# https://github.com/openvswitch/ovs/commit/05bf1dbb98b0635a51f75e268ef8aed27601401d
+OVN_SPLIT_HASH=05bf1dbb98b0635a51f75e268ef8aed27601401d
+
+if is_service_enabled tls-proxy; then
+ OVN_PROTO=ssl
+else
+ OVN_PROTO=tcp
+fi
+
+# How to connect to ovsdb-server hosting the OVN SB database.
+OVN_SB_REMOTE=${OVN_SB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6642}
+
+# How to connect to ovsdb-server hosting the OVN NB database
+OVN_NB_REMOTE=${OVN_NB_REMOTE:-$OVN_PROTO:$SERVICE_HOST:6641}
+
+# ml2/config for neutron_sync_mode
+OVN_NEUTRON_SYNC_MODE=${OVN_NEUTRON_SYNC_MODE:-log}
+
+# Configured DNS servers to be used with internal_dns extension, only
+# if the subnet DNS is not configured.
+OVN_DNS_SERVERS=${OVN_DNS_SERVERS:-8.8.8.8}
+
+# The type of OVN L3 Scheduler to use. The OVN L3 Scheduler determines the
+# hypervisor/chassis where a routers gateway should be hosted in OVN. The
+# default OVN L3 scheduler is leastloaded
+OVN_L3_SCHEDULER=${OVN_L3_SCHEDULER:-leastloaded}
+
+# A UUID to uniquely identify this system. If one is not specified, a random
+# one will be generated. A randomly generated UUID will be saved in a file
+# $OVS_SYSCONFDIR/system-id.conf (typically /etc/openvswitch/system-id.conf)
+# so that the same one will be re-used if you re-run DevStack or restart
+# Open vSwitch service.
+OVN_UUID=${OVN_UUID:-}
+
+# Whether or not to build the openvswitch kernel module from ovs. This is required
+# unless the distro kernel includes ovs+conntrack support.
+OVN_BUILD_MODULES=$(trueorfalse False OVN_BUILD_MODULES)
+OVN_BUILD_FROM_SOURCE=$(trueorfalse False OVN_BUILD_FROM_SOURCE)
+
+# Whether or not to install the ovs python module from ovs source. This can be
+# used to test and validate new ovs python features. This should only be used
+# for development purposes since the ovs python version is controlled by OpenStack
+# requirements.
+OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
+
+# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
+# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
+# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
+# configure the MTU DHCP option.
+OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
+
+# The log level of the OVN databases (north and south)
+OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
+
+OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
+OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
+
+export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
+ OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+fi
+
+OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
+
+OVS_PREFIX=
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ OVS_PREFIX=/usr/local
+fi
+OVS_SBINDIR=$OVS_PREFIX/sbin
+OVS_BINDIR=$OVS_PREFIX/bin
+OVS_RUNDIR=$OVS_PREFIX/var/run/openvswitch
+OVS_SHAREDIR=$OVS_PREFIX/share/openvswitch
+OVS_SCRIPTDIR=$OVS_SHAREDIR/scripts
+OVS_DATADIR=$DATA_DIR/ovs
+OVS_SYSCONFDIR=${OVS_SYSCONFDIR:-$OVS_PREFIX/etc/openvswitch}
+
+OVN_DATADIR=$DATA_DIR/ovn
+OVN_SHAREDIR=$OVS_PREFIX/share/ovn
+OVN_SCRIPTDIR=$OVN_SHAREDIR/scripts
+OVN_RUNDIR=$OVS_PREFIX/var/run/ovn
+
+NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix)
+NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent"
+
+STACK_GROUP="$( id --group --name "$STACK_USER" )"
+
+OVN_NORTHD_SERVICE=ovn-northd.service
+if is_ubuntu; then
+ # The ovn-central.service file on Ubuntu is responsible for starting
+ # ovn-northd and the OVN DBs (on CentOS this is done by ovn-northd.service)
+ OVN_NORTHD_SERVICE=ovn-central.service
+fi
+OVSDB_SERVER_SERVICE=ovsdb-server.service
+OVS_VSWITCHD_SERVICE=ovs-vswitchd.service
+OVN_CONTROLLER_SERVICE=ovn-controller.service
+OVN_CONTROLLER_VTEP_SERVICE=ovn-controller-vtep.service
+if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ OVSDB_SERVER_SERVICE=devstack@ovsdb-server.service
+ OVS_VSWITCHD_SERVICE=devstack@ovs-vswitchd.service
+ OVN_NORTHD_SERVICE=devstack@ovn-northd.service
+ OVN_CONTROLLER_SERVICE=devstack@ovn-controller.service
+ OVN_CONTROLLER_VTEP_SERVICE=devstack@ovn-controller-vtep.service
+fi
+
+# Defaults Overwrite
+# ------------------
+
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-ovn,logger}
+Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,geneve}
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
+Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"}
+Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-port_security,qos}
+# this one allows empty:
+ML2_L3_PLUGIN=${ML2_L3_PLUGIN-"ovn-router"}
+
+Q_LOG_DRIVER_RATE_LIMIT=${Q_LOG_DRIVER_RATE_LIMIT:-100}
+Q_LOG_DRIVER_BURST_LIMIT=${Q_LOG_DRIVER_BURST_LIMIT:-25}
+Q_LOG_DRIVER_LOG_BASE=${Q_LOG_DRIVER_LOG_BASE:-acl_log_meter}
+
+# Utility Functions
+# -----------------
+
+function wait_for_sock_file {
+ local count=0
+ while [ ! -S $1 ]; do
+ sleep 1
+ count=$((count+1))
+ if [ "$count" -gt 5 ]; then
+ die $LINENO "Socket $1 not found"
+ fi
+ done
+}
+
+function use_new_ovn_repository {
+ if [ -z "$is_new_ovn" ]; then
+ local ovs_repo_dir=$DEST/$OVS_REPO_NAME
+ if [ ! -d $ovs_repo_dir ]; then
+ git_timed clone $OVS_REPO $ovs_repo_dir
+ pushd $ovs_repo_dir
+ git checkout $OVS_BRANCH
+ popd
+ else
+ clone_repository $OVS_REPO $ovs_repo_dir $OVS_BRANCH
+ fi
+ # Check the split commit exists in the current branch
+ pushd $ovs_repo_dir
+ git log $OVS_BRANCH --pretty=format:"%H" | grep -q $OVN_SPLIT_HASH
+ is_new_ovn=$?
+ popd
+ fi
+ return $is_new_ovn
+}
+
+# NOTE(rtheis): Function copied from DevStack _neutron_ovs_base_setup_bridge
+# and _neutron_ovs_base_add_bridge with the call to neutron-ovs-cleanup
+# removed. The call is not relevant for OVN, as it is specific to the use
+# of Neutron's OVS agent and hangs when running stack.sh because
+# neutron-ovs-cleanup uses the OVSDB native interface.
+function ovn_base_setup_bridge {
+ local bridge=$1
+ local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge -- set bridge $bridge protocols=OpenFlow13,OpenFlow15"
+
+ if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
+ addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
+ fi
+
+ $addbr_cmd
+ sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
+}
+
+function _start_process {
+ $SYSTEMCTL daemon-reload
+ $SYSTEMCTL enable $1
+ $SYSTEMCTL restart $1
+}
+
+function _run_process {
+ local service=$1
+ local cmd="$2"
+ local stop_cmd="$3"
+ local group=$4
+ local user=${5:-$STACK_USER}
+
+ local systemd_service="devstack@$service.service"
+ local unit_file="$SYSTEMD_DIR/$systemd_service"
+ local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
+
+ echo "Starting $service executed command": $cmd
+
+ write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+ iniset -sudo $unit_file "Service" "Type" "forking"
+ iniset -sudo $unit_file "Service" "RemainAfterExit" "yes"
+ iniset -sudo $unit_file "Service" "KillMode" "mixed"
+ iniset -sudo $unit_file "Service" "LimitNOFILE" "65536"
+ iniset -sudo $unit_file "Service" "Environment" "$environment"
+ if [ -n "$stop_cmd" ]; then
+ iniset -sudo $unit_file "Service" "ExecStop" "$stop_cmd"
+ fi
+
+ _start_process $systemd_service
+
+ local testcmd="test -e $OVS_RUNDIR/$service.pid"
+ test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
+ sudo ovs-appctl -t $service vlog/set console:off syslog:info file:info
+}
+
+function clone_repository {
+ local repo=$1
+ local dir=$2
+ local branch=$3
+ # Set ERROR_ON_CLONE to false to avoid the need of having the
+ # repositories like OVN and OVS in the required_projects of the job
+ # definition.
+ ERROR_ON_CLONE=false git_clone $repo $dir $branch
+}
+
+function get_ext_gw_interface {
+ # Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH
+ # This function is copied directly from the devstack neutron-legacy script
+ if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then
+ echo $Q_PUBLIC_VETH_EX
+ else
+ # Disable in-band as we are going to use local port
+ # to communicate with VMs
+ sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \
+ other_config:disable-in-band=true
+ echo $PUBLIC_BRIDGE
+ fi
+}
+
+function create_public_bridge {
+ # Create the public bridge that OVN will use
+ # This logic is based on the devstack neutron-legacy _neutron_configure_router_v4 and _v6
+ local ext_gw_ifc
+ ext_gw_ifc=$(get_ext_gw_interface)
+
+ sudo ovs-vsctl --may-exist add-br $ext_gw_ifc -- set bridge $ext_gw_ifc protocols=OpenFlow13,OpenFlow15
+ sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$ext_gw_ifc
+ if [ -n "$FLOATING_RANGE" ]; then
+ local cidr_len=${FLOATING_RANGE#*/}
+ sudo ip addr replace $PUBLIC_NETWORK_GATEWAY/$cidr_len dev $ext_gw_ifc
+ fi
+
+ # Ensure IPv6 RAs are accepted on the interface with the default route.
+ # This is needed for neutron-based devstack clouds to work in
+ # IPv6-only clouds in the gate. Please do not remove this without
+ # talking to folks in Infra. This fix is based on a devstack fix for
+ # neutron L3 agent: https://review.openstack.org/#/c/359490/.
+ default_route_dev=$(ip route | grep ^default | awk '{print $5}')
+ sudo sysctl -w net.ipv6.conf.$default_route_dev.accept_ra=2
+
+ sudo sysctl -w net.ipv6.conf.all.forwarding=1
+ if [ -n "$IPV6_PUBLIC_RANGE" ]; then
+ local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
+ sudo ip -6 addr replace $IPV6_PUBLIC_NETWORK_GATEWAY/$ipv6_cidr_len dev $ext_gw_ifc
+ fi
+
+ sudo ip link set $ext_gw_ifc up
+}
+
+function _disable_libvirt_apparmor {
+ if ! sudo aa-status --enabled ; then
+ return 0
+ fi
+ # NOTE(arosen): This is used as a work around to allow newer versions
+ # of libvirt to work with ovs configured ports. See LP#1466631.
+ # requires the apparmor-utils
+ install_package apparmor-utils
+ # disables apparmor for libvirtd
+ sudo aa-complain /etc/apparmor.d/usr.sbin.libvirtd
+}
+
+
+# OVN compilation functions
+# -------------------------
+
+
+# compile_ovn() - Compile OVN from source and load needed modules
+# Accepts three parameters:
+# - first optional is False by default and means that
+# modules are built and installed.
+# - second optional parameter defines prefix for
+# ovn compilation
+# - third optional parameter defines localstatedir for
+# ovn single machine runtime
+function compile_ovn {
+ local build_modules=${1:-False}
+ local prefix=$2
+ local localstatedir=$3
+
+ if [ -n "$prefix" ]; then
+ prefix="--prefix=$prefix"
+ fi
+
+ if [ -n "$localstatedir" ]; then
+ localstatedir="--localstatedir=$localstatedir"
+ fi
+
+ clone_repository $OVN_REPO $DEST/$OVN_REPO_NAME $OVN_BRANCH
+ pushd $DEST/$OVN_REPO_NAME
+
+ if [ ! -f configure ] ; then
+ ./boot.sh
+ fi
+
+ if [ ! -f config.status ] || [ configure -nt config.status ] ; then
+ ./configure --with-ovs-source=$DEST/$OVS_REPO_NAME $prefix $localstatedir
+ fi
+ make -j$(($(nproc) + 1))
+ sudo make install
+ popd
+}
+
+
+# OVN Neutron driver functions
+# ----------------------------
+
+# OVN service sanity check
+function ovn_sanity_check {
+ if is_service_enabled q-agt neutron-agt; then
+ die $LINENO "The q-agt/neutron-agt service must be disabled with OVN."
+ elif is_service_enabled q-l3 neutron-l3; then
+ die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
+ elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_MECHANISM_DRIVERS =~ "ovn" ]]; then
+ die $LINENO "OVN needs to be enabled in \$Q_ML2_PLUGIN_MECHANISM_DRIVERS"
+ elif is_service_enabled q-svc neutron-api && [[ ! $Q_ML2_PLUGIN_TYPE_DRIVERS =~ "geneve" ]]; then
+ die $LINENO "Geneve needs to be enabled in \$Q_ML2_PLUGIN_TYPE_DRIVERS to be used with OVN"
+ fi
+}
+
+# install_ovn() - Collect source and prepare
+function install_ovn {
+ if [[ "$Q_BUILD_OVS_FROM_GIT" == "False" ]]; then
+ echo "Installation of OVS from source disabled."
+ return 0
+ fi
+
+ echo "Installing OVN and dependent packages"
+
+ # Check the OVN configuration
+ ovn_sanity_check
+
+ # Install tox, used to generate the config (see devstack/override-defaults)
+ pip_install tox
+
+ sudo mkdir -p $OVS_RUNDIR
+ sudo chown $(whoami) $OVS_RUNDIR
+ # NOTE(lucasagomes): To keep things simpler, let's reuse the same
+ # RUNDIR for both OVS and OVN. This way we avoid having to specify the
+ # --db option in the ovn-{n,s}bctl commands while playing with DevStack
+ sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
+
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ # If OVS is already installed, remove it, because we're about to
+ # re-install it from source.
+ for package in openvswitch openvswitch-switch openvswitch-common; do
+ if is_package_installed $package ; then
+ uninstall_package $package
+ fi
+ done
+
+ remove_ovs_packages
+ sudo rm -f $OVS_RUNDIR/*
+
+ compile_ovs $OVN_BUILD_MODULES
+ if use_new_ovn_repository; then
+ compile_ovn $OVN_BUILD_MODULES
+ fi
+
+ sudo mkdir -p $OVS_PREFIX/var/log/openvswitch
+ sudo chown $(whoami) $OVS_PREFIX/var/log/openvswitch
+ sudo mkdir -p $OVS_PREFIX/var/log/ovn
+ sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
+ else
+ fixup_ovn_centos
+ install_package $(get_packages openvswitch)
+ install_package $(get_packages ovn)
+ fi
+
+ # Ensure that the OVS commands are accessible in the PATH
+ export PATH=$OVS_BINDIR:$PATH
+
+ # Archive log files and create new
+ local log_archive_dir=$LOGDIR/archive
+ mkdir -p $log_archive_dir
+ for logfile in ovs-vswitchd.log ovn-northd.log ovn-controller.log ovn-controller-vtep.log ovs-vtep.log ovsdb-server.log ovsdb-server-nb.log ovsdb-server-sb.log; do
+ if [ -f "$LOGDIR/$logfile" ] ; then
+ mv "$LOGDIR/$logfile" "$log_archive_dir/$logfile.${CURRENT_LOG_TIME}"
+ fi
+ done
+
+ # Install ovsdbapp from source if requested
+ if use_library_from_git "ovsdbapp"; then
+ git_clone_by_name "ovsdbapp"
+ setup_dev_lib "ovsdbapp"
+ fi
+
+ # Install ovs python module from ovs source.
+ if [[ "$OVN_INSTALL_OVS_PYTHON_MODULE" == "True" ]]; then
+ sudo pip uninstall -y ovs
+ # Clone the OVS repository if it's not yet present
+ clone_repository $OVS_REPO $DEST/$OVS_REPO_NAME $OVS_BRANCH
+ sudo pip install -e $DEST/$OVS_REPO_NAME/python
+ fi
+}
+
+# filter_network_api_extensions() - Remove non-supported API extensions by
+# the OVN driver from the list of enabled API extensions
+function filter_network_api_extensions {
+ SUPPORTED_NETWORK_API_EXTENSIONS=$($PYTHON -c \
+ 'from neutron.common.ovn import extensions ;\
+ print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS))')
+ SUPPORTED_NETWORK_API_EXTENSIONS=$SUPPORTED_NETWORK_API_EXTENSIONS,$($PYTHON -c \
+ 'from neutron.common.ovn import extensions ;\
+ print(",".join(extensions.ML2_SUPPORTED_API_EXTENSIONS_OVN_L3))')
+ if is_service_enabled q-qos neutron-qos ; then
+ SUPPORTED_NETWORK_API_EXTENSIONS="$SUPPORTED_NETWORK_API_EXTENSIONS,qos"
+ fi
+ NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-$SUPPORTED_NETWORK_API_EXTENSIONS}
+ extensions=$(echo $NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u)
+ supported_ext=$(echo $SUPPORTED_NETWORK_API_EXTENSIONS | tr ', ' '\n' | sort -u)
+ enabled_ext=$(comm -12 <(echo -e "$extensions") <(echo -e "$supported_ext"))
+ disabled_ext=$(comm -3 <(echo -e "$extensions") <(echo -e "$enabled_ext"))
+
+ # Log a message in case some extensions had to be disabled because
+ # they are not supported by the OVN driver
+ if [ ! -z "$disabled_ext" ]; then
+ _disabled=$(echo $disabled_ext | tr ' ' ',')
+ echo "The folling network API extensions have been disabled because they are not supported by OVN: $_disabled"
+ fi
+
+ # Export the final list of extensions that have been enabled and are
+ # supported by OVN
+ export NETWORK_API_EXTENSIONS=$(echo $enabled_ext | tr ' ' ',')
+}
+
+function configure_ovn_plugin {
+ echo "Configuring Neutron for OVN"
+
+ if is_service_enabled q-svc ; then
+ filter_network_api_extensions
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE"
+ if is_service_enabled tls-proxy; then
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_ca_cert="$INT_CA_DIR/ca-chain.pem"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_ca_cert="$INT_CA_DIR/ca-chain.pem"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_certificate="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_private_key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+ fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP"
+ inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
+
+ if is_service_enabled q-log neutron-log; then
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log rate_limit="$Q_LOG_DRIVER_RATE_LIMIT"
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE network_log burst_limit="$Q_LOG_DRIVER_BURST_LIMIT"
+ inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
+ fi
+
+ if is_service_enabled q-ovn-metadata-agent; then
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
+ else
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
+ fi
+
+ if is_service_enabled q-dns neutron-dns ; then
+ iniset $NEUTRON_CONF DEFAULT dns_domain openstackgate.local
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn dns_servers="$OVN_DNS_SERVERS"
+ fi
+
+ iniset $NEUTRON_CONF ovs igmp_snooping_enable $OVN_IGMP_SNOOPING_ENABLE
+ fi
+
+ if is_service_enabled q-dhcp neutron-dhcp ; then
+ iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification True
+ else
+ iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False
+ fi
+
+ if is_service_enabled n-api-meta ; then
+ if is_service_enabled q-ovn-metadata-agent ; then
+ iniset $NOVA_CONF neutron service_metadata_proxy True
+ fi
+ fi
+}
+
+function configure_ovn {
+ echo "Configuring OVN"
+
+ if [ -z "$OVN_UUID" ] ; then
+ if [ -f $OVS_SYSCONFDIR/system-id.conf ]; then
+ OVN_UUID=$(cat $OVS_SYSCONFDIR/system-id.conf)
+ else
+ OVN_UUID=$(uuidgen)
+ echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
+ fi
+ else
+ local ovs_uuid
+ ovs_uuid=$(cat $OVS_SYSCONFDIR/system-id.conf)
+ if [ "$ovs_uuid" != $OVN_UUID ]; then
+ echo $OVN_UUID | sudo tee $OVS_SYSCONFDIR/system-id.conf
+ fi
+ fi
+
+ # Erase the pre-set configurations from packages. DevStack will
+ # configure OVS and OVN accordingly for its use.
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "False" ]] && is_fedora; then
+ sudo truncate -s 0 /etc/openvswitch/default.conf
+ sudo truncate -s 0 /etc/sysconfig/openvswitch
+ sudo truncate -s 0 /etc/sysconfig/ovn
+ fi
+
+ # Metadata
+ if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then
+ sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
+
+ mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
+ (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+ cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF
+ configure_root_helper_options $OVN_META_CONF
+
+ iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
+ iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
+ iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
+ iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
+ iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
+ if is_service_enabled tls-proxy; then
+ iniset $OVN_META_CONF ovn \
+ ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem
+ iniset $OVN_META_CONF ovn \
+ ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt
+ iniset $OVN_META_CONF ovn \
+ ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key
+ fi
+ fi
+}
+
+function init_ovn {
+ # clean up from previous (possibly aborted) runs
+ # create required data files
+
+ # Assumption: this is a dedicated test system and there is nothing important
+ # in the ovn, ovn-nb, or ovs databases. We're going to trash them and
+ # create new ones on each devstack run.
+
+ _disable_libvirt_apparmor
+
+ mkdir -p $OVN_DATADIR
+ mkdir -p $OVS_DATADIR
+
+ rm -f $OVS_DATADIR/*.db
+ rm -f $OVS_DATADIR/.*.db.~lock~
+ rm -f $OVN_DATADIR/*.db
+ rm -f $OVN_DATADIR/.*.db.~lock~
+}
+
+function _start_ovs {
+ echo "Starting OVS"
+ if is_service_enabled ovn-controller ovn-controller-vtep ovn-northd; then
+ # ovsdb-server and ovs-vswitchd are used privately in OVN as openvswitch service names.
+ enable_service ovsdb-server
+ enable_service ovs-vswitchd
+
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ if [ ! -f $OVS_DATADIR/conf.db ]; then
+ ovsdb-tool create $OVS_DATADIR/conf.db $OVS_SHAREDIR/vswitch.ovsschema
+ fi
+
+ if is_service_enabled ovn-controller-vtep; then
+ if [ ! -f $OVS_DATADIR/vtep.db ]; then
+ ovsdb-tool create $OVS_DATADIR/vtep.db $OVS_SHAREDIR/vtep.ovsschema
+ fi
+ fi
+
+ local dbcmd="$OVS_SBINDIR/ovsdb-server --remote=punix:$OVS_RUNDIR/db.sock --remote=ptcp:6640:$OVSDB_SERVER_LOCAL_HOST --pidfile --detach --log-file"
+ dbcmd+=" --remote=db:Open_vSwitch,Open_vSwitch,manager_options"
+ if is_service_enabled ovn-controller-vtep; then
+ dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
+ fi
+ dbcmd+=" $OVS_DATADIR/conf.db"
+ _run_process ovsdb-server "$dbcmd"
+
+ # Note: ovn-controller will create and configure br-int once it is started.
+ # So, no need to create it now because nothing depends on that bridge here.
+ local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
+ _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+ else
+ _start_process "$OVSDB_SERVER_SERVICE"
+ _start_process "$OVS_VSWITCHD_SERVICE"
+ fi
+
+ echo "Configuring OVSDB"
+ if is_service_enabled tls-proxy; then
+ sudo ovs-vsctl --no-wait set-ssl \
+ $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key \
+ $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt \
+ $INT_CA_DIR/ca-chain.pem
+ fi
+
+ sudo ovs-vsctl --no-wait set-manager ptcp:6640:$OVSDB_SERVER_LOCAL_HOST
+ sudo ovs-vsctl --no-wait set open_vswitch . system-type="devstack"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:system-id="$OVN_UUID"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+ # Select this chassis to host gateway routers
+ if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
+ sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
+ fi
+
+ if is_provider_network || [[ $Q_USE_PROVIDERNET_FOR_PUBLIC == "True" ]]; then
+ ovn_base_setup_bridge $OVS_PHYSICAL_BRIDGE
+ sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${PHYSICAL_NETWORK}:${OVS_PHYSICAL_BRIDGE}
+ fi
+
+ if is_service_enabled ovn-controller-vtep ; then
+ ovn_base_setup_bridge br-v
+ vtep-ctl add-ps br-v
+ vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+
+ enable_service ovs-vtep
+ local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
+ _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root"
+
+ vtep-ctl set-manager tcp:$HOST_IP:6640
+ fi
+ fi
+}
+
+function _start_ovn_services {
+ _start_process "$OVSDB_SERVER_SERVICE"
+ _start_process "$OVS_VSWITCHD_SERVICE"
+
+ if is_service_enabled ovn-northd ; then
+ _start_process "$OVN_NORTHD_SERVICE"
+ fi
+ if is_service_enabled ovn-controller ; then
+ _start_process "$OVN_CONTROLLER_SERVICE"
+ fi
+ if is_service_enabled ovn-controller-vtep ; then
+ _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+ fi
+ if is_service_enabled ovs-vtep ; then
+ _start_process "devstack@ovs-vtep.service"
+ fi
+ if is_service_enabled q-ovn-metadata-agent; then
+ _start_process "devstack@q-ovn-metadata-agent.service"
+ fi
+}
+
+# start_ovn() - Start running processes, including screen
+function start_ovn {
+ echo "Starting OVN"
+
+ _start_ovs
+
+ local SCRIPTDIR=$OVN_SCRIPTDIR
+ if ! use_new_ovn_repository; then
+ SCRIPTDIR=$OVS_SCRIPTDIR
+ fi
+
+ if is_service_enabled ovn-northd ; then
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
+ local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
+
+ _run_process ovn-northd "$cmd" "$stop_cmd"
+ else
+ _start_process "$OVN_NORTHD_SERVICE"
+ fi
+
+ # Wait for the service to be ready
+ wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
+ wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+
+ if is_service_enabled tls-proxy; then
+ sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+ sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+ fi
+ sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+ sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+ sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+ sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+ fi
+
+ if is_service_enabled ovn-controller ; then
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
+ local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
+
+ _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+ else
+ _start_process "$OVN_CONTROLLER_SERVICE"
+ fi
+ fi
+
+ if is_service_enabled ovn-controller-vtep ; then
+ if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
+ local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
+ _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+ else
+ _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
+ fi
+ fi
+
+ if is_service_enabled q-ovn-metadata-agent; then
+ run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
+ # Format logging
+ setup_logging $OVN_META_CONF
+ fi
+
+ _start_ovn_services
+}
+
+function _stop_ovs_dp {
+ sudo ovs-dpctl dump-dps | sudo xargs -n1 ovs-dpctl del-dp
+ sudo modprobe -q -r vport_geneve vport_vxlan openvswitch || true
+}
+
+function _stop_process {
+ local service=$1
+ echo "Stopping process $service"
+ if $SYSTEMCTL is-enabled $service; then
+ $SYSTEMCTL stop $service
+ $SYSTEMCTL disable $service
+ fi
+}
+
+function stop_ovn {
+ if is_service_enabled q-ovn-metadata-agent; then
+ sudo pkill -9 -f haproxy || :
+ _stop_process "devstack@q-ovn-metadata-agent.service"
+ fi
+ if is_service_enabled ovn-controller-vtep ; then
+ _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
+ fi
+ if is_service_enabled ovn-controller ; then
+ _stop_process "$OVN_CONTROLLER_SERVICE"
+ fi
+ if is_service_enabled ovn-northd ; then
+ _stop_process "$OVN_NORTHD_SERVICE"
+ fi
+ if is_service_enabled ovs-vtep ; then
+ _stop_process "devstack@ovs-vtep.service"
+ fi
+
+ _stop_process "$OVS_VSWITCHD_SERVICE"
+ _stop_process "$OVSDB_SERVER_SERVICE"
+
+ _stop_ovs_dp
+}
+
+function _cleanup {
+ local path=${1:-$DEST/$OVN_REPO_NAME}
+ pushd $path
+ cd $path
+ sudo make uninstall
+ sudo make distclean
+ popd
+}
+
+# cleanup_ovn() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ovn {
+ local ovn_path=$DEST/$OVN_REPO_NAME
+ local ovs_path=$DEST/$OVS_REPO_NAME
+
+ if [ -d $ovn_path ]; then
+ _cleanup $ovn_path
+ fi
+
+ if [ -d $ovs_path ]; then
+ _cleanup $ovs_path
+ fi
+
+ sudo rm -f $OVN_RUNDIR
+}
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 69536bb..75a3567 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -101,7 +101,6 @@
SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
-die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
diff --git a/lib/nova b/lib/nova
index c41f881..930529a 100644
--- a/lib/nova
+++ b/lib/nova
@@ -83,6 +83,11 @@
# services and the compute node
NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False}
+# Validate configuration
+if ! is_service_enabled tls-proxy && [ "$NOVA_CONSOLE_PROXY_COMPUTE_TLS" == "True" ]; then
+ die $LINENO "enabling TLS for the console proxy requires the tls-proxy service"
+fi
+
# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
@@ -96,10 +101,6 @@
# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
-# Nova supports pluggable schedulers. The default ``FilterScheduler``
-# should work in most cases.
-SCHEDULER=${SCHEDULER:-filter_scheduler}
-
# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with
# the default filters.
NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
@@ -139,7 +140,7 @@
# ``NOVA_USE_SERVICE_TOKEN`` is a mode where service token is passed along with
# user token while communicating to external RESP API's like Neutron, Cinder
# and Glance.
-NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
+NOVA_USE_SERVICE_TOKEN=$(trueorfalse True NOVA_USE_SERVICE_TOKEN)
# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
# where there are at least two nova-computes.
@@ -259,6 +260,7 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
+ LIBVIRT_CPU_MODE=none
if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
@@ -296,13 +298,9 @@
fi
fi
- if is_fedora && [[ $DISTRO =~ f[0-9][0-9] ]]; then
- # There is an iscsi-initiator bug where it inserts
- # different whitespace that causes a bunch of output
- # matching to fail. We have not been able to get
- # fixed, yet :/ Exists in fedora 29 & 30 at least
- # https://bugzilla.redhat.com/show_bug.cgi?id=1676365
- sudo dnf copr enable -y iwienand/iscsi-initiator-utils
+ if is_fedora && [[ $DISTRO =~ f31 ]]; then
+ # For f31 use the rebased 2.1.0 version of the package.
+ sudo dnf copr enable -y lyarwood/iscsi-initiator-utils
sudo dnf update -y
fi
@@ -401,11 +399,8 @@
fi
iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI"
iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
- iniset $NOVA_CONF scheduler driver "$SCHEDULER"
iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
- if [[ $SCHEDULER == "filter_scheduler" ]]; then
- iniset $NOVA_CONF scheduler workers "$API_WORKERS"
- fi
+ iniset $NOVA_CONF scheduler workers "$API_WORKERS"
iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
if [[ $SERVICE_IP_VERSION == 6 ]]; then
iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
@@ -468,11 +463,7 @@
fi
if is_service_enabled cinder; then
- if is_service_enabled tls-proxy; then
- CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
- CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
- iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
- fi
+ configure_cinder_access
fi
if [ -n "$NOVA_STATE_PATH" ]; then
@@ -520,8 +511,6 @@
# don't let the conductor get out of control now that we're using a pure python db driver
iniset $NOVA_CONF conductor workers "$API_WORKERS"
- iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
-
if is_service_enabled tls-proxy; then
iniset $NOVA_CONF DEFAULT glance_protocol https
iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -593,6 +582,29 @@
iniset $conf placement region_name "$REGION_NAME"
}
+# Configure access to cinder.
+function configure_cinder_access {
+ iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
+ iniset $NOVA_CONF cinder auth_type "password"
+ iniset $NOVA_CONF cinder auth_url "$KEYSTONE_SERVICE_URI"
+ # NOTE(mriedem): This looks a bit weird but we use the nova user here
+ # since it has the admin role and the cinder user does not. This is
+ # similar to using the nova user in init_nova_service_user_conf. We need
+ # to use a user with the admin role for background tasks in nova to
+ # be able to GET block-storage API resources owned by another project
+ # since cinder has low-level "is_admin" checks in its DB API.
+ iniset $NOVA_CONF cinder username nova
+ iniset $NOVA_CONF cinder password "$SERVICE_PASSWORD"
+ iniset $NOVA_CONF cinder user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $NOVA_CONF cinder project_name "$SERVICE_TENANT_NAME"
+ iniset $NOVA_CONF cinder project_domain_name "$SERVICE_DOMAIN_NAME"
+ if is_service_enabled tls-proxy; then
+ CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+ CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+ iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
+ fi
+}
+
function configure_console_compute {
# If we are running multiple cells (and thus multiple console proxies) on a
# single host, we offset the ports to avoid collisions. We need to
@@ -600,10 +612,10 @@
# can use the NOVA_CPU_CELL variable to know which cell we are for
# calculating the offset.
# Stagger the offset based on the total number of possible console proxies
- # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+ # (novnc, spice, serial) so that their ports will not collide if
# all are enabled.
local offset
- offset=$(((NOVA_CPU_CELL - 1) * 4))
+ offset=$(((NOVA_CPU_CELL - 1) * 3))
# Use the host IP instead of the service host because for multi-node, the
# service host will be the controller only.
@@ -611,7 +623,7 @@
default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip)
# All nova-compute workers need to know the vnc configuration options
- # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+ # These settings don't hurt anything if n-novnc is disabled
if is_service_enabled n-cpu; then
if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then
# Use the old URL when installing novnc packages.
@@ -624,13 +636,11 @@
NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"}
fi
iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
- XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"}
- iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
- SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"}
+ SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/spice_auto.html"}
iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
fi
- if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+ if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
# Address on which instance vncservers will listen on compute hosts.
# For multi-host, this should be the management ip of the compute host.
VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
@@ -653,7 +663,7 @@
if is_service_enabled n-sproxy; then
iniset $NOVA_CPU_CONF serial_console enabled True
- iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/"
+ iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6082 + offset))/"
fi
}
@@ -662,15 +672,13 @@
local conf=${1:-$NOVA_CONF}
local offset=${2:-0}
# Stagger the offset based on the total number of possible console proxies
- # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+ # (novnc, spice, serial) so that their ports will not collide if
# all are enabled.
- offset=$((offset * 4))
+ offset=$((offset * 3))
- if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
+ if is_service_enabled n-novnc || [ "$NOVA_VNC_ENABLED" != False ]; then
iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $conf vnc novncproxy_port $((6080 + offset))
- iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
- iniset $conf vnc xvpvncproxy_port $((6081 + offset))
if is_nova_console_proxy_compute_tls_enabled ; then
iniset $conf vnc auth_schemes "vencrypt"
@@ -702,12 +710,12 @@
if is_service_enabled n-spice; then
iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
- iniset $conf spice html5proxy_port $((6082 + offset))
+ iniset $conf spice html5proxy_port $((6081 + offset))
fi
if is_service_enabled n-sproxy; then
iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
- iniset $conf serial_console serialproxy_port $((6083 + offset))
+ iniset $conf serial_console serialproxy_port $((6082 + offset))
fi
}
@@ -734,30 +742,50 @@
sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys
}
+function init_nova_db {
+ local dbname="$1"
+ local conffile="$2"
+ recreate_database $dbname
+ $NOVA_BIN_DIR/nova-manage --config-file $conffile db sync --local_cell
+}
+
# init_nova() - Initialize databases, etc.
function init_nova {
# All nova components talk to a central database.
# Only do this step once on the API node for an entire cluster.
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
+ # (Re)create nova databases
+ if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+ # If we are doing singleconductor mode, we have some strange
+ # interdependencies. in that the main config refers to cell1
+ # instead of cell0. In that case, just make sure the cell0 database
+ # is created before we need it below, but don't db_sync it until
+ # after the cellN databases are there.
+ recreate_database nova_cell0
+ else
+ async_run nova-cell-0 init_nova_db nova_cell0 $NOVA_CONF
+ fi
+
+ for i in $(seq 1 $NOVA_NUM_CELLS); do
+ async_run nova-cell-$i init_nova_db nova_cell${i} $(conductor_conf $i)
+ done
+
recreate_database $NOVA_API_DB
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
- recreate_database nova_cell0
-
# map_cell0 will create the cell mapping record in the nova_api DB so
- # this needs to come after the api_db sync happens. We also want to run
- # this before the db sync below since that will migrate both the nova
- # and nova_cell0 databases.
+ # this needs to come after the api_db sync happens.
$NOVA_BIN_DIR/nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
- # (Re)create nova databases
- for i in $(seq 1 $NOVA_NUM_CELLS); do
- recreate_database nova_cell${i}
- $NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync --local_cell
+ # Wait for DBs to finish from above
+ for i in $(seq 0 $NOVA_NUM_CELLS); do
+ async_wait nova-cell-$i
done
- # Migrate nova and nova_cell0 databases.
- $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
+ if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+ # We didn't db sync cell0 above, so run it now
+ $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
+ fi
# Run online migrations on the new databases
# Needed for flavor conversion
@@ -850,7 +878,7 @@
start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
fi
else
- run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
+ run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
fi
@@ -899,6 +927,11 @@
# by the compute process.
configure_console_compute
+ # Configure the OVSDB connection for os-vif
+ if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
+ iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
+ fi
+
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# ``sg`` is used in run_process to execute nova-compute as a member of the
@@ -941,7 +974,7 @@
if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
else
- run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
+ run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
fi
export PATH=$old_path
@@ -949,7 +982,7 @@
function enable_nova_console_proxies {
for i in $(seq 1 $NOVA_NUM_CELLS); do
- for srv in n-novnc n-xvnc n-spice n-sproxy; do
+ for srv in n-novnc n-spice n-sproxy; do
if is_service_enabled $srv; then
enable_service ${srv}-cell${i}
fi
@@ -967,7 +1000,6 @@
# console proxies run globally for singleconductor, else they run per cell
if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
- run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
run_process n-sproxy "$NOVA_BIN_DIR/nova-serialproxy --config-file $api_cell_conf"
else
@@ -976,7 +1008,6 @@
local conf
conf=$(conductor_conf $i)
run_process n-novnc-cell${i} "$NOVA_BIN_DIR/nova-novncproxy --config-file $conf --web $NOVNC_WEB_DIR"
- run_process n-xvnc-cell${i} "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $conf"
run_process n-spice-cell${i} "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $conf --web $SPICE_WEB_DIR"
run_process n-sproxy-cell${i} "$NOVA_BIN_DIR/nova-serialproxy --config-file $conf"
done
@@ -1021,14 +1052,6 @@
# happen between here and the script ending. However, in multinode
# tests this can very often not be the case. So ensure that the
# compute is up before we move on.
-
- # TODO(sdague): honestly, this probably should be a plug point for
- # an external system.
- if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
- # xenserver encodes information in the hostname of the compute
- # because of the dom0/domU split. Just ignore for now.
- return
- fi
wait_for_compute $NOVA_READY_TIMEOUT
}
@@ -1067,13 +1090,13 @@
function stop_nova_console_proxies {
if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
- for srv in n-novnc n-xvnc n-spice n-sproxy; do
+ for srv in n-novnc n-spice n-sproxy; do
stop_process $srv
done
else
enable_nova_console_proxies
for i in $(seq 1 $NOVA_NUM_CELLS); do
- for srv in n-novnc n-xvnc n-spice n-sproxy; do
+ for srv in n-novnc n-spice n-sproxy; do
stop_process ${srv}-cell${i}
done
done
@@ -1107,19 +1130,19 @@
if is_service_enabled n-api; then
if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then
# Note that danms hates these flavors and apologizes for sdague
- openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 cirros256
- openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M
- openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G
- openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G
- openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G
+ openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256
+ openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M
+ openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G
+ openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G
+ openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G
fi
if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then
- openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny
- openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small
- openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium
- openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large
- openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge
+ openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny
+ openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small
+ openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium
+ openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large
+ openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge
fi
fi
}
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 4639869..d3827c3 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -24,17 +24,14 @@
# Currently fairly specific to OpenStackCI hosts
DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS)
-# Only Xenial is left with libvirt-bin. Everywhere else is libvirtd
-if is_ubuntu && [ ${DISTRO} == "xenial" ]; then
- LIBVIRT_DAEMON=libvirt-bin
-else
- LIBVIRT_DAEMON=libvirtd
-fi
+# Enable the Fedora Virtualization Preview Copr repo that provides the latest
+# rawhide builds of QEMU, Libvirt and other virt tools.
+ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO)
# Enable coredumps for libvirt
# Bug: https://bugs.launchpad.net/nova/+bug/1643911
function _enable_coredump {
- local confdir=/etc/systemd/system/${LIBVIRT_DAEMON}.service.d
+ local confdir=/etc/systemd/system/libvirtd.service.d
local conffile=${confdir}/coredump.conf
# Create a coredump directory, and instruct the kernel to save to
@@ -61,11 +58,9 @@
function install_libvirt {
if is_ubuntu; then
- install_package qemu-system
- if [[ ${DISTRO} == "xenial" ]]; then
- install_package libvirt-bin libvirt-dev
- else
- install_package libvirt-clients libvirt-daemon-system libvirt-dev
+ install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev
+ if is_arch "aarch64"; then
+ install_package qemu-efi
fi
# uninstall in case the libvirt version changed
pip_uninstall libvirt-python
@@ -73,6 +68,12 @@
#pip_install_gr <there-si-no-guestfs-in-pypi>
elif is_fedora || is_suse; then
+ # Optionally enable the virt-preview repo when on Fedora
+ if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then
+ # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/
+ sudo dnf copr enable -y @virtmaint-sig/virt-preview
+ fi
+
# Note that in CentOS/RHEL this needs to come from the RDO
# repositories (qemu-kvm-ev ... which provides this package)
# as the base system version is too old. We should have
@@ -80,6 +81,10 @@
install_package qemu-kvm
install_package libvirt libvirt-devel
+ if is_arch "aarch64"; then
+ install_package edk2.git-aarch64
+ fi
+
pip_uninstall libvirt-python
pip_install_gr libvirt-python
fi
@@ -150,26 +155,24 @@
fi
if is_nova_console_proxy_compute_tls_enabled ; then
- if is_service_enabled n-novnc ; then
- echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
- echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
+ echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
+ echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
- sudo mkdir -p /etc/pki/libvirt-vnc
- deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
- deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
- # OpenSSL 1.1.0 generates the key file with permissions: 600, by
- # default and the deploy_int* methods use 'sudo cp' to copy the
- # files, making them owned by root:root.
- # Change ownership of everything under /etc/pki/libvirt-vnc to
- # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key
- # file.
- sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
- fi
+ sudo mkdir -p /etc/pki/libvirt-vnc
+ deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
+ deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
+ # OpenSSL 1.1.0 generates the key file with permissions: 600, by
+ # default and the deploy_int* methods use 'sudo cp' to copy the
+ # files, making them owned by root:root.
+ # Change ownership of everything under /etc/pki/libvirt-vnc to
+ # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key
+ # file.
+ sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
fi
# Service needs to be started on redhat/fedora -- do a restart for
# sanity after fiddling the config.
- restart_service $LIBVIRT_DAEMON
+ restart_service libvirtd
# Restart virtlogd companion service to ensure it is running properly
# https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index adcc278..bda6ef6 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -39,16 +39,14 @@
if ! is_ironic_hardware; then
configure_libvirt
fi
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
- iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
# ironic section
iniset $NOVA_CONF ironic auth_type password
iniset $NOVA_CONF ironic username admin
iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
- iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI
+ iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI
iniset $NOVA_CONF ironic project_domain_id default
iniset $NOVA_CONF ironic user_domain_id default
iniset $NOVA_CONF ironic project_name demo
@@ -70,13 +68,6 @@
return
fi
install_libvirt
- if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] && is_ubuntu; then
- # Ubuntu packaging+apparmor issue prevents libvirt from loading
- # the ROM from /usr/share/misc. Workaround by installing it directly
- # to a directory that it can read from. (LP: #1393548)
- sudo rm -rf /usr/share/qemu/sgabios.bin
- sudo cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
- fi
}
# start_nova_hypervisor - Start any required external services
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 3d676b9..321775d 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -39,14 +39,12 @@
function configure_nova_hypervisor {
configure_libvirt
iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
- iniset $NOVA_CONF libvirt cpu_mode "none"
+ iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
# Do not enable USB tablet input devices to avoid QEMU CPU overhead.
iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
# Power architecture currently does not support graphical consoles.
if is_arch "ppc64"; then
iniset $NOVA_CONF vnc enabled "false"
@@ -54,8 +52,6 @@
# arm64-specific configuration
if is_arch "aarch64"; then
- # arm64 architecture currently does not support graphical consoles.
- iniset $NOVA_CONF vnc enabled "false"
iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
fi
@@ -104,7 +100,7 @@
if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
if is_ubuntu; then
- install_package python-guestfs
+ install_package python3-guestfs
# NOTE(andreaf) Ubuntu kernel can only be read by root, which breaks libguestfs:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725)
INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)"
@@ -119,7 +115,7 @@
# Workaround for missing dependencies in python-libguestfs
install_package python-libguestfs guestfs-data augeas augeas-lenses
elif is_fedora; then
- install_package python-libguestfs
+ install_package python3-libguestfs
fi
fi
}
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index 58ab5c1..57dc45c 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -38,8 +38,6 @@
function configure_nova_hypervisor {
iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
iniset $NOVA_CONF DEFAULT connection_type "openvz"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
}
# install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
deleted file mode 100644
index ccab18d..0000000
--- a/lib/nova_plugins/hypervisor-xenserver
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/bin/bash
-#
-# lib/nova_plugins/hypervisor-xenserver
-# Configure the XenServer hypervisor
-
-# Enable with:
-# VIRT_DRIVER=xenserver
-
-# Dependencies:
-# ``functions`` file
-# ``nova`` configuration
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-_XTRACE_XENSERVER=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
- # This function intentionally left blank
- :
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
- if [ -z "$XENAPI_CONNECTION_URL" ]; then
- die $LINENO "XENAPI_CONNECTION_URL is not specified"
- fi
-
- # Check os-xenapi plugin is enabled
- local plugins="${DEVSTACK_PLUGINS}"
- local plugin
- local found=0
- for plugin in ${plugins//,/ }; do
- if [[ "$plugin" = "os-xenapi" ]]; then
- found=1
- break
- fi
- done
- if [[ $found -ne 1 ]]; then
- die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf"
- fi
-
- iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
- iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL"
- iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER"
- iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD"
- iniset $NOVA_CONF DEFAULT flat_injected "False"
- # Need to avoid crash due to new firewall support
- XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
-
- local dom0_ip
- dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
-
- local ssh_dom0
- ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
-
- # install console logrotate script
- tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
- $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest'
-
- # Create a cron job that will rotate guest logs
- $ssh_dom0 crontab - << CRONTAB
-* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1
-CRONTAB
-
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
- # xenapi functionality is now included in os-xenapi library which houses the plugin
- # so this function intentionally left blank
- :
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
- # This function intentionally left blank
- :
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
- # This function intentionally left blank
- :
-}
-
-
-# Restore xtrace
-$_XTRACE_XENSERVER
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/placement b/lib/placement
index 785b0dd..b779866 100644
--- a/lib/placement
+++ b/lib/placement
@@ -144,11 +144,10 @@
# start_placement_api() - Start the API processes ahead of other things
function start_placement_api {
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
+ run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
else
enable_apache_site placement-api
restart_apache_server
- tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
fi
echo "Waiting for placement-api to start..."
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 1c7c82f..743b4ae 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -66,7 +66,12 @@
sudo systemctl restart epmd.socket epmd.service
fi
if is_fedora || is_suse; then
- sudo systemctl enable rabbitmq-server
+ # NOTE(jangutter): If rabbitmq is not running (as in a fresh
+ # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with
+ # socket activation. This fails the first time and does not get
+ # cleared. It is benign, but the workaround is to start rabbitmq a
+ # bit earlier for RPM based distros.
+ sudo systemctl --now enable rabbitmq-server
fi
fi
}
diff --git a/lib/swift b/lib/swift
index 5be9e35..790fb99 100644
--- a/lib/swift
+++ b/lib/swift
@@ -428,10 +428,13 @@
swift_pipeline+=" s3api"
fi
if is_service_enabled keystone; then
+ swift_pipeline+=" authtoken"
if is_service_enabled s3api;then
swift_pipeline+=" s3token"
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
fi
- swift_pipeline+=" authtoken keystoneauth"
+ swift_pipeline+=" keystoneauth"
fi
swift_pipeline+=" tempauth "
@@ -524,7 +527,7 @@
else
iniset ${testfile} func_test auth_port 80
fi
- iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI}
+ iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI}
if [[ "$auth_vers" == "3" ]]; then
iniset ${testfile} func_test auth_prefix /identity/v3/
else
@@ -738,7 +741,9 @@
function install_swift {
git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
- setup_develop $SWIFT_DIR
+ # keystonemiddleware needs to be installed via keystone extras as defined
+ # in setup.cfg, see bug #1909018 for more details.
+ setup_develop $SWIFT_DIR keystone
if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
install_apache_wsgi
fi
diff --git a/lib/tempest b/lib/tempest
index 96c9ced..29a6229 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -27,6 +27,7 @@
# - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
# - ``DEFAULT_INSTANCE_TYPE``
# - ``DEFAULT_INSTANCE_USER``
+# - ``DEFAULT_INSTANCE_ALT_USER``
# - ``CINDER_ENABLED_BACKENDS``
# - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
#
@@ -107,7 +108,22 @@
function image_size_in_gib {
local size
size=$(openstack image show $1 -c size -f value)
- echo $size | python -c "import math; import six; print(int(math.ceil(float(int(six.moves.input()) / 1024.0 ** 3))))"
+ echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
+}
+
+function set_tempest_venv_constraints {
+ local tmp_c
+ tmp_c=$1
+ if [[ $TEMPEST_VENV_UPPER_CONSTRAINTS == "master" ]]; then
+ (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_c
+ else
+ echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
+ cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
+ # NOTE: setting both tox env var and once Tempest start using new var
+ # TOX_CONSTRAINTS_FILE then we can remove the old one.
+ export UPPER_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+ export TOX_CONSTRAINTS_FILE=$TEMPEST_VENV_UPPER_CONSTRAINTS
+ fi
}
# configure_tempest() - Set config files, create data dirs, etc
@@ -203,13 +219,13 @@
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
# Determine the flavor disk size based on the image size.
disk=$(image_size_in_gib $image_uuid)
- openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 m1.nano
+ openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
fi
flavor_ref=42
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
# Determine the alt flavor disk size based on the alt image size.
disk=$(image_size_in_gib $image_uuid_alt)
- openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 m1.micro
+ openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
fi
flavor_ref_alt=84
else
@@ -336,19 +352,20 @@
# so remove this once Tempest no longer supports Pike.
iniset $TEMPEST_CONFIG identity-feature-enabled application_credentials True
+ # In Train and later, access rules for application credentials are enabled
+ # by default so remove this once Tempest no longer supports Stein.
+ iniset $TEMPEST_CONFIG identity-feature-enabled access_rules True
+
# Image
# We want to be able to override this variable in the gate to avoid
# doing an external HTTP fetch for this test.
if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
fi
- if [ "$VIRT_DRIVER" = "xenserver" ]; then
- iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
- fi
-
- # Image Features
- if [ "$GLANCE_V1_ENABLED" != "True" ]; then
- iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False
+ iniset $TEMPEST_CONFIG image-feature-enabled import_image $GLANCE_USE_IMPORT_WORKFLOW
+ iniset $TEMPEST_CONFIG image-feature-enabled os_glance_reserved True
+ if is_service_enabled g-api-r; then
+ iniset $TEMPEST_CONFIG image alternate_image_endpoint image_remote
fi
# Compute
@@ -424,17 +441,9 @@
iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
# Scenario
- if [ "$VIRT_DRIVER" = "xenserver" ]; then
- SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
- SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
- iniset $TEMPEST_CONFIG scenario img_disk_format vhd
- iniset $TEMPEST_CONFIG scenario img_container_format ovf
- else
- SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
- SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
- fi
- iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR
- iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE
+ SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+ SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
+ iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE
# If using provider networking, use the physical network for validation rather than private
TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
@@ -445,7 +454,8 @@
iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-True}
iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
- iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+ iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:=cirros}
+ iniset $TEMPEST_CONFIG validation image_alt_ssh_user ${DEFAULT_INSTANCE_ALT_USER:-$DEFAULT_INSTANCE_USER}
iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
# Volume
@@ -472,6 +482,11 @@
TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
fi
iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
+ # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends
+ if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then
+ TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
+ fi
+ iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
# Reset microversions to None where v2 is running which does not support microversion.
@@ -570,17 +585,23 @@
iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
else
+ iniset $TEMPEST_CONFIG compute-feature-enabled shelve_migrate True
+ iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True
iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
fi
fi
# ``service_available``
#
- # this tempest service list needs to be all the services that
- # tempest supports, otherwise we can have an erroneous set of
+ # this tempest service list needs to be the services that
+ # tempest own, otherwise we can have an erroneous set of
# defaults (something defaulting true in Tempest, but not listed here).
+ # services tested by tempest plugins needs to be set on service devstack
+ # plugin side as devstack cannot keep track of all the tempest plugins
+ # services. Refer Bug#1743688 for more details.
+ # 'horizon' is also kept here as no devtack plugin for horizon.
local service
- local tempest_services="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove"
+ local tempest_services="key,glance,nova,neutron,cinder,swift,horizon"
for service in ${tempest_services//,/ }; do
if is_service_enabled $service ; then
iniset $TEMPEST_CONFIG service_available $service "True"
@@ -606,17 +627,18 @@
tox -revenv-tempest --notest
fi
- # The requirements might be on a different branch, while tempest needs master requirements.
- (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
- tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt
+ local tmp_u_c_m
+ tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+ set_tempest_venv_constraints $tmp_u_c_m
+ tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
+ rm -f $tmp_u_c_m
# Auth:
- iniset $TEMPEST_CONFIG auth tempest_roles "member"
if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
- tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+ tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
else
- tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+ tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
fi
iniset $TEMPEST_CONFIG auth use_dynamic_credentials False
iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
@@ -683,12 +705,25 @@
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
pip_install 'tox!=2.8.0'
pushd $TEMPEST_DIR
+ # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
+ # is tag name not master. git_clone would not checkout tag because
+ # TEMPEST_DIR already exist until RECLONE is true.
+ git checkout $TEMPEST_BRANCH
+
+ local tmp_u_c_m
+ tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+ set_tempest_venv_constraints $tmp_u_c_m
+
tox -r --notest -efull
+ # TODO: remove the trailing pip constraint when a proper fix
+ # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
+ $TEMPEST_DIR/.tox/tempest/bin/pip install -U -r $RC_DIR/tools/cap-pip.txt
# NOTE(mtreinish) Respect constraints in the tempest full venv, things that
# are using a tox job other than full will not be respecting constraints but
# running pip install -U on tempest requirements
- $TEMPEST_DIR/.tox/tempest/bin/pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
+ $TEMPEST_DIR/.tox/tempest/bin/pip install -c $tmp_u_c_m -r requirements.txt
PROJECT_VENV["tempest"]=${TEMPEST_DIR}/.tox/tempest
+ rm -f $tmp_u_c_m
popd
}
@@ -696,9 +731,11 @@
function install_tempest_plugins {
pushd $TEMPEST_DIR
if [[ $TEMPEST_PLUGINS != 0 ]] ; then
- # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements.
- (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
- tox -evenv-tempest -- pip install -c u-c-m.txt $TEMPEST_PLUGINS
+ local tmp_u_c_m
+ tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+ set_tempest_venv_constraints $tmp_u_c_m
+ tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS
+ rm -f $tmp_u_c_m
echo "Checking installed Tempest plugins:"
tox -evenv-tempest -- tempest list-plugins
fi
diff --git a/lib/tls b/lib/tls
index 65ffeb9..b3cc0b4 100644
--- a/lib/tls
+++ b/lib/tls
@@ -227,13 +227,7 @@
function init_cert {
if [[ ! -r $DEVSTACK_CERT ]]; then
if [[ -n "$TLS_IP" ]]; then
- if python3_enabled; then
- TLS_IP="IP:$TLS_IP"
- else
- # Lie to let incomplete match routines work with python2
- # see https://bugs.python.org/issue23239
- TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
- fi
+ TLS_IP="IP:$TLS_IP"
if [[ -n "$HOST_IPV6" ]]; then
TLS_IP="$TLS_IP,IP:$HOST_IPV6"
fi
@@ -255,7 +249,11 @@
if [ "$common_name" != "$SERVICE_HOST" ]; then
if is_ipv4_address "$SERVICE_HOST" ; then
- alt_names="$alt_names,IP:$SERVICE_HOST"
+ if [[ -z "$alt_names" ]]; then
+ alt_names="IP:$SERVICE_HOST"
+ else
+ alt_names="$alt_names,IP:$SERVICE_HOST"
+ fi
fi
fi
@@ -369,8 +367,7 @@
function fix_system_ca_bundle_path {
if is_service_enabled tls-proxy; then
local capath
- local python_cmd=${1:-python}
- capath=$($python_cmd -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+ capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
if is_fedora; then
@@ -573,14 +570,6 @@
restart_apache_server
}
-# Follow TLS proxy
-function follow_tls_proxy {
- sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
- tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
- sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
- tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
-}
-
# Cleanup Functions
# =================
diff --git a/openrc b/openrc
index 99d3351..beeaebe 100644
--- a/openrc
+++ b/openrc
@@ -87,9 +87,9 @@
# If you don't have a working .stackenv, this is the backup position
KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
-KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP}
+KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP}
-export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_AUTH_URI}
+export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI}
# Currently, in order to use openstackclient with Identity API v3,
# we need to set the domain which the user and project belong to.
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index 60f365a..68cb1d8 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -19,14 +19,14 @@
{% endfor -%}
{{- mtus|min -}}
- name: Calculate external_bridge_mtu
- # 50 bytes is overhead for vxlan (which is greater than GRE
+ # 30 bytes is overhead for vxlan (which is greater than GRE
# allowing us to use either overlay option with this MTU.
+ # 40 bytes is overhead for IPv6, which will also support an IPv4 overlay.
# TODO(andreaf) This should work, but it may have to be reconcilied with
# the MTU setting used by the multinode setup roles in multinode pre.yaml
set_fact:
- external_bridge_mtu: "{{ local_mtu | int - 50 }}"
+ external_bridge_mtu: "{{ local_mtu | int - 30 - 40 }}"
roles:
- - test-matrix
- configure-swap
- setup-stack-user
- setup-tempest-user
diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml
index e85c2ee..e4043d8 100644
--- a/playbooks/tox/run-both.yaml
+++ b/playbooks/tox/run-both.yaml
@@ -7,4 +7,5 @@
bindep_dir: "{{ zuul_work_dir }}"
- test-setup
- ensure-tox
+ - get-devstack-os-environment
- tox
diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml
index 22f8209..0d065c6 100644
--- a/playbooks/tox/run.yaml
+++ b/playbooks/tox/run.yaml
@@ -1,3 +1,4 @@
- hosts: all
roles:
+ - get-devstack-os-environment
- tox
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index cbec444..db38b10 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -14,7 +14,7 @@
name=""
for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do
name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
- journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz
+ journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt
done
- name: Export legacy syslog.txt
@@ -29,7 +29,7 @@
-t sudo \
--no-pager \
--since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
- | gzip - > {{ stage_dir }}/logs/syslog.txt.gz
+ > {{ stage_dir }}/logs/syslog.txt
# TODO: convert this to ansible
# - make a list of the above units
@@ -45,7 +45,7 @@
cmd: |
journalctl -o export \
--since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
- | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
+ | gzip > {{ stage_dir }}/logs/devstack.journal.gz
- name: Save journal README
become: true
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
index 598eb7f..30519f6 100644
--- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -7,10 +7,10 @@
To use it, you will need to convert it so journalctl can read it
locally. After downloading the file:
- $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
+ $ /lib/systemd/systemd-journal-remote <(zcat ./devstack.journal.gz) -o output.journal
Note this binary is not in the regular path. On Debian/Ubuntu
-platforms, you will need to have the "sytemd-journal-remote" package
+platforms, you will need to have the "systemd-journal-remote" package
installed.
It should result in something like:
diff --git a/roles/get-devstack-os-environment/README.rst b/roles/get-devstack-os-environment/README.rst
new file mode 100644
index 0000000..68ddce8
--- /dev/null
+++ b/roles/get-devstack-os-environment/README.rst
@@ -0,0 +1,40 @@
+Reads the OS_* variables set by devstack through openrc
+for the specified user and project and exports them as
+the os_env_vars fact.
+
+**WARNING**: this role is meant to be used as a porting aid
+for the non-unified python-<service>client jobs which
+are already around, as those clients do not use clouds.yaml
+as openstackclient does.
+When those clients and their jobs are deprecated and removed,
+or anyway when the new code is able to read from clouds.yaml
+directly, this role should be removed as well.
+
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: openrc_file
+ :default: {{ devstack_base_dir }}/devstack/openrc
+
+ The location of the generated openrc file.
+
+.. zuul:rolevar:: openrc_user
+ :default: admin
+
+ The user whose credentials should be retrieved.
+
+.. zuul:rolevar:: openrc_project
+ :default: admin
+
+ The project (which openrc_user is part of) whose
+ access data should be retrieved.
+
+.. zuul:rolevar:: openrc_enable_export
+ :default: false
+
+ Set it to true to export os_env_vars.
diff --git a/roles/get-devstack-os-environment/defaults/main.yaml b/roles/get-devstack-os-environment/defaults/main.yaml
new file mode 100644
index 0000000..f68ea56
--- /dev/null
+++ b/roles/get-devstack-os-environment/defaults/main.yaml
@@ -0,0 +1,6 @@
+devstack_base_dir: "/opt/stack"
+openrc_file: "{{ devstack_base_dir }}/devstack/openrc"
+openrc_user: admin
+openrc_project: admin
+openrc_enable_export: false
+tox_environment: {}
diff --git a/roles/get-devstack-os-environment/tasks/main.yaml b/roles/get-devstack-os-environment/tasks/main.yaml
new file mode 100644
index 0000000..b2c5e93
--- /dev/null
+++ b/roles/get-devstack-os-environment/tasks/main.yaml
@@ -0,0 +1,14 @@
+- when: openrc_enable_export
+ block:
+ - name: Extract the OS_ environment variables
+ shell:
+ cmd: |
+ source {{ openrc_file }} {{ openrc_user }} {{ openrc_project }} &>/dev/null
+ env | awk -F= 'BEGIN {print "---" } /^OS_/ { print " "$1": \""$2"\""} '
+ args:
+ executable: "/bin/bash"
+ register: env_os
+
+  - name: Append the OS_ environment variables to tox_environment
+ set_fact:
+ tox_environment: "{{ env_os.stdout|from_yaml|default({})|combine(tox_environment) }}"
diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml
index f747943..2b8ae01 100644
--- a/roles/orchestrate-devstack/tasks/main.yaml
+++ b/roles/orchestrate-devstack/tasks/main.yaml
@@ -18,6 +18,11 @@
name: sync-devstack-data
when: devstack_services['tls-proxy']|default(false)
+ - name: Sync controller ceph.conf and key rings to subnode
+ include_role:
+ name: sync-controller-ceph-conf-and-keys
+ when: devstack_plugins is defined and 'devstack-plugin-ceph' in devstack_plugins
+
- name: Run devstack on the sub-nodes
include_role:
name: run-devstack
diff --git a/roles/process-stackviz/README.rst b/roles/process-stackviz/README.rst
new file mode 100644
index 0000000..a8447d2
--- /dev/null
+++ b/roles/process-stackviz/README.rst
@@ -0,0 +1,22 @@
+Generate stackviz report.
+
+Generate stackviz report using subunit and dstat data, using
+the stackviz archive embedded in test images.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: stage_dir
+ :default: "{{ ansible_user_dir }}"
+
+ The stage directory where the input data can be found and
+ the output will be produced.
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ devstack_base_dir }}/tempest
+
+ Directory to work in. It has to be a fully qualified path.
diff --git a/roles/process-stackviz/defaults/main.yaml b/roles/process-stackviz/defaults/main.yaml
new file mode 100644
index 0000000..f3bc32b
--- /dev/null
+++ b/roles/process-stackviz/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
+zuul_work_dir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/process-stackviz/tasks/main.yaml b/roles/process-stackviz/tasks/main.yaml
new file mode 100644
index 0000000..3ba3d9c
--- /dev/null
+++ b/roles/process-stackviz/tasks/main.yaml
@@ -0,0 +1,73 @@
+- name: Process Stackviz
+ block:
+
+ - name: Devstack checks if stackviz archive exists
+ stat:
+ path: "/opt/cache/files/stackviz-latest.tar.gz"
+ register: stackviz_archive
+
+ - debug:
+ msg: "Stackviz archive could not be found in /opt/cache/files/stackviz-latest.tar.gz"
+ when: not stackviz_archive.stat.exists
+
+ - name: Check if subunit data exists
+ stat:
+ path: "{{ zuul_work_dir }}/testrepository.subunit"
+ register: subunit_input
+
+ - debug:
+ msg: "Subunit file could not be found at {{ zuul_work_dir }}/testrepository.subunit"
+ when: not subunit_input.stat.exists
+
+ - name: Install stackviz
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ block:
+ - include_role:
+ name: ensure-pip
+
+ - pip:
+ name: "file://{{ stackviz_archive.stat.path }}"
+ virtualenv: /tmp/stackviz
+ virtualenv_command: '{{ ensure_pip_virtualenv_command }}'
+ extra_args: -U
+
+ - name: Deploy stackviz static html+js
+ command: cp -pR /tmp/stackviz/share/stackviz-html {{ stage_dir }}/stackviz
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+ - name: Check if dstat data exists
+ stat:
+ path: "{{ devstack_base_dir }}/logs/dstat-csv.log"
+ register: dstat_input
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+
+ - name: Run stackviz with dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --dstat "{{ devstack_base_dir }}/logs/dstat-csv.log" \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - dstat_input.stat.exists
+
+ - name: Run stackviz without dstat
+ shell: |
+ cat {{ subunit_input.stat.path }} | \
+ /tmp/stackviz/bin/stackviz-export \
+ --env --stdin \
+ {{ stage_dir }}/stackviz/data
+ when:
+ - stackviz_archive.stat.exists
+ - subunit_input.stat.exists
+ - not dstat_input.stat.exists
+
+ ignore_errors: yes
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml
index fea05c8..77a74d7 100644
--- a/roles/setup-devstack-source-dirs/defaults/main.yaml
+++ b/roles/setup-devstack-source-dirs/defaults/main.yaml
@@ -1 +1,9 @@
devstack_base_dir: /opt/stack
+devstack_source_dirs:
+ - src/opendev.org/opendev
+ - src/opendev.org/openstack
+ - src/opendev.org/openstack-dev
+ - src/opendev.org/openstack-infra
+ - src/opendev.org/starlingx
+ - src/opendev.org/x
+ - src/opendev.org/zuul
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index 160757e..294c29c 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -1,13 +1,6 @@
- name: Find all OpenStack source repos used by this job
find:
- paths:
- - src/opendev.org/opendev
- - src/opendev.org/openstack
- - src/opendev.org/openstack-dev
- - src/opendev.org/openstack-infra
- - src/opendev.org/starlingx
- - src/opendev.org/x
- - src/opendev.org/zuul
+ paths: "{{ devstack_source_dirs }}"
file_type: directory
register: found_repos
diff --git a/roles/sync-controller-ceph-conf-and-keys/README.rst b/roles/sync-controller-ceph-conf-and-keys/README.rst
new file mode 100644
index 0000000..e3d2bb4
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/README.rst
@@ -0,0 +1,3 @@
+Sync ceph config and keys between controller and subnodes
+
+Simply copy the contents of /etc/ceph on the controller to subnodes.
diff --git a/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
new file mode 100644
index 0000000..71ece57
--- /dev/null
+++ b/roles/sync-controller-ceph-conf-and-keys/tasks/main.yaml
@@ -0,0 +1,15 @@
+- name: Ensure /etc/ceph exists on subnode
+ become: true
+ file:
+ path: /etc/ceph
+ state: directory
+
+- name: Copy /etc/ceph from controller to subnode
+ become: true
+ synchronize:
+ owner: yes
+ group: yes
+ perms: yes
+ src: /etc/ceph/
+ dest: /etc/ceph/
+ delegate_to: controller
diff --git a/setup.cfg b/setup.cfg
index 4e27ad8..146f010 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,6 +10,3 @@
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
-
-[wheel]
-universal = 1
diff --git a/stack.sh b/stack.sh
index c652f65..163fc5b 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
# a multi-node developer install.
# To keep this script simple we assume you are running on a recent **Ubuntu**
-# (16.04 Xenial or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
# (7 or newer) machine. (It may work on other platforms but support for those
# platforms is left to those who added them to DevStack.) It should work in
# a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -96,19 +96,25 @@
# templates and other useful files in the ``files`` subdirectory
FILES=$TOP_DIR/files
if [ ! -d $FILES ]; then
- die $LINENO "missing devstack/files"
+ set +o xtrace
+ echo "missing devstack/files"
+ exit 1
fi
# ``stack.sh`` keeps function libraries here
# Make sure ``$TOP_DIR/inc`` directory is present
if [ ! -d $TOP_DIR/inc ]; then
- die $LINENO "missing devstack/inc"
+ set +o xtrace
+ echo "missing devstack/inc"
+ exit 1
fi
# ``stack.sh`` keeps project libraries here
# Make sure ``$TOP_DIR/lib`` directory is present
if [ ! -d $TOP_DIR/lib ]; then
- die $LINENO "missing devstack/lib"
+ set +o xtrace
+ echo "missing devstack/lib"
+ exit 1
fi
# Check if run in POSIX shell
@@ -221,7 +227,9 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then
+SUPPORTED_DISTROS="bionic|focal|f31|f32|opensuse-15.2|opensuse-tumbleweed|rhel8"
+
+if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -283,74 +291,20 @@
# to pick up required packages.
function _install_epel {
- # NOTE: We always remove and install latest -- some environments
- # use snapshot images, and if EPEL version updates they break
- # unless we update them to latest version.
- if sudo yum repolist enabled epel | grep -q 'epel'; then
- uninstall_package epel-release || true
- fi
+ # epel-release is in extras repo which is enabled by default
+ install_package epel-release
- # This trick installs the latest epel-release from a bootstrap
- # repo, then removes itself (as epel-release installed the
- # "real" repo).
- #
- # You would think that rather than this, you could use
- # $releasever directly in .repo file we create below. However
- # RHEL gives a $releasever of "6Server" which breaks the path;
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
- cat <<EOF | sudo tee /etc/yum.repos.d/epel-bootstrap.repo
-[epel-bootstrap]
-name=Bootstrap EPEL
-mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=\$basearch
-failovermethod=priority
-enabled=0
-gpgcheck=0
-EOF
- # Enable a bootstrap repo. It is removed after finishing
- # the epel-release installation.
- is_package_installed yum-utils || install_package yum-utils
- sudo yum-config-manager --enable epel-bootstrap
- yum_install epel-release || \
- die $LINENO "Error installing EPEL repo, cannot continue"
- sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
+ # RDO repos are not tested with epel and may have incompatibilities so
+ # let's limit the packages fetched from epel to the ones not in RDO repos.
+ sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel
}
function _install_rdo {
- # There are multiple options for this, including using CloudSIG
- # repositories (centos-release-*), trunk versions, etc. Since
- # we're not interested in the actual openstack distributions
- # (since we're using git to run!) but only peripherial packages
- # like kvm or ovs, this has been reliable.
-
- # TODO(ianw): figure out how to best mirror -- probably use infra
- # mirror RDO reverse proxy. We could either have test
- # infrastructure set it up disabled like EPEL, or fiddle it here.
- # Per the point above, it's a bunch of repos so starts getting a
- # little messy...
- if ! is_package_installed rdo-release ; then
- if [[ "$TARGET_BRANCH" == "master" ]]; then
- yum_install https://rdoproject.org/repos/rdo-release.rpm
- else
- # Get latest rdo-release-$rdo_release RPM package version
- rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
- yum_install https://rdoproject.org/repos/openstack-$rdo_release/rdo-release-$rdo_release.rpm
- fi
- fi
-
- # Also enable optional for RHEL7 proper. Note this is a silent
- # no-op on other platforms.
- sudo yum-config-manager --enable rhel-7-server-optional-rpms
-
- # Enable the Software Collections (SCL) repository for CentOS.
- # This repository includes useful software (e.g. the Go Toolset)
- # which is not present in the main repository.
- if [[ "$os_VENDOR" =~ (CentOS) ]]; then
- yum_install centos-release-scl
- fi
-
- if is_oraclelinux; then
- sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
- fi
+ # NOTE(ianw) 2020-04-30 : when we have future branches, we
+ # probably want to install the relevant branch RDO release as
+ # well. But for now it's all master.
+ sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ sudo dnf -y update
}
@@ -382,6 +336,9 @@
safe_chmod 0755 $DATA_DIR
fi
+# Create and/or clean the async state directory
+async_init
+
# Configure proper hostname
# Certain services such as rabbitmq require that the local hostname resolves
# correctly. Make sure it exists in /etc/hosts so that is always true.
@@ -395,15 +352,22 @@
# to speed things up
SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
-if [[ $DISTRO == "rhel7" ]]; then
+if [[ $DISTRO == "rhel8" ]]; then
# If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI
# node, where EPEL is installed (but disabled) and already
# pointing at our internal mirror
if [[ -f /etc/ci/mirror_info.sh ]]; then
SKIP_EPEL_INSTALL=True
- sudo yum-config-manager --enable epel
+ sudo dnf config-manager --set-enabled epel
fi
+ # PowerTools repo provides libyaml-devel required by devstack itself and
+    # EPEL packages assume that the PowerTools repository is enabled.
+ sudo dnf config-manager --set-enabled PowerTools
+
+ # CentOS 8.3 changed the repository name to lower case.
+ sudo dnf config-manager --set-enabled powertools
+
if [[ ${SKIP_EPEL_INSTALL} != True ]]; then
_install_epel
fi
@@ -411,11 +375,17 @@
# available in RDO repositories (e.g. OVS, or later versions of
# kvm) to run.
_install_rdo
+
+ # NOTE(cgoncalves): workaround RHBZ#1154272
+ # dnf fails for non-privileged users when expired_repos.json doesn't exist.
+ # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
+ # Patch: https://github.com/rpm-software-management/dnf/pull/1448
+ echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
fi
# Ensure python is installed
# --------------------------
-is_package_installed python || install_package python
+install_python
# Configure Logging
@@ -494,14 +464,14 @@
_of_args="$_of_args --no-timestamp"
fi
# Set fd 1 and 2 to write the log file
- exec 1> >( $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
+ exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
# Set fd 6 to summary log file
- exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
else
# Set fd 1 and 2 to primary logfile
- exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
+ exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
# Set fd 6 to summary logfile and stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
fi
echo_summary "stack.sh log $LOGFILE"
@@ -518,7 +488,7 @@
exec 1>/dev/null 2>&1
fi
# Always send summary fd to original stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 )
fi
# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
@@ -554,9 +524,9 @@
generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
fi
if [[ -z $LOGDIR ]]; then
- $TOP_DIR/tools/worlddump.py
+ ${PYTHON} $TOP_DIR/tools/worlddump.py
else
- $TOP_DIR/tools/worlddump.py -d $LOGDIR
+ ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR
fi
else
# If we error before we've installed os-testr, this will fail.
@@ -695,11 +665,14 @@
# Database Configuration
# ----------------------
-# DevStack provides a MySQL database backend. Additional backends may be
-# provided by external plugins and can be enabled using the usual service
-# functions and ``ENABLED_SERVICES``. For example, to disable MySQL:
+# To select between database backends, add the following to ``local.conf``:
#
# disable_service mysql
+# enable_service postgresql
+#
+# The available database backends are listed in ``DATABASE_BACKENDS`` after
+# ``lib/database`` is sourced. ``mysql`` is the default.
+
if initialize_database_backends; then
echo "Using $DATABASE_TYPE database backend"
# Last chance for the database password. This must be handled here
@@ -745,16 +718,6 @@
fi
-# Nova
-# -----
-
-if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
- # Look for the backend password here because read_password
- # is not a library function.
- read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
-fi
-
-
# Swift
# -----
@@ -793,19 +756,6 @@
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
fi
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
-
-# Install Python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]]; then
- echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
- pip_install -U virtualenv
-
- rm -rf $DEST/.venv
- virtualenv --system-site-packages $DEST/.venv
- source $DEST/.venv/bin/activate
- $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
-fi
-
# Do the ugly hacks for broken packages and distros
source $TOP_DIR/tools/fixup_stuff.sh
fixup_all
@@ -813,13 +763,10 @@
# Install subunit for the subunit output stream
pip_install -U os-testr
-if [[ "$USE_SYSTEMD" == "True" ]]; then
- pip_install_gr systemd-python
- # the default rate limit of 1000 messages / 30 seconds is not
- # sufficient given how verbose our logging is.
- iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
- sudo systemctl restart systemd-journald
-fi
+# the default rate limit of 1000 messages / 30 seconds is not
+# sufficient given how verbose our logging is.
+iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
+sudo systemctl restart systemd-journald
# Virtual Environment
# -------------------
@@ -877,6 +824,13 @@
init_cert
fi
+# Dstat
+# -----
+
+# Install dstat services prerequisites
+install_dstat
+
+
# Check Out and Install Source
# ----------------------------
@@ -971,9 +925,6 @@
if is_service_enabled tls-proxy; then
fix_system_ca_bundle_path
- if python3_enabled ; then
- fix_system_ca_bundle_path python3
- fi
fi
# Extras Install
@@ -994,17 +945,6 @@
# osc commands. Alias dies with stack.sh.
install_oscwrap
-if [[ $TRACK_DEPENDS = True ]]; then
- $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
- if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
- echo "Detect some changes for installed packages of pip, in depend tracking mode"
- cat $DEST/requires.diff
- fi
- echo "Ran stack.sh in depend tracking mode, bailing out now"
- exit 0
-fi
-
-
# Syslog
# ------
@@ -1116,7 +1056,7 @@
# Set up password auth credentials now that Keystone is bootstrapped
export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
export OS_USERNAME=admin
export OS_USER_DOMAIN_ID=default
export OS_PASSWORD=$ADMIN_PASSWORD
@@ -1144,19 +1084,19 @@
create_keystone_accounts
if is_service_enabled nova; then
- create_nova_accounts
+ async_runfunc create_nova_accounts
fi
if is_service_enabled glance; then
- create_glance_accounts
+ async_runfunc create_glance_accounts
fi
if is_service_enabled cinder; then
- create_cinder_accounts
+ async_runfunc create_cinder_accounts
fi
if is_service_enabled neutron; then
- create_neutron_accounts
+ async_runfunc create_neutron_accounts
fi
if is_service_enabled swift; then
- create_swift_accounts
+ async_runfunc create_swift_accounts
fi
fi
@@ -1169,16 +1109,19 @@
if is_service_enabled horizon; then
echo_summary "Configuring Horizon"
- configure_horizon
+ async_runfunc configure_horizon
fi
+async_wait create_nova_accounts create_glance_accounts create_cinder_accounts
+async_wait create_neutron_accounts create_swift_accounts configure_horizon
# Glance
# ------
-if is_service_enabled g-reg; then
+# NOTE(yoctozepto): limited to node hosting the database which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
echo_summary "Configuring Glance"
- init_glance
+ async_runfunc init_glance
fi
@@ -1192,7 +1135,7 @@
# Run init_neutron only on the node hosting the Neutron API server
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
- init_neutron
+ async_runfunc init_neutron
fi
fi
@@ -1222,7 +1165,7 @@
if is_service_enabled swift; then
echo_summary "Configuring Swift"
- init_swift
+ async_runfunc init_swift
fi
@@ -1231,7 +1174,7 @@
if is_service_enabled cinder; then
echo_summary "Configuring Cinder"
- init_cinder
+ async_runfunc init_cinder
fi
# Placement Service
@@ -1239,9 +1182,16 @@
if is_service_enabled placement; then
echo_summary "Configuring placement"
- init_placement
+ async_runfunc init_placement
fi
+# Wait for neutron and placement before starting nova
+async_wait init_neutron
+async_wait init_placement
+async_wait init_glance
+async_wait init_swift
+async_wait init_cinder
+
# Compute Service
# ---------------
@@ -1253,7 +1203,7 @@
# TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
# not, remove the if here
if is_service_enabled neutron; then
- configure_neutron_nova
+ async_runfunc configure_neutron_nova
fi
fi
@@ -1284,51 +1234,36 @@
start_swift
fi
-# Launch the Glance services
-if is_service_enabled glance; then
- echo_summary "Starting Glance"
- start_glance
-fi
-
-
-# Install Images
-# ==============
-
-# Upload an image to Glance.
-#
-# The default image is CirrOS, a small testing image which lets you login as **root**
-# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
-
-if is_service_enabled g-reg; then
-
- echo_summary "Uploading images"
-
- for image_url in ${IMAGE_URLS//,/ }; do
- upload_image $image_url
- done
-fi
-
# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack
# deployments. This ensures the keys match across nova and cinder across all
# hosts.
FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec}
+if is_service_enabled cinder; then
+ iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
+fi
+
+async_wait configure_neutron_nova
+
+# NOTE(clarkb): This must come after async_wait configure_neutron_nova because
+# configure_neutron_nova modifies $NOVA_CONF and $NOVA_CPU_CONF as well. If
+# we don't wait then these two ini updates race either other and can result
+# in unexpected configs.
if is_service_enabled nova; then
iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY"
fi
-if is_service_enabled cinder; then
- iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
-fi
-
# Launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
echo_summary "Starting Nova API"
start_nova_api
fi
+if is_service_enabled ovn-controller ovn-controller-vtep; then
+ echo_summary "Starting OVN services"
+ start_ovn_services
+fi
+
if is_service_enabled neutron-api; then
echo_summary "Starting Neutron"
start_neutron_api
@@ -1351,13 +1286,20 @@
# Once neutron agents are started setup initial network elements
if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
echo_summary "Creating initial neutron network elements"
- create_neutron_initial_network
+ # Here's where plugins can wire up their own networks instead
+ # of the code in lib/neutron_plugins/services/l3
+ if type -p neutron_plugin_create_initial_networks > /dev/null; then
+ neutron_plugin_create_initial_networks
+ else
+ create_neutron_initial_network
+ fi
+
fi
if is_service_enabled nova; then
echo_summary "Starting Nova"
start_nova
- create_flavors
+ async_runfunc create_flavors
fi
if is_service_enabled cinder; then
echo_summary "Starting Cinder"
@@ -1365,6 +1307,40 @@
create_volume_types
fi
+# This sleep is required for cinder volume service to become active and
+# publish capabilities to cinder scheduler before creating the image-volume
+if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+ sleep 30
+fi
+
+# Launch the Glance services
+# NOTE (abhishekk): We need to start glance api service only after cinder
+# service has started as on glance startup glance-api queries cinder for
+# validating volume_type configured for cinder store of glance.
+if is_service_enabled glance; then
+ echo_summary "Starting Glance"
+ start_glance
+fi
+
+# Install Images
+# ==============
+
+# Upload an image to Glance.
+#
+# The default image is CirrOS, a small testing image which lets you login as **root**
+# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
+
+# NOTE(yoctozepto): limited to node hosting the database which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
+ echo_summary "Uploading images"
+
+ for image_url in ${IMAGE_URLS//,/ }; do
+ upload_image $image_url
+ done
+fi
+
if is_service_enabled horizon; then
echo_summary "Starting Horizon"
@@ -1372,6 +1348,8 @@
start_horizon
fi
+async_wait create_flavors
+
# Create account rc files
# =======================
@@ -1508,8 +1486,12 @@
exec 1>&3
fi
+# Make sure we didn't leak any background tasks
+async_cleanup
+
# Dump out the time totals
time_totals
+async_print_timing
# Using the cloud
# ===============
@@ -1542,14 +1524,11 @@
echo
fi
-# If USE_SYSTEMD is enabled, tell the user about using it.
-if [[ "$USE_SYSTEMD" == "True" ]]; then
- echo
- echo "Services are running under systemd unit files."
- echo "For more information see: "
- echo "https://docs.openstack.org/devstack/latest/systemd.html"
- echo
-fi
+echo
+echo "Services are running under systemd unit files."
+echo "For more information see: "
+echo "https://docs.openstack.org/devstack/latest/systemd.html"
+echo
# Useful info on current state
cat /etc/devstack-version
diff --git a/stackrc b/stackrc
index 3fcdadf..196f61f 100644
--- a/stackrc
+++ b/stackrc
@@ -69,7 +69,7 @@
# Placement service needed for Nova
ENABLED_SERVICES+=,placement-api,placement-client
# Glance services needed for Nova
- ENABLED_SERVICES+=,g-api,g-reg
+ ENABLED_SERVICES+=,g-api
# Cinder
ENABLED_SERVICES+=,c-sch,c-api,c-vol
# Neutron
@@ -89,6 +89,15 @@
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=osapi_compute,metadata
+# allow local overrides of env variables, including repo config
+if [[ -f $RC_DIR/localrc ]]; then
+ # Old-style user-supplied config
+ source $RC_DIR/localrc
+elif [[ -f $RC_DIR/.localrc.auto ]]; then
+ # New-style user-supplied config extracted from local.conf
+ source $RC_DIR/.localrc.auto
+fi
+
# CELLSV2_SETUP - how we should configure services with cells v2
#
# - superconductor - this is one conductor for the api services, and
@@ -100,9 +109,7 @@
# Set the root URL for Horizon
HORIZON_APACHE_ROOT="/dashboard"
-# Whether to use SYSTEMD to manage services, we only do this from
-# Queens forward.
-USE_SYSTEMD="True"
+# Whether to use user specific units for running services or global ones.
USER_UNITS=$(trueorfalse False USER_UNITS)
if [[ "$USER_UNITS" == "True" ]]; then
SYSTEMD_DIR="$HOME/.local/share/systemd/user"
@@ -127,37 +134,17 @@
fi
# Control whether Python 3 should be used at all.
-export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
+# TODO(frickler): Drop this when all consumers are fixed
+export USE_PYTHON3=True
-# Explicitly list services not to run under Python 3. See
-# disable_python3_package to edit this variable.
-export DISABLED_PYTHON3_PACKAGES=""
-
-# When Python 3 is supported by an application, adding the specific
-# version of Python 3 to this variable will install the app using that
-# version of the interpreter instead of 2.7.
+# Adding the specific version of Python 3 to this variable will install
+# the app using that version of the interpreter instead of just 3.
_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
-export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
-
-# Just to be more explicit on the Python 2 version to use.
-_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
-export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}}
# Create a virtualenv with this
-if [[ ${USE_PYTHON3} == True ]]; then
- export VIRTUALENV_CMD="python3 -m venv"
-else
- export VIRTUALENV_CMD="virtualenv "
-fi
-
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
- # Old-style user-supplied config
- source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
- # New-style user-supplied config extracted from local.conf
- source $RC_DIR/.localrc.auto
-fi
+# Use the built-in venv to avoid more dependencies
+export VIRTUALENV_CMD="python3 -m venv"
# Default for log coloring is based on interactive-or-not.
# Baseline assumption is that non-interactive invocations are for CI,
@@ -240,7 +227,7 @@
GIT_BASE=${GIT_BASE:-https://opendev.org}
# The location of REQUIREMENTS once cloned
-REQUIREMENTS_DIR=$DEST/requirements
+REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements}
# Which libraries should we install from git instead of using released
# versions on pypi?
@@ -258,7 +245,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="ussuri"
+DEVSTACK_SERIES="xena"
##############
#
@@ -286,10 +273,6 @@
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH}
-# neutron fwaas service
-NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
-NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH}
-
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH}
@@ -315,6 +298,7 @@
# Tempest test suite
TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master}
##############
@@ -566,6 +550,16 @@
GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git}
GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH}
+# ovsdbapp used by neutron
+GITREPO["ovsdbapp"]=${OVSDBAPP_REPO:-${GIT_BASE}/openstack/ovsdbapp.git}
+GITBRANCH["ovsdbapp"]=${OVSDBAPP_BRANCH:-$TARGET_BRANCH}
+GITDIR["ovsdbapp"]=$DEST/ovsdbapp
+
+# os-ken used by neutron
+GITREPO["os-ken"]=${OS_KEN_REPO:-${GIT_BASE}/openstack/os-ken.git}
+GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
+GITDIR["os-ken"]=$DEST/os-ken
+
##################
#
# TripleO / Heat Agent Components
@@ -603,7 +597,7 @@
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -617,14 +611,13 @@
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
-# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
-# is installed, the default will be XenAPI
+# also install an **LXC** or **OpenVZ** based system.
DEFAULT_VIRT_DRIVER=libvirt
-is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+ LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
@@ -644,21 +637,10 @@
fake)
NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
;;
- xenserver)
- # Xen config common to nova and neutron
- XENAPI_USER=${XENAPI_USER:-"root"}
- # This user will be used for dom0 - domU communication
- # should be able to log in to dom0 without a password
- # will be used to install the plugins
- DOMZERO_USER=${DOMZERO_USER:-"domzero"}
- ;;
*)
;;
esac
-# By default, devstack will use Ubuntu Cloud Archive.
-ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE)
-
# Images
# ------
@@ -681,7 +663,7 @@
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
-CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -709,11 +691,6 @@
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME}
IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";;
- xenserver)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk}
- DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
- IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
- IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
fake)
# Use the same as the default for libvirt
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
@@ -781,8 +758,8 @@
fi
done
-# 24Gb default volume backing file size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G}
+# 30Gb default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-30G}
# Prefixes for volume and instance names
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
@@ -818,15 +795,6 @@
# Service graceful shutdown timeout
WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
-# Choose DNF on RedHat/Fedora platforms with it, or otherwise default
-# to YUM. Can remove this when only dnf is supported (i.e. centos7
-# disappears)
-if [[ -e /usr/bin/dnf ]]; then
- YUM=${YUM:-dnf}
-else
- YUM=${YUM:-yum}
-fi
-
# Common Configuration
# --------------------
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index c3b4457..5b53389 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -44,7 +44,7 @@
ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"
ALL_LIBS+=" keystoneauth ironic-lib neutron-lib oslo.privsep"
ALL_LIBS+=" diskimage-builder os-vif python-brick-cinderclient-ext"
-ALL_LIBS+=" castellan python-barbicanclient"
+ALL_LIBS+=" castellan python-barbicanclient ovsdbapp os-ken"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tests/test_python.sh b/tests/test_python.sh
deleted file mode 100755
index 1f5453c..0000000
--- a/tests/test_python.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Tests for DevStack INI functions
-
-TOP=$(cd $(dirname "$0")/.. && pwd)
-
-source $TOP/functions-common
-source $TOP/inc/python
-
-source $TOP/tests/unittest.sh
-
-echo "Testing Python 3 functions"
-
-# Initialize variables manipulated by functions under test.
-export DISABLED_PYTHON3_PACKAGES=""
-
-assert_true "should be enabled by default" python3_enabled_for testpackage1
-
-assert_false "should not be disabled yet" python3_disabled_for testpackage2
-
-disable_python3_package testpackage2
-assert_equal "$DISABLED_PYTHON3_PACKAGES" "testpackage2" "unexpected result"
-assert_true "should be disabled" python3_disabled_for testpackage2
-
-report_results
diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh
index f407d40..9196525 100755
--- a/tests/test_worlddump.sh
+++ b/tests/test_worlddump.sh
@@ -8,7 +8,7 @@
OUT_DIR=$(mktemp -d)
-$TOP/tools/worlddump.py -d $OUT_DIR
+${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR
if [[ $? -ne 0 ]]; then
fail "worlddump failed"
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
index f5278d7..8ee551b 100644
--- a/tools/cap-pip.txt
+++ b/tools/cap-pip.txt
@@ -1 +1 @@
-pip!=8,<10
+pip<20.3
diff --git a/tools/debug_function.sh b/tools/debug_function.sh
new file mode 100755
index 0000000..68bd85d
--- /dev/null
+++ b/tools/debug_function.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# This is a small helper to speed development and debug with devstack.
+# It is intended to help you run a single function in a project module
+# without having to re-stack.
+#
+# For example, to run the just start_glance function, do this:
+#
+# ./tools/debug_function.sh glance start_glance
+
+if [ ! -f "lib/$1" ]; then
+ echo "Usage: $0 [project] [function] [function...]"
+fi
+
+source stackrc
+source lib/$1
+shift
+set -x
+while [ "$1" ]; do
+ echo ==== Running $1 ====
+ $1
+ echo ==== Done with $1 ====
+ shift
+done
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index d298937..25f7268 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -5,16 +5,6 @@
# fixup_stuff.sh
#
# All distro and package specific hacks go in here
-#
-# - prettytable 0.7.2 permissions are 600 in the package and
-# pip 1.4 doesn't fix it (1.3 did)
-#
-# - httplib2 0.8 permissions are 600 in the package and
-# pip 1.4 doesn't fix it (1.3 did)
-#
-# - Fedora:
-# - set selinux not enforcing
-# - uninstall firewalld (f20 only)
# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
@@ -71,15 +61,9 @@
# Ubuntu Repositories
#--------------------
-# We've found that Libvirt on Xenial is flaky and crashes enough to be
-# a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to
-# get newer Libvirt.
-# Make it possible to switch this based on an environment variable as
-# libvirt 2.5.0 doesn't handle nested virtualization quite well and this
-# is required for the trove development environment.
-# Also enable universe since it is missing when installing from ISO.
+# Enable universe for bionic since it is missing when installing from ISO.
function fixup_ubuntu {
- if [[ "$DISTRO" != "xenial" && "$DISTRO" != "bionic" ]]; then
+ if [[ "$DISTRO" != "bionic" ]]; then
return
fi
@@ -89,67 +73,32 @@
# Enable universe
sudo add-apt-repository -y universe
- if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then
- return
- fi
- # Use UCA for newer libvirt.
if [[ -f /etc/ci/mirror_info.sh ]] ; then
- # If we are on a nodepool provided host and it has told us about where
- # we can find local mirrors then use that mirror.
+ # If we are on a nodepool provided host and it has told us about
+ # where we can find local mirrors then use that mirror.
source /etc/ci/mirror_info.sh
-
- sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/queens main"
+ sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR bionic-updates/ussuri main"
else
- # Otherwise use upstream UCA
- sudo add-apt-repository -y cloud-archive:queens
+ # Enable UCA:ussuri for updated versions of QEMU and libvirt
+ sudo add-apt-repository -y cloud-archive:ussuri
fi
-
- # Disable use of libvirt wheel since a cached wheel build might be
- # against older libvirt binary. Particularly a problem if using
- # the openstack wheel mirrors, but can hit locally too.
- # TODO(clarkb) figure out how to use upstream wheel again.
- iniset -sudo /etc/pip.conf "global" "no-binary" "libvirt-python"
-
- # Force update our APT repos, since we added UCA above.
REPOS_UPDATED=False
apt_get_update
+
+ # Since pip10, pip will refuse to uninstall files from packages
+ # that were created with distutils (rather than more modern
+ # setuptools). This is because it technically doesn't have a
+ # manifest of what to remove. However, in most cases, simply
+ # overwriting works. So this hacks around those packages that
+ # have been dragged in by some other system dependency
+ sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
}
# Python Packages
# ---------------
-# get_package_path python-package # in import notation
-function get_package_path {
- local package=$1
- echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])")
-}
-
-
-# Pre-install affected packages so we can fix the permissions
-# These can go away once we are confident that pip 1.4.1+ is available everywhere
-
-function fixup_python_packages {
- # Fix prettytable 0.7.2 permissions
- # Don't specify --upgrade so we use the existing package if present
- pip_install 'prettytable>=0.7'
- PACKAGE_DIR=$(get_package_path prettytable)
- # Only fix version 0.7.2
- dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
- if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
- fi
-
- # Fix httplib2 0.8 permissions
- # Don't specify --upgrade so we use the existing package if present
- pip_install httplib2
- PACKAGE_DIR=$(get_package_path httplib2)
- # Only fix version 0.8
- dir=$(echo $PACKAGE_DIR-0.8*)
- if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
- fi
-}
-
function fixup_fedora {
if ! is_fedora; then
return
@@ -187,42 +136,13 @@
fi
fi
- if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then
- # requests ships vendored version of chardet/urllib3, but on
- # fedora these are symlinked back to the primary versions to
- # avoid duplication of code on disk. This is fine when
- # maintainers keep things in sync, but since devstack takes
- # over and installs later versions via pip we can end up with
- # incompatible versions.
- #
- # The rpm package is not removed to preserve the dependent
- # packages like cloud-init; rather we remove the symlinks and
- # force a re-install of requests so the vendored versions it
- # wants are present.
- #
- # Realted issues:
- # https://bugs.launchpad.net/glance/+bug/1476770
- # https://bugzilla.redhat.com/show_bug.cgi?id=1253823
-
- base_path=$(get_package_path requests)/packages
- if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
- sudo rm -f $base_path/{chardet,urllib3}
- # install requests with the bundled urllib3 to avoid conflicts
- pip_install --upgrade --force-reinstall requests
- fi
-
- fi
-
# Since pip10, pip will refuse to uninstall files from packages
# that were created with distutils (rather than more modern
# setuptools). This is because it technically doesn't have a
# manifest of what to remove. However, in most cases, simply
# overwriting works. So this hacks around those packages that
# have been dragged in by some other system dependency
- sudo rm -rf /usr/lib/python2.7/site-packages/enum34*.egg-info
- sudo rm -rf /usr/lib/python2.7/site-packages/ipaddress*.egg-info
- sudo rm -rf /usr/lib/python2.7/site-packages/ply-*.egg-info
- sudo rm -rf /usr/lib/python2.7/site-packages/typing-*.egg-info
+ sudo rm -rf /usr/lib64/python3*/site-packages/PyYAML-*.egg-info
}
function fixup_suse {
@@ -257,42 +177,24 @@
# have been dragged in by some other system dependency
sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info
+
+ # Ensure trusted CA certificates are up to date
+ # See https://bugzilla.suse.com/show_bug.cgi?id=1154871
+ # May be removed once a new opensuse-15 image is available in nodepool
+ sudo zypper up -y p11-kit ca-certificates-mozilla
}
-# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
-# connection issues under proxy so re-install the latest version using
-# pip. To avoid having pip's virtualenv overwritten by the distro's
-# package (e.g. due to installing a distro package with a dependency
-# on python-virtualenv), first install the distro python-virtualenv
-# to satisfy any dependencies then use pip to overwrite it.
-
-# ... but, for infra builds, the pip-and-virtualenv [1] element has
-# already done this to ensure the latest pip, virtualenv and
-# setuptools on the base image for all platforms. It has also added
-# the packages to the yum/dnf ignore list to prevent them being
-# overwritten with old versions. F26 and dnf 2.0 has changed
-# behaviour that means re-installing python-virtualenv fails [2].
-# Thus we do a quick check if we're in the infra environment by
-# looking for the mirror config script before doing this, and just
-# skip it if so.
-
-# [1] https://opendev.org/openstack/diskimage-builder/src/branch/master/ \
-# diskimage_builder/elements/pip-and-virtualenv/ \
-# install.d/pip-and-virtualenv-source-install/04-install-pip
-# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
-
-function fixup_virtualenv {
- if [[ ! -f /etc/ci/mirror_info.sh ]]; then
- install_package python-virtualenv
- pip_install -U --force-reinstall virtualenv
+function fixup_ovn_centos {
+ if [[ $os_VENDOR != "CentOS" ]]; then
+ return
fi
+ # OVN packages are part of this release for CentOS
+ yum_install centos-release-openstack-victoria
}
function fixup_all {
fixup_keystone
fixup_ubuntu
- fixup_python_packages
fixup_fedora
fixup_suse
- fixup_virtualenv
}
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index d39b801..1cacd06 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
@@ -45,9 +45,14 @@
def is_in_wanted_namespace(proj):
# only interested in openstack or x namespace (e.g. not retired
- # stackforge, etc)
+ # stackforge, etc).
+ #
+ # openstack/openstack "super-repo" of openstack projects as
+ # submodules, that can cause gitea to 500 timeout and thus stop
+ # this script. Skip it.
if proj.startswith('stackforge/') or \
- proj.startswith('stackforge-attic/'):
+ proj.startswith('stackforge-attic/') or \
+ proj == "openstack/openstack":
return False
else:
return True
diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh
index a3aa7ba..3307943 100755
--- a/tools/generate-devstack-plugins-list.sh
+++ b/tools/generate-devstack-plugins-list.sh
@@ -54,7 +54,7 @@
cat data/devstack-plugins-registry.header
fi
-sorted_plugins=$(python tools/generate-devstack-plugins-list.py)
+sorted_plugins=$(python3 tools/generate-devstack-plugins-list.py)
# find the length of the name column & pad
name_col_len=$(echo "${sorted_plugins}" | wc -L)
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 3a27c4a..81231be 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -22,7 +22,7 @@
# Possible virt drivers, if we have more, add them here. Always keep
# dummy in the end position to trigger the fall through case.
-DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
+DRIVERS="openvz ironic libvirt vsphere dummy"
# Extra variables to trigger getting additional images.
export ENABLED_SERVICES="h-api,tr-api"
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 2b6aa4c..9afd2e5 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -5,7 +5,7 @@
# Update pip and friends to a known common version
# Assumptions:
-# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed
+# - PYTHON3_VERSION refers to a version already installed
set -o errexit
@@ -53,6 +53,8 @@
else
echo "pip: Not Installed"
fi
+ # Show python3 module version
+ python${PYTHON3_VERSION} -m pip --version
}
@@ -89,10 +91,9 @@
die $LINENO "Download of get-pip.py failed"
touch $LOCAL_PIP.downloaded
fi
- sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
- if python3_enabled; then
- sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
- fi
+ # TODO: remove the trailing pip constraint when a proper fix
+ # arrives for bug https://bugs.launchpad.net/devstack/+bug/1906322
+ sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
}
@@ -125,7 +126,14 @@
# Show starting versions
get_versions
-# Do pip
+if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
+ configure_pypi_alternative_url
+fi
+
+# Just use system pkgs on Focal
+if [[ "$DISTRO" == focal ]]; then
+ exit 0
+fi
# Eradicate any and all system packages
@@ -133,16 +141,13 @@
# results in a nonfunctional system. pip on fedora installs to /usr so pip
# can safely override the system pip for all versions of fedora
if ! is_fedora && ! is_suse; then
- uninstall_package python-pip
- uninstall_package python3-pip
+ if is_package_installed python3-pip ; then
+ uninstall_package python3-pip
+ fi
fi
install_get_pip
-if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
- configure_pypi_alternative_url
-fi
-
set -x
# Note setuptools is part of requirements.txt and we want to make sure
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index da59093..a7c03d2 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -81,12 +81,6 @@
fi
fi
-if python3_enabled; then
- install_python3
- export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
-else
- export PYTHON=$(which python 2>/dev/null)
-fi
# Mark end of run
# ---------------
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 63f25ca..6c36534 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,7 @@
set -o errexit
-PYTHON=${PYTHON:-python}
+PYTHON=${PYTHON:-python3}
# time to sleep between checks
SLEEP_TIME=20
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
old mode 100755
new mode 100644
index 07716b0..1b081bb
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# This tool lists processes that lock memory pages from swapping to disk.
import re
@@ -26,17 +24,19 @@
# iterate over the /proc/%pid/status files manually
try:
s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
- except EnvironmentError:
+ with s:
+ for line in s:
+ result = LCK_SUMMARY_REGEX.search(line)
+ if result:
+ locked = int(result.group('locked'))
+ if locked:
+ mlock_users.append({'name': proc.name(),
+ 'pid': proc.pid,
+ 'locked': locked})
+ except OSError:
+ # pids can disappear, we're ok with that
continue
- with s:
- for line in s:
- result = LCK_SUMMARY_REGEX.search(line)
- if result:
- locked = int(result.group('locked'))
- if locked:
- mlock_users.append({'name': proc.name(),
- 'pid': proc.pid,
- 'locked': locked})
+
# produce a single line log message with per process mlock stats
if mlock_users:
diff --git a/tools/outfilter.py b/tools/outfilter.py
old mode 100755
new mode 100644
index cf09124..e910f79
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#
+#!/usr/bin/env python3
+
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
deleted file mode 100644
index 1d994a6..0000000
--- a/tools/uec/meta.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import BaseHTTPServer
-import SimpleHTTPServer
-import sys
-
-
-def main(host, port, HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
- ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
- """simple http server that listens on a give address:port."""
-
- server_address = (host, port)
-
- HandlerClass.protocol_version = protocol
- httpd = ServerClass(server_address, HandlerClass)
-
- sa = httpd.socket.getsockname()
- print("Serving HTTP on", sa[0], "port", sa[1], "...")
- httpd.serve_forever()
-
-if __name__ == '__main__':
- if sys.argv[1:]:
- address = sys.argv[1]
- else:
- address = '0.0.0.0'
- if ':' in address:
- host, port = address.split(':')
- else:
- host = address
- port = 8080
-
- main(host, int(port))
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 9187c66..7be995e 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
diff --git a/tools/worlddump.py b/tools/worlddump.py
index d1453ca..22770f1 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
@@ -17,14 +17,12 @@
"""Dump the state of the world for post mortem."""
-from __future__ import print_function
-
import argparse
import datetime
from distutils import spawn
import fnmatch
+import io
import os
-import os.path
import shutil
import subprocess
import sys
@@ -109,9 +107,10 @@
# This method gets max version searching 'OpenFlow versions 0x1:0x'.
# And return a version value converted to an integer type.
def _get_ofp_version():
- process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE)
+ process = subprocess.Popen(['ovs-ofctl', '--version'],
+ stdout=subprocess.PIPE)
stdout, _ = process.communicate()
- find_str = 'OpenFlow versions 0x1:0x'
+ find_str = b'OpenFlow versions 0x1:0x'
offset = stdout.find(find_str)
return int(stdout[offset + len(find_str):-1]) - 1
@@ -165,14 +164,13 @@
_header("Network Dump")
_dump_cmd("bridge link")
- _dump_cmd("brctl show")
_dump_cmd("ip link show type bridge")
- ip_cmds = ["neigh", "addr", "link", "route"]
+ ip_cmds = ["neigh", "addr", "route", "-6 route"]
for cmd in ip_cmds + ['netns']:
_dump_cmd("ip %s" % cmd)
for netns_ in _netns_list():
for cmd in ip_cmds:
- args = {'netns': netns_, 'cmd': cmd}
+ args = {'netns': bytes.decode(netns_), 'cmd': cmd}
_dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args)
@@ -193,7 +191,7 @@
_dump_cmd("sudo ovs-vsctl show")
for ofctl_cmd in ofctl_cmds:
for bridge in bridges:
- args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge}
+ args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)}
_dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args)
@@ -205,7 +203,7 @@
def compute_consoles():
_header("Compute consoles")
- for root, dirnames, filenames in os.walk('/opt/stack'):
+ for root, _, filenames in os.walk('/opt/stack'):
for filename in fnmatch.filter(filenames, 'console.log'):
fullpath = os.path.join(root, filename)
_dump_cmd("sudo cat %s" % fullpath)
@@ -233,12 +231,22 @@
# tools out there that can do that sort of thing though.
_dump_cmd("ls -ltrah /var/core")
+
+def disable_stdio_buffering():
+ # re-open STDOUT as binary, then wrap it in a
+ # TextIOWrapper, and write through everything.
+ binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0)
+ sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True)
+
+
def main():
opts = get_options()
fname = filename(opts.dir, opts.name)
print("World dumping... see %s for details" % fname)
- sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
- with open(fname, 'w') as f:
+
+ disable_stdio_buffering()
+
+ with io.open(fname, 'w') as f:
os.dup2(f.fileno(), sys.stdout.fileno())
disk_space()
process_list()
diff --git a/tools/xen/README.md b/tools/xen/README.md
deleted file mode 100644
index 2873011..0000000
--- a/tools/xen/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Note: XenServer relative tools have been moved to `os-xenapi`_ and be maintained there.
-
-.. _os-xenapi: https://opendev.org/x/os-xenapi/
diff --git a/tox.ini b/tox.ini
index 26baa2a..5bb2268 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,15 +5,14 @@
[testenv]
usedevelop = False
-install_command = pip install {opts} {packages}
+basepython = python3
[testenv:bashate]
-basepython = python3
# if you want to test out some changes you have made to bashate
# against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
# modified bashate tree
deps =
- {env:BASHATE_INSTALL_PATH:bashate==0.5.1}
+ {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
@@ -35,8 +34,9 @@
-print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
-basepython = python3
-deps = -r{toxinidir}/doc/requirements.txt
+deps =
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/doc/requirements.txt
whitelist_externals = bash
setenv =
TOP_DIR={toxinidir}
@@ -44,7 +44,6 @@
sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
[testenv:pdf-docs]
-basepython = python3
deps = {[testenv:docs]deps}
whitelist_externals =
make
@@ -53,6 +52,5 @@
make -C doc/build/pdf
[testenv:venv]
-basepython = python3
deps = -r{toxinidir}/doc/requirements.txt
commands = {posargs}
diff --git a/unstack.sh b/unstack.sh
index 07dc2b1..d9dca7c 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -99,6 +99,7 @@
if is_service_enabled nova; then
stop_nova
+ cleanup_nova
fi
if is_service_enabled placement; then
@@ -147,6 +148,10 @@
stop_service mysql
fi
+ if is_service_enabled postgresql; then
+ stop_service postgresql
+ fi
+
# Stop rabbitmq-server
if is_service_enabled rabbit; then
stop_service rabbitmq-server
@@ -177,3 +182,6 @@
clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
clean_lvm_filter
fi
+
+clean_pyc_files
+rm -Rf $DEST/async