Merge "Add "OVN" debs and rpms package files"
diff --git a/.zuul.yaml b/.zuul.yaml
index cbb9d99..f9808e0 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -9,6 +9,16 @@
- controller
- nodeset:
+ name: openstack-single-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: openstack-single-node-bionic
nodes:
- name: controller
@@ -39,6 +49,16 @@
- controller
- nodeset:
+ name: devstack-single-node-centos-8
+ nodes:
+ - name: controller
+ label: centos-8
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: devstack-single-node-opensuse-15
nodes:
- name: controller
@@ -52,7 +72,7 @@
name: devstack-single-node-fedora-latest
nodes:
- name: controller
- label: fedora-29
+ label: fedora-31
groups:
- name: tempest
nodes:
@@ -89,6 +109,36 @@
- compute1
- nodeset:
+ name: openstack-two-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ - name: compute1
+ label: ubuntu-focal
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
name: openstack-two-node-bionic
nodes:
- name: controller
@@ -149,6 +199,41 @@
- compute1
- nodeset:
+ name: openstack-three-node-focal
+ nodes:
+ - name: controller
+ label: ubuntu-focal
+ - name: compute1
+ label: ubuntu-focal
+ - name: compute2
+ label: ubuntu-focal
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ - compute2
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ - compute2
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+ - compute2
+
+- nodeset:
name: openstack-three-node-bionic
nodes:
- name: controller
@@ -320,14 +405,12 @@
dstat: true
etcd3: true
mysql: true
- peakmem_tracker: true
rabbit: true
group-vars:
subnode:
devstack_services:
# Shared services
dstat: true
- peakmem_tracker: true
devstack_localrc:
# Multinode specific settings
HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -394,14 +477,12 @@
dstat: true
etcd3: true
mysql: true
- peakmem_tracker: true
rabbit: true
tls-proxy: true
# Keystone services
key: true
# Glance services
g-api: true
- g-reg: true
# Nova services
n-api: true
n-api-meta: true
@@ -409,22 +490,15 @@
n-cpu: true
n-novnc: true
n-sch: true
+ # Placement service
placement-api: true
# Neutron services
- # We need to keep using the neutron-legacy based services for
- # now until all issues with the new lib/neutron code are solved
q-agt: true
q-dhcp: true
q-l3: true
q-meta: true
q-metering: true
q-svc: true
- # neutron-api: true
- # neutron-agent: true
- # neutron-dhcp: true
- # neutron-l3: true
- # neutron-metadata-agent: true
- # neutron-metering: true
# Swift services
s-account: true
s-container: true
@@ -450,16 +524,13 @@
# This list replaces the test-matrix.
# Shared services
dstat: true
- peakmem_tracker: true
tls-proxy: true
# Nova services
n-cpu: true
+ # Placement services
placement-client: true
# Neutron services
- # We need to keep using the neutron-legacy based services for
- # now until all issues with the new lib/neutron code are solved
q-agt: true
- # neutron-agent: true
# Cinder services
c-bak: true
c-vol: true
@@ -469,7 +540,7 @@
# s-*: false
horizon: false
tempest: false
- # Test matrix emits ceilometer but ceilomenter is not installed in the
+ # Test matrix emits ceilometer but ceilometer is not installed in the
# integrated gate, so specifying the services has not effect.
# ceilometer-*: false
devstack_localrc:
@@ -487,9 +558,6 @@
devstack_localrc:
SERVICE_IP_VERSION: 6
SERVICE_HOST: ""
- # IPv6 and certificates known issue with python2
- # https://bugs.launchpad.net/devstack/+bug/1794929
- USE_PYTHON3: true
- job:
name: devstack-multinode
@@ -504,6 +572,14 @@
# and these platforms don't have the round-the-clock support to avoid
# becoming blockers in that situation.
- job:
+ name: devstack-platform-centos-8
+ parent: tempest-full-py3
+ description: Centos 8 platform test
+ nodeset: devstack-single-node-centos-8
+ voting: false
+ timeout: 9000
+
+- job:
name: devstack-platform-opensuse-15
parent: tempest-full-py3
description: openSUSE 15.x platform test
@@ -511,6 +587,15 @@
voting: false
- job:
+ name: devstack-platform-focal
+ parent: tempest-full-py3
+ description: Ubuntu Focal Fossa platform test
+ nodeset: openstack-single-node-focal
+ vars:
+ tempest_black_regex: "(tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume|tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON|tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)"
+ voting: false
+
+- job:
name: devstack-platform-fedora-latest
parent: tempest-full-py3
description: Fedora latest platform test
@@ -518,6 +603,16 @@
voting: false
- job:
+ name: devstack-platform-fedora-latest-virt-preview
+ parent: tempest-full-py3
+ description: Fedora latest platform test using the virt-preview repo.
+ nodeset: devstack-single-node-fedora-latest
+ voting: false
+ vars:
+ devstack_localrc:
+ ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
+
+- job:
name: devstack-tox-base
parent: devstack
description: |
@@ -582,7 +677,6 @@
- project:
templates:
- - integrated-gate
- integrated-gate-py3
- publish-openstack-docs-pti
check:
@@ -591,6 +685,8 @@
- devstack-ipv6
- devstack-platform-opensuse-15
- devstack-platform-fedora-latest
+ - devstack-platform-centos-8
+ - devstack-platform-focal
- devstack-multinode
- devstack-unit-tests
- openstack-tox-bashate
@@ -598,10 +694,13 @@
voting: false
- swift-dsvm-functional:
voting: false
- irrelevant-files:
+ irrelevant-files: &dsvm-irrelevant-files
- ^.*\.rst$
- ^doc/.*$
- - grenade-py3:
+ - swift-dsvm-functional-py3:
+ voting: false
+ irrelevant-files: *dsvm-irrelevant-files
+ - grenade:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -613,7 +712,7 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - tempest-multinode-full:
+ - tempest-multinode-full-py3:
voting: false
irrelevant-files:
- ^.*\.rst$
@@ -626,6 +725,15 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ # NOTE(gmann): Remove this job from devstack pipeline once it is
+ # migrated to zuulv3 native. This is legacy job and rely on
+ # devstack-gate + devstack setting so any change in devstack can
+ # break it.
+ - nova-live-migration:
+ voting: false
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
gate:
jobs:
- devstack
@@ -641,7 +749,7 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- - grenade-py3:
+ - grenade:
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
@@ -670,6 +778,10 @@
# Next cycle we can remove this if everything run out stable enough.
# * nova-multi-cell: maintained by nova and currently non-voting in the
# check queue for nova changes but relies on devstack configuration
+ # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood
+ # for Nova to allow early testing of the latest versions of Libvirt and
+ # QEMU. Should only graduate out of experimental if it ever moves into
+ # the check queue for Nova.
experimental:
jobs:
@@ -678,10 +790,6 @@
- neutron-fullstack-with-uwsgi
- neutron-functional-with-uwsgi
- neutron-tempest-with-uwsgi
- - devstack-plugin-ceph-tempest:
- irrelevant-files:
- - ^.*\.rst$
- - ^doc/.*$
- devstack-plugin-ceph-tempest-py3:
irrelevant-files:
- ^.*\.rst$
@@ -706,3 +814,4 @@
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
+ - devstack-platform-fedora-latest-virt-preview
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..bb51165
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,19 @@
+The source repository for this project can be found at:
+
+ https://opendev.org/openstack/devstack
+
+Pull requests submitted through GitHub are not monitored.
+
+To start contributing to OpenStack, follow the steps in the contribution guide
+to set up and use Gerrit:
+
+ https://docs.openstack.org/contributors/code-and-documentation/quick-start.html
+
+Bugs should be filed on Launchpad:
+
+ https://bugs.launchpad.net/devstack
+
+For more specific information about contributing to this repository, see the
+Devstack contributor guide:
+
+ https://docs.openstack.org/devstack/latest/contributor/contributing.html
diff --git a/clean.sh b/clean.sh
index d6c6b40..685a719 100755
--- a/clean.sh
+++ b/clean.sh
@@ -123,12 +123,10 @@
sudo rm -rf $LOGDIR
fi
-# Clean out the systemd user unit files if systemd was used.
-if [[ "$USE_SYSTEMD" = "True" ]]; then
- sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
- # Make systemd aware of the deletion.
- $SYSTEMCTL daemon-reload
-fi
+# Clean out the systemd unit files.
+sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
+# Make systemd aware of the deletion.
+$SYSTEMCTL daemon-reload
# Clean up venvs
DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack"
diff --git a/doc/requirements.txt b/doc/requirements.txt
index fffb83d..ffce3ff 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -2,8 +2,8 @@
Pygments
docutils
-sphinx>=1.6.2
-openstackdocstheme>=1.20.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
nwdiag
blockdiag
sphinxcontrib-blockdiag
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 56043ba..2e17da1 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -30,9 +30,15 @@
'sphinxcontrib.nwdiag' ]
# openstackdocstheme options
-repository_name = 'openstack-dev/devstack'
-bug_project = 'devstack'
-bug_tag = ''
+openstackdocs_repo_name = 'openstack-dev/devstack'
+openstackdocs_pdf_link = True
+openstackdocs_bug_project = 'devstack'
+openstackdocs_bug_tag = ''
+openstackdocs_auto_name = False
+# This repo is not tagged, so don't set versions
+openstackdocs_auto_version = False
+version = ''
+release = ''
todo_include_todos = True
@@ -81,7 +87,7 @@
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['DevStack-doc.']
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 45f4ffe..ec4a9c8 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -137,7 +137,7 @@
::
- OS_AUTH_URL=http://$SERVICE_HOST:5000/v3.0
+ OS_AUTH_URL=http://$SERVICE_HOST/identity/v3.0
KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG
Set command-line client log level to ``DEBUG``. These are commented
@@ -430,17 +430,6 @@
ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
-Use python3
-------------
-
-By default ``stack.sh`` uses python2 (the exact version set by the
-``PYTHON2_VERSION``). This can be overriden so devstack will run
-python3 (the exact version set by ``PYTHON3_VERSION``).
-
-::
-
- USE_PYTHON3=True
-
A clean install every time
--------------------------
@@ -696,16 +685,6 @@
KEYSTONE_REGION_NAME has a default value the same as REGION_NAME thus we omit
it in the configuration of RegionOne.
-Disabling Identity API v2
-+++++++++++++++++++++++++
-
-The Identity API v2 is deprecated as of Mitaka and it is recommended to only
-use the v3 API. It is possible to setup keystone without v2 API, by doing:
-
-::
-
- ENABLE_IDENTITY_V2=False
-
.. _arch-configuration:
Architectures
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
new file mode 100644
index 0000000..5e0df56
--- /dev/null
+++ b/doc/source/contributor/contributing.rst
@@ -0,0 +1,56 @@
+============================
+So You Want to Contribute...
+============================
+
+For general information on contributing to OpenStack, please check out the
+`contributor guide <https://docs.openstack.org/contributors/>`_ to get started.
+It covers all the basics that are common to all OpenStack projects: the accounts
+you need, the basics of interacting with our Gerrit review system, how we
+communicate as a community, etc.
+
+Below will cover the more project specific information you need to get started
+with Devstack.
+
+Communication
+~~~~~~~~~~~~~
+* IRC channel ``#openstack-qa`` at FreeNode
+* Mailing list (prefix subjects with ``[qa][devstack]`` for faster responses)
+ http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss
+
+Contacting the Core Team
+~~~~~~~~~~~~~~~~~~~~~~~~
+Please refer to the `Devstack Core Team
+<https://review.opendev.org/#/admin/groups/50,members>`_ contacts.
+
+New Feature Planning
+~~~~~~~~~~~~~~~~~~~~
+If you want to propose a new feature please read `Feature Proposal Process`_
+Devstack features are tracked on `Launchpad BP <https://blueprints.launchpad.net/devstack>`_.
+
+Task Tracking
+~~~~~~~~~~~~~
+We track our tasks in `Launchpad <https://bugs.launchpad.net/devstack>`_.
+
+Reporting a Bug
+~~~~~~~~~~~~~~~
+You found an issue and want to make sure we are aware of it? You can do so on
+`Launchpad <https://bugs.launchpad.net/devstack/+filebug>`__.
+More info about Launchpad usage can be found on `OpenStack docs page
+<https://docs.openstack.org/contributors/common/task-tracking.html#launchpad>`_
+
+Getting Your Patch Merged
+~~~~~~~~~~~~~~~~~~~~~~~~~
+All changes proposed to the Devstack require two ``Code-Review +2`` votes from
+Devstack core reviewers before one of the core reviewers can approve the patch
+by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate
+which can be approved by single core reviewers.
+
+Project Team Lead Duties
+~~~~~~~~~~~~~~~~~~~~~~~~
+All common PTL duties are enumerated in the `PTL guide
+<https://docs.openstack.org/project-team-guide/ptl.html>`_.
+
+The Release Process for QA is documented in `QA Release Process
+<https://wiki.openstack.org/wiki/QA/releases>`_.
+
+.. _Feature Proposal Process: https://wiki.openstack.org/wiki/QA#Feature_Proposal_.26_Design_discussions
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index 82e0dd6..e7ec629 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -10,6 +10,7 @@
.. toctree::
:glob:
+ :hidden:
:maxdepth: 1
guides/single-vm
@@ -68,6 +69,11 @@
Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
+Configure Load-Balancer Version 2
+-----------------------------------
+
+Guide on :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+
Deploying DevStack with LDAP
----------------------------
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 669a70d..5d96ca7 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -41,9 +41,6 @@
# If you are enabling barbican for TLS offload in Octavia, include it here.
# enable_plugin barbican https://opendev.org/openstack/barbican
- # If you have python3 available:
- # USE_PYTHON3=True
-
# ===== BEGIN localrc =====
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
@@ -62,7 +59,7 @@
ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
ENABLED_SERVICES+=,placement-api,placement-client
# Glance
- ENABLED_SERVICES+=,g-api,g-reg
+ ENABLED_SERVICES+=,g-api
# Neutron
ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 6694022..8b8acde 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,8 +38,7 @@
Start with a clean and minimal install of a Linux system. DevStack
attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 7, as well as Debian and
-OpenSUSE.
+latest/current Fedora version, CentOS/RHEL 8 and OpenSUSE.
If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the
most tested, and will probably go the smoothest.
@@ -151,6 +150,13 @@
with devstack, and help us by :doc:`contributing to the project
<hacking>`.
+If you are a new contributor to devstack please refer: :doc:`contributor/contributing`
+
+.. toctree::
+ :hidden:
+
+ contributor/contributing
+
Contents
++++++++
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index 74010cd..e65c7ef 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -40,7 +40,7 @@
Locally Accessible Guests
=========================
-If you want to make you guests accessible from other machines on your
+If you want to make your guests accessible from other machines on your
network, we have to connect ``br-ex`` to a physical interface.
Dedicated Guest Interface
@@ -81,7 +81,7 @@
[[local|localrc]]
PUBLIC_INTERFACE=eth0
HOST_IP=10.42.0.52
- FLOATING_RANGE=10.42.0.52/24
+ FLOATING_RANGE=10.42.0.0/24
PUBLIC_NETWORK_GATEWAY=10.42.0.1
Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 5cbe4ed..eda5773 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -31,16 +31,16 @@
openstack/ceilometer-powervm `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
openstack/cinderlib `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
openstack/cloudkitty `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
-openstack/congress `https://opendev.org/openstack/congress <https://opendev.org/openstack/congress>`__
openstack/cyborg `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
openstack/designate `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
openstack/devstack-plugin-amqp1 `https://opendev.org/openstack/devstack-plugin-amqp1 <https://opendev.org/openstack/devstack-plugin-amqp1>`__
openstack/devstack-plugin-ceph `https://opendev.org/openstack/devstack-plugin-ceph <https://opendev.org/openstack/devstack-plugin-ceph>`__
openstack/devstack-plugin-container `https://opendev.org/openstack/devstack-plugin-container <https://opendev.org/openstack/devstack-plugin-container>`__
openstack/devstack-plugin-kafka `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
+openstack/devstack-plugin-nfs `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
+openstack/devstack-plugin-open-cas `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
openstack/devstack-plugin-pika `https://opendev.org/openstack/devstack-plugin-pika <https://opendev.org/openstack/devstack-plugin-pika>`__
openstack/devstack-plugin-zmq `https://opendev.org/openstack/devstack-plugin-zmq <https://opendev.org/openstack/devstack-plugin-zmq>`__
-openstack/dragonflow `https://opendev.org/openstack/dragonflow <https://opendev.org/openstack/dragonflow>`__
openstack/ec2-api `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
openstack/freezer `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
openstack/freezer-api `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
@@ -50,6 +50,7 @@
openstack/heat-dashboard `https://opendev.org/openstack/heat-dashboard <https://opendev.org/openstack/heat-dashboard>`__
openstack/ironic `https://opendev.org/openstack/ironic <https://opendev.org/openstack/ironic>`__
openstack/ironic-inspector `https://opendev.org/openstack/ironic-inspector <https://opendev.org/openstack/ironic-inspector>`__
+openstack/ironic-prometheus-exporter `https://opendev.org/openstack/ironic-prometheus-exporter <https://opendev.org/openstack/ironic-prometheus-exporter>`__
openstack/ironic-ui `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
openstack/karbor `https://opendev.org/openstack/karbor <https://opendev.org/openstack/karbor>`__
openstack/karbor-dashboard `https://opendev.org/openstack/karbor-dashboard <https://opendev.org/openstack/karbor-dashboard>`__
@@ -75,35 +76,32 @@
openstack/networking-bagpipe `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
openstack/networking-baremetal `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
openstack/networking-bgpvpn `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
-openstack/networking-calico `https://opendev.org/openstack/networking-calico <https://opendev.org/openstack/networking-calico>`__
openstack/networking-generic-switch `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
openstack/networking-hyperv `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
openstack/networking-l2gw `https://opendev.org/openstack/networking-l2gw <https://opendev.org/openstack/networking-l2gw>`__
openstack/networking-midonet `https://opendev.org/openstack/networking-midonet <https://opendev.org/openstack/networking-midonet>`__
openstack/networking-odl `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
openstack/networking-onos `https://opendev.org/openstack/networking-onos <https://opendev.org/openstack/networking-onos>`__
-openstack/networking-ovn `https://opendev.org/openstack/networking-ovn <https://opendev.org/openstack/networking-ovn>`__
openstack/networking-powervm `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
openstack/networking-sfc `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
openstack/neutron `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
openstack/neutron-dynamic-routing `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
-openstack/neutron-fwaas `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
-openstack/neutron-fwaas-dashboard `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
openstack/neutron-tempest-plugin `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
openstack/neutron-vpnaas `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
openstack/neutron-vpnaas-dashboard `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
openstack/nova-powervm `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
openstack/octavia `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
openstack/octavia-dashboard `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
+openstack/octavia-tempest-plugin `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
openstack/openstacksdk `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
openstack/os-loganalyze `https://opendev.org/openstack/os-loganalyze <https://opendev.org/openstack/os-loganalyze>`__
openstack/osprofiler `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
openstack/oswin-tempest-plugin `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/ovn-octavia-provider `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
openstack/panko `https://opendev.org/openstack/panko <https://opendev.org/openstack/panko>`__
openstack/patrole `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
openstack/qinling `https://opendev.org/openstack/qinling <https://opendev.org/openstack/qinling>`__
openstack/qinling-dashboard `https://opendev.org/openstack/qinling-dashboard <https://opendev.org/openstack/qinling-dashboard>`__
-openstack/rally `https://opendev.org/openstack/rally <https://opendev.org/openstack/rally>`__
openstack/rally-openstack `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
openstack/sahara `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
openstack/sahara-dashboard `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
@@ -135,19 +133,17 @@
starlingx/metal `https://opendev.org/starlingx/metal <https://opendev.org/starlingx/metal>`__
starlingx/nfv `https://opendev.org/starlingx/nfv <https://opendev.org/starlingx/nfv>`__
starlingx/update `https://opendev.org/starlingx/update <https://opendev.org/starlingx/update>`__
+vexxhost/openstack-operator `https://opendev.org/vexxhost/openstack-operator <https://opendev.org/vexxhost/openstack-operator>`__
x/almanach `https://opendev.org/x/almanach <https://opendev.org/x/almanach>`__
x/apmec `https://opendev.org/x/apmec <https://opendev.org/x/apmec>`__
x/bilean `https://opendev.org/x/bilean <https://opendev.org/x/bilean>`__
x/broadview-collector `https://opendev.org/x/broadview-collector <https://opendev.org/x/broadview-collector>`__
x/collectd-openstack-plugins `https://opendev.org/x/collectd-openstack-plugins <https://opendev.org/x/collectd-openstack-plugins>`__
x/devstack-plugin-additional-pkg-repos `https://opendev.org/x/devstack-plugin-additional-pkg-repos <https://opendev.org/x/devstack-plugin-additional-pkg-repos>`__
-x/devstack-plugin-bdd `https://opendev.org/x/devstack-plugin-bdd <https://opendev.org/x/devstack-plugin-bdd>`__
x/devstack-plugin-glusterfs `https://opendev.org/x/devstack-plugin-glusterfs <https://opendev.org/x/devstack-plugin-glusterfs>`__
x/devstack-plugin-hdfs `https://opendev.org/x/devstack-plugin-hdfs <https://opendev.org/x/devstack-plugin-hdfs>`__
x/devstack-plugin-libvirt-qemu `https://opendev.org/x/devstack-plugin-libvirt-qemu <https://opendev.org/x/devstack-plugin-libvirt-qemu>`__
x/devstack-plugin-mariadb `https://opendev.org/x/devstack-plugin-mariadb <https://opendev.org/x/devstack-plugin-mariadb>`__
-x/devstack-plugin-nfs `https://opendev.org/x/devstack-plugin-nfs <https://opendev.org/x/devstack-plugin-nfs>`__
-x/devstack-plugin-sheepdog `https://opendev.org/x/devstack-plugin-sheepdog <https://opendev.org/x/devstack-plugin-sheepdog>`__
x/devstack-plugin-vmax `https://opendev.org/x/devstack-plugin-vmax <https://opendev.org/x/devstack-plugin-vmax>`__
x/drbd-devstack `https://opendev.org/x/drbd-devstack <https://opendev.org/x/drbd-devstack>`__
x/fenix `https://opendev.org/x/fenix <https://opendev.org/x/fenix>`__
@@ -203,6 +199,7 @@
x/valet `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
x/vmware-nsx `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
x/vmware-vspc `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+x/whitebox-tempest-plugin `https://opendev.org/x/whitebox-tempest-plugin <https://opendev.org/x/whitebox-tempest-plugin>`__
======================================== ===
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 15b3f75..4f83b36 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -208,7 +208,8 @@
the one you want.
- ``systemd`` - a python 3 only library, not what you want.
- ``python-systemd`` - another library you don't want. Installing it
- on a system will break ansible's ability to run.
+ on a system will break ansible's ability to run. The package has now
+ been renamed to ``cysystemd``, which avoids the namespace collision.
If we were using user units, the ``[Service]`` - ``Group=`` parameter
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
index 17e7e16..c43603e 100644
--- a/doc/source/zuul_ci_jobs_migration.rst
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -195,12 +195,6 @@
- A bridge called br-infra is set up for all jobs that inherit
from multinode with a dedicated `bridge role
<https://zuul-ci.org/docs/zuul-jobs/general-roles.html#role-multi-node-bridge>`_.
- * - DEVSTACK_GATE_FEATURE_MATRIX
- - devstack-gate
- - ``test_matrix_features`` variable of the test-matrix role in
- devstack-gate. This is a temporary solution, feature matrix
- will go away. In the future services will be defined in jobs
- only.
* - DEVSTACK_CINDER_VOLUME_CLEAR
- devstack
- *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1284360..cf26c21 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,39 +1,9 @@
-Listen %PUBLICPORT%
-Listen %ADMINPORT%
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
<Directory %KEYSTONE_BIN%>
Require all granted
</Directory>
-<VirtualHost *:%PUBLICPORT%>
- WSGIDaemonProcess keystone-public processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup keystone-public
- WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%M"
- ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
-<VirtualHost *:%ADMINPORT%>
- WSGIDaemonProcess keystone-admin processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup keystone-admin
- WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
- WSGIApplicationGroup %{GLOBAL}
- WSGIPassAuthorization On
- ErrorLogFormat "%M"
- ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
- %SSLENGINE%
- %SSLCERTFILE%
- %SSLKEYFILE%
-</VirtualHost>
-
%SSLLISTEN%<VirtualHost *:443>
%SSLLISTEN% %SSLENGINE%
%SSLLISTEN% %SSLCERTFILE%
diff --git a/files/debs/dstat b/files/debs/dstat
index 0d9da44..2b643b8 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1,2 +1 @@
dstat
-python-psutil
diff --git a/files/debs/general b/files/debs/general
index df872a0..4bf1ff4 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,7 +1,6 @@
apache2
apache2-dev
bc
-bridge-utils
bsdmainutils
curl
default-jre-headless # NOPRIME
@@ -27,9 +26,9 @@
openssl
pkg-config
psmisc
-python2.7
-python-dev
-python-gdbm # needed for testr
+python3-dev
+python3-pip
+python3-venv
tar
tcpdump
unzip
diff --git a/files/debs/keystone b/files/debs/keystone
index fd0317b..1cfa6ff 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -2,5 +2,5 @@
libldap2-dev
libsasl2-dev
memcached
-python-mysqldb
+python3-mysqldb
sqlite3
diff --git a/files/debs/ldap b/files/debs/ldap
index aa3a934..54896bb 100644
--- a/files/debs/ldap
+++ b/files/debs/ldap
@@ -1,3 +1,3 @@
ldap-utils
-python-ldap
+python3-ldap
slapd
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index 636644f..54d6fa3 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -5,7 +5,7 @@
lvm2 # NOPRIME
netcat-openbsd
open-iscsi
-python-guestfs # NOPRIME
+python3-guestfs # NOPRIME
qemu-utils
sg3-utils
sysfsutils
diff --git a/files/debs/neutron-common b/files/debs/neutron-common
index b269f63..e548396 100644
--- a/files/debs/neutron-common
+++ b/files/debs/neutron-common
@@ -9,7 +9,7 @@
libmysqlclient-dev
mysql-server #NOPRIME
postgresql-server-dev-all
-python-mysqldb
+python3-mysqldb
rabbitmq-server # NOPRIME
radvd # NOPRIME
sqlite3
diff --git a/files/debs/nova b/files/debs/nova
index e5110e9..a7aebbf 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -16,8 +16,7 @@
mysql-server # NOPRIME
parted
pm-utils
-python-mysqldb
-qemu # dist:wheezy,jessie NOPRIME
+python3-mysqldb
qemu-kvm # NOPRIME
rabbitmq-server # NOPRIME
socat # used by ajaxterm
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
index 0d9da44..2b643b8 100644
--- a/files/rpms-suse/dstat
+++ b/files/rpms-suse/dstat
@@ -1,2 +1 @@
dstat
-python-psutil
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index b870d72..0af2b5b 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -1,7 +1,6 @@
apache2
apache2-devel
bc
-bridge-utils
ca-certificates-mozilla
curl
gcc
diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common
index d1cc73f..e3799a9 100644
--- a/files/rpms-suse/neutron-common
+++ b/files/rpms-suse/neutron-common
@@ -5,7 +5,6 @@
haproxy # to serve as metadata proxy inside router/dhcp namespaces
iptables
iputils
-mariadb # NOPRIME
rabbitmq-server # NOPRIME
radvd # NOPRIME
sqlite3
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 1d58121..9923760 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -11,7 +11,6 @@
kvm # NOPRIME
libvirt # NOPRIME
libvirt-python # NOPRIME
-mariadb # NOPRIME
# mkisofs is required for config_drive
mkisofs # not:sle12
parted
diff --git a/files/rpms/cinder b/files/rpms/cinder
index e6b33dc..c21ea08 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,4 @@
iscsi-initiator-utils
lvm2
qemu-img
-scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29 NOPRIME
-targetcli # dist:rhel7,f25,f26,f27,f28,f29 NOPRIME
+targetcli
diff --git a/files/rpms/dstat b/files/rpms/dstat
index b058c27..6524bed 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1,3 +1 @@
-dstat # not:f29
-pcp-system-tools # dist:f29
-python-psutil
+pcp-system-tools
diff --git a/files/rpms/general b/files/rpms/general
index 5bf1e9a..c42ce52 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,5 +1,4 @@
bc
-bridge-utils
curl
dbus
gcc
@@ -9,15 +8,13 @@
graphviz # needed only for docs
httpd
httpd-devel
-iptables-services # NOPRIME f25,f26,f27,f28,f29
-java-1.7.0-openjdk-headless # NOPRIME rhel7
-java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27,f28,f29
+iptables-services
+java-1.8.0-openjdk-headless
libffi-devel
libjpeg-turbo-devel # Pillow 3.0.0
libxml2-devel # lxml
libxslt-devel # lxml
libyaml-devel
-mariadb-devel # MySQL-python
net-tools
openssh-server
openssl
@@ -26,8 +23,8 @@
pkgconfig
postgresql-devel # psycopg2
psmisc
-pyOpenSSL # version in pip uses too much memory
-python-devel
+python3-devel
+python3-pip
redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
systemd-devel # for systemd-python
tar
diff --git a/files/rpms/neutron-common b/files/rpms/neutron-common
index 0cc8d11..fe25f57 100644
--- a/files/rpms/neutron-common
+++ b/files/rpms/neutron-common
@@ -5,8 +5,6 @@
haproxy # to serve as metadata proxy inside router/dhcp namespaces
iptables
iputils
-mysql-devel
-mysql-server # NOPRIME
openvswitch # NOPRIME
rabbitmq-server # NOPRIME
radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 639d793..2218330 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,12 +7,8 @@
genisoimage # required for config_drive
iptables
iputils
-kernel-modules # dist:f25,f26,f27,f28,f29
+kernel-modules
kpartx
-libxml2-python
-m2crypto
-mysql-devel
-mysql-server # NOPRIME
parted
polkit
rabbitmq-server # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index be524d1..376c6f3 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,8 +1,7 @@
curl
liberasurecode-devel
memcached
-pyxattr
-rsync-daemon # dist:f25,f26,f27,f28,f29
+rsync-daemon
sqlite
xfsprogs
xinetd
diff --git a/functions b/functions
index 8ea634e..2470015 100644
--- a/functions
+++ b/functions
@@ -341,6 +341,12 @@
disk_format=qcow2
container_format=bare
;;
+ *.qcow2.xz)
+ image_name=$(basename "$image" ".qcow2.xz")
+ disk_format=qcow2
+ container_format=bare
+ unpack=unxz
+ ;;
*.raw)
image_name=$(basename "$image" ".raw")
disk_format=raw
@@ -376,6 +382,16 @@
openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
elif [ "$unpack" = "bunzip2" ]; then
openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(bunzip2 -cdk "${image}")
+ elif [ "$unpack" = "unxz" ]; then
+ # NOTE(brtknr): unxz the file first and cleanup afterwards to
+ # prevent timeout while Glance tries to upload image (e.g. to Swift).
+ local tmp_dir
+ local image_path
+ tmp_dir=$(mktemp -d)
+ image_path="$tmp_dir/$image_name"
+ unxz -cv "${image}" > "$image_path"
+ openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format --file "$image_path"
+ rm -rf $tmp_dir
else
openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
fi
@@ -635,40 +651,29 @@
# This sets up defaults we like in devstack for logging for tracking
# down issues, and makes sure everything is done the same between
# projects.
+# NOTE(jh): Historically this function switched between three different
+# functions: setup_systemd_logging, setup_colorized_logging and
+# setup_standard_logging_identity. Since we always run with systemd now,
+# this could be cleaned up, but the other functions may still be in use
+# by plugins. Since deprecations haven't worked in the past, we'll just
+# leave them in place.
function setup_logging {
- local conf_file=$1
- local other_cond=${2:-"False"}
- if [[ "$USE_SYSTEMD" == "True" ]]; then
- setup_systemd_logging $conf_file
- elif [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$other_cond" == "False" ]; then
- setup_colorized_logging $conf_file
- else
- setup_standard_logging_identity $conf_file
- fi
+ setup_systemd_logging $1
}
# This function sets log formatting options for colorizing log
# output to stdout. It is meant to be called by lib modules.
-# The last two parameters are optional and can be used to specify
-# non-default value for project and user format variables.
-# Defaults are respectively 'project_name' and 'user_name'
-#
-# setup_colorized_logging something.conf SOMESECTION
function setup_colorized_logging {
local conf_file=$1
- local conf_section="DEFAULT"
- local project_var="project_name"
- local user_var="user_name"
# Add color to logging output
- iniset $conf_file $conf_section logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%("$project_var")s %("$user_var")s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
- iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
+ iniset $conf_file DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
+ iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;35m%(instance)s[00m"
}
function setup_systemd_logging {
local conf_file=$1
- local conf_section="DEFAULT"
# NOTE(sdague): this is a nice to have, and means we're using the
# native systemd path, which provides for things like search on
# request-id. However, there may be an eventlet interaction here,
@@ -676,16 +681,16 @@
USE_JOURNAL=$(trueorfalse False USE_JOURNAL)
local pidstr=""
if [[ "$USE_JOURNAL" == "True" ]]; then
- iniset $conf_file $conf_section use_journal "True"
+ iniset $conf_file DEFAULT use_journal "True"
# if we are using the journal directly, our process id is already correct
else
pidstr="(pid=%(process)d) "
fi
- iniset $conf_file $conf_section logging_debug_format_suffix "[00;33m{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}[00m"
+ iniset $conf_file DEFAULT logging_debug_format_suffix "[00;33m{{${pidstr}%(funcName)s %(pathname)s:%(lineno)d}}[00m"
- iniset $conf_file $conf_section logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
- iniset $conf_file $conf_section logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
+ iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [[01;36m%(global_request_id)s %(request_id)s [00;36m%(project_name)s %(user_name)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s [01;35m%(instance)s[00m"
}
function setup_standard_logging_identity {
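The new *.qcow2.xz branch added to upload_image earlier in this file decompresses the image before handing it to Glance. A rough standalone sketch of that path (the image filename is hypothetical; the CLI flags mirror the existing upload branches):

    image=/opt/stack/files/example-disk.qcow2.xz      # hypothetical file
    image_name=$(basename "$image" ".qcow2.xz")
    tmp_dir=$(mktemp -d)
    # Decompress first so Glance uploads a plain qcow2 file rather than a
    # slow stream, avoiding timeouts when the backend is e.g. Swift.
    unxz -cv "$image" > "$tmp_dir/$image_name"
    openstack --os-cloud=devstack-admin image create "$image_name" \
        --public --container-format=bare --disk-format=qcow2 \
        --file "$tmp_dir/$image_name"
    rm -rf "$tmp_dir"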
diff --git a/functions-common b/functions-common
index a13d611..6595c3d 100644
--- a/functions-common
+++ b/functions-common
@@ -27,7 +27,6 @@
# - ``RECLONE``
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
-# - ``TRACK_DEPENDS``
# - ``http_proxy``, ``https_proxy``, ``no_proxy``
#
@@ -44,12 +43,11 @@
declare -A -g GITBRANCH
declare -A -g GITDIR
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
KILL_PATH="$(which kill)"
# Save these variables to .stackenv
STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
- KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \
+ KEYSTONE_SERVICE_URI \
LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
HOST_IPV6 SERVICE_IP_VERSION"
@@ -131,6 +129,11 @@
--os-password $ADMIN_PASSWORD \
--os-system-scope all
+ cat >> $CLOUDS_YAML <<EOF
+functional:
+ image_name: $DEFAULT_IMAGE_NAME
+EOF
+
# CLean up any old clouds.yaml files we had laying around
rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml
}
@@ -331,9 +334,6 @@
sudo zypper -n install lsb-release
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
sudo dnf install -y redhat-lsb-core
- elif [[ -x $(command -v yum 2>/dev/null) ]]; then
- # all rh patforms (fedora, centos, rhel) have this pkg
- sudo yum install -y redhat-lsb-core
else
die $LINENO "Unable to find or auto-install lsb_release"
fi
@@ -1363,7 +1363,7 @@
if is_ubuntu; then
apt_get purge "$@"
elif is_fedora; then
- sudo ${YUM:-yum} remove -y "$@" ||:
+ sudo dnf remove -y "$@" ||:
elif is_suse; then
sudo zypper remove -y "$@" ||:
else
@@ -1371,8 +1371,11 @@
fi
}
-# Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
+# Wrapper for ``dnf`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# The name is kept for backwards compatibility with external
+# callers, despite none of our supported platforms using yum
+# any more.
# yum_install package [package ...]
function yum_install {
local result parse_yum_result
@@ -1380,44 +1383,8 @@
[[ "$OFFLINE" = "True" ]] && return
time_start "yum_install"
-
- # This is a bit tricky, because yum -y assumes missing or failed
- # packages are OK (see [1]). We want devstack to stop if we are
- # installing missing packages.
- #
- # Thus we manually match on the output (stack.sh runs in a fixed
- # locale, so lang shouldn't change).
- #
- # If yum returns !0, we echo the result as "YUM_FAILED" and return
- # that from the awk (we're subverting -e with this trick).
- # Otherwise we use awk to look for failure strings and return "2"
- # to indicate a terminal failure.
- #
- # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
- parse_yum_result=' \
- BEGIN { result=0 } \
- /^YUM_FAILED/ { result=$2 } \
- /^No package/ { result=2 } \
- /^Failed:/ { result=2 } \
- //{ print } \
- END { exit result }'
- (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
- | awk "$parse_yum_result" && result=$? || result=$?
-
+ sudo_with_proxies dnf install -y "$@"
time_stop "yum_install"
-
- # if we return 1, then the wrapper functions will run an update
- # and try installing the package again as a defense against bad
- # mirrors. This can hide failures, especially when we have
- # packages that are in the "Failed:" section because their rpm
- # install scripts failed to run correctly (in this case, the
- # package looks installed, so when the retry happens we just think
- # the package is OK, and incorrectly continue on).
- if [ "$result" == 2 ]; then
- die "Detected fatal package install failure"
- fi
-
- return "$result"
}
# zypper wrapper to set arguments correctly
@@ -2075,11 +2042,7 @@
return 0
fi
- if [[ $TRACK_DEPENDS = True ]]; then
- sudo_cmd="env"
- else
- sudo_cmd="sudo"
- fi
+ sudo_cmd="sudo"
$xtrace
$sudo_cmd $@
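With the yum_install simplification above, the old output-parsing workaround is gone and a failed install should surface directly through dnf's exit status. A minimal sketch of what a call now amounts to (package names are only illustrative):

    yum_install libffi-devel python3-devel
    # ...which, proxy handling aside, effectively runs:
    sudo dnf install -y libffi-devel python3-devel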
diff --git a/inc/python b/inc/python
index 81b6a96..727d52c 100644
--- a/inc/python
+++ b/inc/python
@@ -21,6 +21,14 @@
# project. A null value installs to the system Python directories.
declare -A -g PROJECT_VENV
+# Utility Functions
+# =================
+
+# Joins bash array of extras with commas as expected by other functions
+function join_extras {
+ local IFS=","
+ echo "$*"
+}
# Python Functions
# ================
@@ -54,7 +62,7 @@
$xtrace
local PYTHON_PATH=/usr/local/bin
- ( is_fedora && ! python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin
+ is_suse && PYTHON_PATH=/usr/bin
echo $PYTHON_PATH
}
@@ -80,65 +88,13 @@
function pip_install_gr_extras {
local name=$1
local extras=$2
- local clean_name
- clean_name=$(get_from_global_requirements $name)
- pip_install $clean_name[$extras]
-}
-
-# python3_enabled_for() assumes the service(s) specified as arguments are
-# enabled for python 3 unless explicitly disabled. See python3_disabled_for().
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# python3_enabled_for dir [dir ...]
-function python3_enabled_for {
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
-
- local enabled=1
- local dirs=$@
- local dir
- for dir in ${dirs}; do
- if ! python3_disabled_for "${dir}"; then
- enabled=0
- fi
- done
-
- $xtrace
- return $enabled
-}
-
-# python3_disabled_for() checks if the service(s) specified as arguments are
-# disabled by the user in ``DISABLED_PYTHON3_PACKAGES``.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# Uses global ``DISABLED_PYTHON3_PACKAGES``
-# python3_disabled_for dir [dir ...]
-function python3_disabled_for {
- local xtrace
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
-
- local enabled=1
- local dirs=$@
- local dir
- for dir in ${dirs}; do
- [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
- done
-
- $xtrace
- return $enabled
+ local version_constraints
+ version_constraints=$(get_version_constraints_from_global_requirements $name)
+ pip_install $name[$extras]$version_constraints
}
# enable_python3_package() -- no-op for backwards compatibility
#
-# For example:
-# enable_python3_package nova
-#
# enable_python3_package dir [dir ...]
function enable_python3_package {
local xtrace
@@ -150,32 +106,22 @@
$xtrace
}
-# disable_python3_package() adds the services passed as argument to
-# the ``DISABLED_PYTHON3_PACKAGES`` list.
+# disable_python3_package() -- no-op for backwards compatibility
#
-# For example:
-# disable_python3_package swift
-#
-# Uses global ``DISABLED_PYTHON3_PACKAGES``
# disable_python3_package dir [dir ...]
function disable_python3_package {
local xtrace
xtrace=$(set +o | grep xtrace)
set +o xtrace
- local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}"
- local dir
- for dir in $@; do
- disabled_svcs+=",$dir"
- done
- DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs")
+ echo "It is no longer possible to call disable_python3_package()."
$xtrace
}
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``PIP_UPGRADE``, ``*_proxy``,
# Usage:
# pip_install pip_arguments
function pip_install {
@@ -219,64 +165,21 @@
# this works (for now...)
local package_dir=${!#%\[*\]}
- if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
- # TRACK_DEPENDS=True installation creates a circular dependency when
- # we attempt to install virtualenv into a virtualenv, so we must global
- # that installation.
- source $DEST/.venv/bin/activate
- local cmd_pip=$DEST/.venv/bin/pip
+ if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+ local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
- local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
- local sudo_pip="env"
- else
- local cmd_pip
- cmd_pip=$(get_pip_command $PYTHON2_VERSION)
- local sudo_pip="sudo -H"
- if python3_enabled; then
- # Special case some services that have experimental
- # support for python3 in progress, but don't claim support
- # in their classifier
- echo "Check python version for : $package_dir"
- if python3_disabled_for ${package_dir##*/}; then
- echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
- else
- # For everything that is not explicitly blacklisted with
- # DISABLED_PYTHON3_PACKAGES, assume it supports python3
- # and we will let pip sort out the install, regardless of
- # the package being local or remote.
- echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior"
- sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
- cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- fi
- fi
- fi
+ local cmd_pip="python$PYTHON3_VERSION -m pip"
+ local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
+ echo "Using python $PYTHON3_VERSION to install $package_dir"
fi
cmd_pip="$cmd_pip install"
# Always apply constraints
cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
- # FIXME(dhellmann): Need to force multiple versions of pip for
- # packages like setuptools?
- local pip_version
- pip_version=$(python -c "import pip; \
- print(pip.__version__.split('.')[0])")
- if (( pip_version<6 )); then
- die $LINENO "Currently installed pip version ${pip_version} does not" \
- "meet minimum requirements (>=6)."
- fi
-
$xtrace
- # Also install test requirements
- local install_test_reqs=""
- local test_req="${package_dir}/test-requirements.txt"
- if [[ -e "$test_req" ]]; then
- install_test_reqs="-r $test_req"
- fi
-
# adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
# the same behaviour of setuptools before version 25.0.0.
# related issue: https://github.com/pypa/pip/issues/3874
@@ -286,7 +189,7 @@
no_proxy="${no_proxy:-}" \
PIP_FIND_LINKS=$PIP_FIND_LINKS \
SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
- $cmd_pip $upgrade $install_test_reqs \
+ $cmd_pip $upgrade \
$@
result=$?
@@ -303,9 +206,8 @@
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- local cmd_pip
- cmd_pip=$(get_pip_command $PYTHON2_VERSION)
- local sudo_pip="sudo -H"
+ local cmd_pip="python$PYTHON3_VERSION -m pip"
+ local sudo_pip="sudo -H LC_ALL=en_US.UTF-8"
fi
# don't error if we can't uninstall, it might not be there
$sudo_pip $cmd_pip uninstall -y $name || /bin/true
@@ -323,6 +225,19 @@
echo $required_pkg
}
+# get only version constraints of a package from global requirements file
+# get_version_constraints_from_global_requirements <package>
+function get_version_constraints_from_global_requirements {
+ local package=$1
+ local required_pkg_version_constraint
+ # drop the package name from output (\K)
+ required_pkg_version_constraint=$(grep -i -h -o -P "^${package}\K.*" $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+ if [[ $required_pkg_version_constraint == "" ]]; then
+ die $LINENO "Can't find package $package in requirements"
+ fi
+ echo $required_pkg_version_constraint
+}
+
# should we use this library from their git repo, or should we let it
# get pulled in via pip dependencies.
function use_library_from_git {
@@ -371,7 +286,7 @@
#
# use this for non namespaced libraries
#
-# setup_dev_lib [-bindep] <name>
+# setup_dev_lib [-bindep] <name> [<extras>]
function setup_dev_lib {
local bindep
if [[ $1 == -bindep* ]]; then
@@ -380,7 +295,8 @@
fi
local name=$1
local dir=${GITDIR[$name]}
- setup_develop $bindep $dir
+ local extras=$2
+ setup_develop $bindep $dir $extras
}
# this should be used if you want to install globally, all libraries should
@@ -528,12 +444,15 @@
}
# Report whether python 3 should be used
+# TODO(frickler): drop this once all legacy uses are removed
function python3_enabled {
- if [[ $USE_PYTHON3 == "True" ]]; then
- return 0
- else
- return 1
- fi
+ return 1
+}
+
+# Provide requested python version and sets PYTHON variable
+function install_python {
+ install_python3
+ export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null)
}
# Install python3 packages
@@ -542,6 +461,12 @@
apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
elif is_suse; then
install_package python3-devel python3-dbm
+ elif is_fedora; then
+ if [ "$os_VENDOR" = "Fedora" ]; then
+ install_package python${PYTHON3_VERSION//.}
+ else
+ install_package python${PYTHON3_VERSION//.} python${PYTHON3_VERSION//.}-devel
+ fi
fi
}
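A short sketch of how the new inc/python helpers combine; the global-requirements entry and the extras names here are hypothetical:

    # Given a global-requirements.txt line such as:
    #   oslo.db!=4.13.1,!=4.13.2,>=4.27.0  # Apache-2.0
    get_version_constraints_from_global_requirements oslo.db
    # -> "!=4.13.1,!=4.13.2,>=4.27.0"  (package name and trailing comment stripped)

    # pip_install_gr_extras now keeps the name[extras] form intact:
    pip_install_gr_extras oslo.db mysql
    # -> pip_install oslo.db[mysql]!=4.13.1,!=4.13.2,>=4.27.0

    # join_extras builds the comma-separated extras string from a bash array:
    extras=(mysql postgresql)
    pip_install_gr_extras oslo.db "$(join_extras "${extras[@]}")"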
diff --git a/lib/apache b/lib/apache
index 84cec73..cc28200 100644
--- a/lib/apache
+++ b/lib/apache
@@ -82,26 +82,51 @@
apxs="apxs"
fi
- # Ubuntu xenial is back level on uwsgi so the proxy doesn't
- # actually work. Hence we have to build from source for now.
+ # This varies based on packaged/installed. If we've
+ # pip_installed, then the pip setup will only build a "python"
+ # module that will be either python2 or python3 depending on what
+ # it was built with.
#
- # Centos 7 actually has the module in epel, but there was a big
- # push to disable epel by default. As such, compile from source
- # there as well.
+ # For package installs, the distro ships both plugins and you need
+ # to select the right one ... it will not be autodetected.
+ UWSGI_PYTHON_PLUGIN=python3
- local dir
- dir=$(mktemp -d)
- pushd $dir
- pip_install uwsgi
- pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
- local uwsgi
- uwsgi=$(ls uwsgi*)
- tar xvf $uwsgi
- cd uwsgi*/apache2
- sudo $apxs -i -c mod_proxy_uwsgi.c
- popd
- # delete the temp directory
- sudo rm -rf $dir
+ if is_ubuntu; then
+ local pkg_list="uwsgi uwsgi-plugin-python3 libapache2-mod-proxy-uwsgi"
+        if [[ "$DISTRO" == 'bionic' ]]; then
+ pkg_list="${pkg_list} uwsgi-plugin-python"
+ fi
+ install_package ${pkg_list}
+ elif is_fedora; then
+ # Note httpd comes with mod_proxy_uwsgi and it is loaded by
+ # default; the mod_proxy_uwsgi package actually conflicts now.
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1574335
+ #
+ # Thus there is nothing else to do after this install
+ install_package uwsgi \
+ uwsgi-plugin-python3
+ elif [[ $os_VENDOR =~ openSUSE ]]; then
+ install_package uwsgi \
+ uwsgi-python3 \
+ apache2-mod_uwsgi
+ else
+ # Compile uwsgi from source.
+ local dir
+ dir=$(mktemp -d)
+ pushd $dir
+ pip_install uwsgi
+ pip download uwsgi -c $REQUIREMENTS_DIR/upper-constraints.txt
+ local uwsgi
+ uwsgi=$(ls uwsgi*)
+ tar xvf $uwsgi
+ cd uwsgi*/apache2
+ sudo $apxs -i -c mod_proxy_uwsgi.c
+ popd
+ # delete the temp directory
+ sudo rm -rf $dir
+ UWSGI_PYTHON_PLUGIN=python
+ fi
if is_ubuntu || is_suse ; then
# we've got to enable proxy and proxy_uwsgi for this to work
@@ -121,14 +146,10 @@
if is_ubuntu; then
# Install apache2, which is NOPRIME'd
install_package apache2
- if python3_enabled; then
- if is_package_installed libapache2-mod-wsgi; then
- uninstall_package libapache2-mod-wsgi
- fi
- install_package libapache2-mod-wsgi-py3
- else
- install_package libapache2-mod-wsgi
+ if is_package_installed libapache2-mod-wsgi; then
+ uninstall_package libapache2-mod-wsgi
fi
+ install_package libapache2-mod-wsgi-py3
elif is_fedora; then
sudo rm -f /etc/httpd/conf.d/000-*
install_package httpd mod_wsgi
@@ -265,7 +286,7 @@
# configured after graceful shutdown
iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins python
+ iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
@@ -318,7 +339,7 @@
iniset "$file" uwsgi die-on-term true
iniset "$file" uwsgi exit-on-reload false
iniset "$file" uwsgi enable-threads true
- iniset "$file" uwsgi plugins python
+ iniset "$file" uwsgi plugins http,${UWSGI_PYTHON_PLUGIN}
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
# Set hook to trigger graceful shutdown on SIGTERM
diff --git a/lib/cinder b/lib/cinder
index fd96053..b1e3d0d 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -419,7 +419,7 @@
if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
install_package tgt
elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
- if [[ ${DISTRO} == "bionic" ]]; then
+ if is_ubuntu; then
# TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
sudo mkdir -p /etc/target
@@ -492,7 +492,7 @@
start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
fi
else
- run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
+ run_process "c-api" "$(which uwsgi) --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
fi
fi
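A short illustration of the pattern used here and in the other services below (the intent is an assumption: uwsgi now comes from distro packages, per the lib/apache hunk above, rather than the project's bin directory, so it is resolved from PATH):

    # Resolve the system-wide uwsgi binary instead of $CINDER_BIN_DIR/uwsgi
    UWSGI_BIN=$(which uwsgi)
    run_process "c-api" "$UWSGI_BIN --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"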
diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog
deleted file mode 100644
index 558de46..0000000
--- a/lib/cinder_plugins/sheepdog
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#
-# lib/cinder_plugins/sheepdog
-# Configure the sheepdog driver
-
-# Enable with:
-#
-# CINDER_DRIVER=sheepdog
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
- iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_SHEEPDOG
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 420a86e..d4969d7 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -109,8 +109,10 @@
sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
fi
+ # Create DB user if it does not already exist
+ sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
# Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
- sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+ sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
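A minimal sketch of the resulting statement sequence ('demo' and 'secret' are placeholders for $DATABASE_USER and $DATABASE_PASSWORD); newer MySQL releases no longer create an account implicitly on GRANT, hence, presumably, the explicit CREATE USER:

    sudo mysql -e "CREATE USER IF NOT EXISTS 'demo'@'%' IDENTIFIED BY 'secret';"
    sudo mysql -e "GRANT ALL PRIVILEGES ON *.* TO 'demo'@'%';"
    sudo mysql -e "FLUSH PRIVILEGES;"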
# Now update ``my.cnf`` for some local needs and restart the mysql service
@@ -120,8 +122,6 @@
iniset -sudo $my_conf mysqld sql_mode TRADITIONAL
iniset -sudo $my_conf mysqld default-storage-engine InnoDB
iniset -sudo $my_conf mysqld max_connections 1024
- iniset -sudo $my_conf mysqld query_cache_type OFF
- iniset -sudo $my_conf mysqld query_cache_size 0
if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
echo_summary "Enabling MySQL query logging"
@@ -175,7 +175,10 @@
# Install mysql-server
if is_oraclelinux; then
install_package mysql-community-server
- elif is_fedora || is_suse; then
+ elif is_fedora; then
+ install_package mariadb-server mariadb-devel
+ sudo systemctl enable $MYSQL_SERVICE_NAME
+ elif is_suse; then
install_package mariadb-server
sudo systemctl enable $MYSQL_SERVICE_NAME
elif is_ubuntu; then
diff --git a/lib/dstat b/lib/dstat
index fe38d75..f5bd2bb 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -9,6 +9,7 @@
# ``stack.sh`` calls the entry points in this order:
#
+# - install_dstat
# - start_dstat
# - stop_dstat
@@ -16,6 +17,14 @@
_XTRACE_DSTAT=$(set +o | grep xtrace)
set +o xtrace
+# install_dstat() - Install prerequisites for dstat services
+function install_dstat {
+ if is_service_enabled memory_tracker; then
+ # Install python libraries required by tools/mlock_report.py
+ pip_install_gr psutil
+ fi
+}
+
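A one-line sketch of how this branch gets triggered (local.conf localrc section):

    # Opt into the memory tracker; install_dstat then pulls in psutil
    enable_service memory_tracker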
# start_dstat() - Start running processes
function start_dstat {
# A better kind of sysstat, with the top process per time slice
diff --git a/lib/glance b/lib/glance
index 54d3276..6d252e3 100644
--- a/lib/glance
+++ b/lib/glance
@@ -41,18 +41,35 @@
GLANCE_BIN_DIR=$(get_python_exec_prefix)
fi
+# Glance multi-store configuration
+# Boolean flag to enable multiple store configuration for glance
+GLANCE_ENABLE_MULTIPLE_STORES=$(trueorfalse False GLANCE_ENABLE_MULTIPLE_STORES)
+
+# Comma-separated list for configuring multiple file stores of glance,
+# for example: GLANCE_MULTIPLE_FILE_STORES=fast,cheap,slow
+GLANCE_MULTIPLE_FILE_STORES=${GLANCE_MULTIPLE_FILE_STORES:-fast}
+
+# Default store/backend for glance; must be one of the stores specified
+# in the GLANCE_MULTIPLE_FILE_STORES option.
+GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
+
GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+
+# Base file path for the stores specified in GLANCE_MULTIPLE_FILE_STORES; the
+# store identifier is appended to this path at runtime. If GLANCE_MULTIPLE_FILE_STORES
+# is set to fast,cheap, the paths become $DATA_DIR/glance/fast
+# and $DATA_DIR/glance/cheap.
+GLANCE_MULTISTORE_FILE_IMAGE_DIR=${GLANCE_MULTISTORE_FILE_IMAGE_DIR:=$DATA_DIR/glance}
GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks}
+GLANCE_STAGING_DIR=${GLANCE_STAGING_DIR:=${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store}
+GLANCE_TASKS_DIR=${GLANCE_TASKS_DIR:=${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store}
GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
-GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
-GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
-GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf
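A minimal localrc sketch exercising the new knobs (the store names are arbitrary examples):

    # In local.conf ([[local|localrc]] section)
    GLANCE_ENABLE_MULTIPLE_STORES=True
    GLANCE_MULTIPLE_FILE_STORES=fast,cheap
    GLANCE_DEFAULT_BACKEND=fast
    # image data would then land under $DATA_DIR/glance/fast and $DATA_DIR/glance/cheap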
@@ -69,8 +86,6 @@
GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
-GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api
GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini
# If wsgi mode is uwsgi run glance under uwsgi, else default to eventlet
@@ -98,27 +113,28 @@
function cleanup_glance {
# delete image files (glance)
sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR
+
+ # Cleanup multiple stores directories
+ if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
+ local store file_dir
+ for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+ file_dir="${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+ sudo rm -rf $file_dir
+ done
+
+ # Cleanup reserved stores directories
+ sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
+ fi
}
# configure_glance() - Set config files, create data dirs, etc
function configure_glance {
sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
- # Set non-default configuration options for registry
- iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
- iniset $GLANCE_REGISTRY_CONF DEFAULT workers $API_WORKERS
+ # Set non-default configuration options for the API server
local dburl
dburl=`database_connection_url glance`
- iniset $GLANCE_REGISTRY_CONF database connection $dburl
- iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
- iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
- configure_keystone_authtoken_middleware $GLANCE_REGISTRY_CONF glance
- iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2
- iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
- iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
- # Set non-default configuration options for the API server
iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_API_CONF database connection $dburl
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
@@ -142,9 +158,21 @@
iniset $GLANCE_API_CONF DEFAULT enable_v1_api False
fi
- # Store specific configs
- iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
- iniset $GLANCE_API_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
+ # Store specific configs for glance multiple stores
+ if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
+ iniset $GLANCE_API_CONF glance_store default_backend $GLANCE_DEFAULT_BACKEND
+ local store
+ for store in $(echo $GLANCE_MULTIPLE_FILE_STORES | tr "," "\n"); do
+ iniset $GLANCE_API_CONF $store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/${store}/"
+ done
+
+ # Glance configure reserved stores
+ iniset $GLANCE_API_CONF os_glance_staging_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_staging_store/"
+ iniset $GLANCE_API_CONF os_glance_tasks_store filesystem_store_datadir "${GLANCE_MULTISTORE_FILE_IMAGE_DIR}/os_glance_tasks_store/"
+ else
+ # Store specific configs
+ iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ fi
# CORS feature support - to allow calls from Horizon by default
if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
@@ -153,29 +181,24 @@
iniset $GLANCE_API_CONF cors allowed_origin "http://$SERVICE_HOST"
fi
+ # No multiple stores for swift yet
# Store the images in swift if enabled.
if is_service_enabled s-proxy; then
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
- if python3_enabled; then
- iniset $GLANCE_API_CONF glance_store swift_store_auth_insecure True
- fi
iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+ if is_service_enabled tls-proxy; then
+ iniset $GLANCE_API_CONF glance_store swift_store_cacert $SSL_BUNDLE_FILE
+ fi
iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
- if python3_enabled; then
- # NOTE(dims): Currently the glance_store+swift does not support either an insecure flag
- # or ability to specify the CACERT. So fallback to http:// url
- iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address ${KEYSTONE_SERVICE_URI/https/http}/v3
- else
- iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
- fi
+ iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
fi
@@ -185,32 +208,22 @@
if is_service_enabled tls-proxy; then
iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
- iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
-
- iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
- iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
- fi
-
- if is_service_enabled tls-proxy; then
- iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https
+ iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_SERVICE_URI
fi
# Format logging
setup_logging $GLANCE_API_CONF
- setup_logging $GLANCE_REGISTRY_CONF
- cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
# Set non-default configuration options for the glance-cache
iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
- iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI
+ iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
- iniset $GLANCE_CACHE_CONF DEFAULT registry_host $(ipv6_unquote $GLANCE_SERVICE_HOST)
# Store specific confs
iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
@@ -220,7 +233,6 @@
iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin
iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject
- cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
@@ -301,11 +313,24 @@
# install_glance() - Collect source and prepare
function install_glance {
+ local glance_store_extras=()
+
+ if is_service_enabled cinder; then
+ glance_store_extras=("cinder" "${glance_store_extras[@]}")
+ fi
+
+ if is_service_enabled swift; then
+ glance_store_extras=("swift" "${glance_store_extras[@]}")
+ fi
+
# Install glance_store from git so we make sure we're testing
# the latest code.
if use_library_from_git "glance_store"; then
git_clone_by_name "glance_store"
- setup_dev_lib "glance_store"
+ setup_dev_lib "glance_store" $(join_extras "${glance_store_extras[@]}")
+ else
+ # we still need to pass extras
+ pip_install_gr_extras glance-store $(join_extras "${glance_store_extras[@]}")
fi
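A hedged sketch of what the released-library branch amounts to (assuming pip_install_gr_extras translates to a constrained pip extras install):

    # roughly: pip install -c upper-constraints.txt 'glance-store[cinder,swift]'
    pip_install_gr_extras glance-store "cinder,swift"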
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
@@ -320,12 +345,10 @@
if [[ "$WSGI_MODE" != "uwsgi" ]]; then
start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
fi
- start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
fi
- run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
+ run_process g-api "$(which uwsgi) --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
else
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
fi
@@ -339,7 +362,6 @@
# stop_glance() - Stop running processes
function stop_glance {
stop_process g-api
- stop_process g-reg
}
# Restore xtrace
diff --git a/lib/infra b/lib/infra
index cf003cc..b983f2b 100644
--- a/lib/infra
+++ b/lib/infra
@@ -29,7 +29,7 @@
# install_infra() - Collect source and prepare
function install_infra {
local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
- [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
+ [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
# We don't care about testing git pbr in the requirements venv.
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
diff --git a/lib/keystone b/lib/keystone
index 9ceb829..29407a0 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -83,14 +83,10 @@
# Set Keystone interface configuration
KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358}
KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
# Public facing bits
KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
-KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
-KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
# Bind hosts
@@ -115,7 +111,7 @@
KEYSTONE_AUTH_URI=$KEYSTONE_SERVICE_URI
# V3 URIs
-KEYSTONE_AUTH_URI_V3=$KEYSTONE_AUTH_URI/v3
+KEYSTONE_AUTH_URI_V3=$KEYSTONE_SERVICE_URI/v3
KEYSTONE_SERVICE_URI_V3=$KEYSTONE_SERVICE_URI/v3
# Security compliance
@@ -170,22 +166,14 @@
local keystone_ssl=""
local keystone_certfile=""
local keystone_keyfile=""
- local keystone_service_port=$KEYSTONE_SERVICE_PORT
- local keystone_auth_port=$KEYSTONE_AUTH_PORT
local venv_path=""
- if is_service_enabled tls-proxy; then
- keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
- keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
- fi
if [[ ${USE_VENV} = True ]]; then
venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
fi
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
- s|%PUBLICPORT%|$keystone_service_port|g;
- s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%SSLLISTEN%|$keystone_ssl_listen|g;
s|%SSLENGINE%|$keystone_ssl|g;
@@ -222,21 +210,8 @@
iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
- local service_port=$KEYSTONE_SERVICE_PORT
- local auth_port=$KEYSTONE_AUTH_PORT
-
- if is_service_enabled tls-proxy; then
- # Set the service ports for a proxy to take the originals
- service_port=$KEYSTONE_SERVICE_PORT_INT
- auth_port=$KEYSTONE_AUTH_PORT_INT
- fi
-
# Override the endpoints advertised by keystone (the public_endpoint and
- # admin_endpoint) so that clients use the correct endpoint. By default, the
- # keystone server uses the public_port and admin_port which isn't going to
- # work when you want to use a different port (in the case of proxy), or you
- # don't want the port (in the case of putting keystone on a path in
- # apache).
+ # admin_endpoint) so that clients use the correct endpoint.
iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
@@ -270,12 +245,6 @@
iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/"
- # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
- # The users from this project are globally admin as before, but it also
- # allows policy changes in order to clarify the adminess scope.
- #iniset $KEYSTONE_CONF resource admin_project_domain_name Default
- #iniset $KEYSTONE_CONF resource admin_project_name admin
-
if [[ "$KEYSTONE_SECURITY_COMPLIANCE_ENABLED" = True ]]; then
iniset $KEYSTONE_CONF security_compliance lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
@@ -413,6 +382,7 @@
local section=${3:-keystone_authtoken}
iniset $conf_file $section auth_type password
+ iniset $conf_file $section interface public
iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
iniset $conf_file $section username $admin_user
iniset $conf_file $section password $SERVICE_PASSWORD
@@ -421,7 +391,7 @@
iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME"
iniset $conf_file $section cafile $SSL_BUNDLE_FILE
- iniset $conf_file $section memcached_servers localhost:11211
+ iniset $conf_file $section memcached_servers $MEMCACHE_SERVERS
}
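An illustrative call (the conf file and user are examples only); with the lines above, the written [keystone_authtoken] section now pins interface to public and takes memcached_servers from $MEMCACHE_SERVERS instead of hard-coding localhost:

    configure_keystone_authtoken_middleware $NOVA_CONF nova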
# configure_auth_token_middleware conf_file admin_user IGNORED [section]
@@ -504,46 +474,26 @@
if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
install_apache_wsgi
- elif [ "$KEYSTONE_DEPLOY" == "uwsgi" ]; then
- pip_install uwsgi
fi
}
# start_keystone() - Start running processes
function start_keystone {
- # Get right service port for testing
- local service_port=$KEYSTONE_SERVICE_PORT
- local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
- if is_service_enabled tls-proxy; then
- service_port=$KEYSTONE_SERVICE_PORT_INT
- auth_protocol="http"
- fi
-
if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
enable_apache_site keystone
restart_apache_server
else # uwsgi
- run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
+ run_process keystone "$(which uwsgi) --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
fi
echo "Waiting for keystone to start..."
- # Check that the keystone service is running. Even if the tls tunnel
- # should be enabled, make sure the internal port is checked using
- # unencryted traffic at this point.
- # If running in Apache, use the path rather than port.
-
+ # Check that the keystone service is running.
- local service_uri=$auth_protocol://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
+ local service_uri=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST/identity/v$IDENTITY_API_VERSION/
if ! wait_for_service $SERVICE_TIMEOUT $service_uri; then
die $LINENO "keystone did not start"
fi
- # Start proxies if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
- start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
- fi
-
# (re)start memcached to make sure we have a clean memcache.
restart_service memcached
}
@@ -562,12 +512,9 @@
# This function uses the following GLOBAL variables:
# - ``KEYSTONE_BIN_DIR``
# - ``ADMIN_PASSWORD``
-# - ``IDENTITY_API_VERSION``
# - ``KEYSTONE_AUTH_URI``
# - ``REGION_NAME``
-# - ``KEYSTONE_SERVICE_PROTOCOL``
-# - ``KEYSTONE_SERVICE_HOST``
-# - ``KEYSTONE_SERVICE_PORT``
+# - ``KEYSTONE_SERVICE_URI``
function bootstrap_keystone {
$KEYSTONE_BIN_DIR/keystone-manage bootstrap \
--bootstrap-username admin \
diff --git a/lib/lvm b/lib/lvm
index d9e78a0..92265f2 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -124,13 +124,15 @@
local vg=$1
local size=$2
- # Start the lvmetad and tgtd services
- if is_fedora || is_suse; then
+ # Start lvmetad on f30 (it was dropped in f31) or SUSE
+ if [[ $DISTRO =~ f30 ]] || is_suse; then
# the service is not started by default
start_service lvm2-lvmetad
- if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
- start_service tgtd
- fi
+ fi
+
+ # Start the tgtd service on Fedora and SUSE if tgtadm is used
+ if { is_fedora || is_suse; } && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+ start_service tgtd
fi
# Start with a clean volume group
diff --git a/lib/neutron b/lib/neutron
index 888b5e8..885df97 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -354,7 +354,6 @@
# if not passed $NOVA_CONF is used.
function configure_neutron_nova_new {
local conf=${1:-$NOVA_CONF}
- iniset $conf DEFAULT use_neutron True
iniset $conf neutron auth_type "password"
iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
iniset $conf neutron username neutron
@@ -365,8 +364,6 @@
iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
iniset $conf neutron region_name "$REGION_NAME"
- iniset $conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
-
# optionally set options in nova_conf
neutron_plugin_create_nova_conf $conf
@@ -466,7 +463,7 @@
done
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
- run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index dbd6e2c..59649ef 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -58,8 +58,6 @@
# Neutron Network Configuration
# -----------------------------
-deprecated "Using lib/neutron-legacy is deprecated, and it will be removed in the future"
-
if is_service_enabled tls-proxy; then
Q_PROTOCOL="https"
fi
@@ -373,9 +371,8 @@
function create_nova_conf_neutron {
local conf=${1:-$NOVA_CONF}
- iniset $conf DEFAULT use_neutron True
iniset $conf neutron auth_type "password"
- iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI"
+ iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
iniset $conf neutron username "$Q_ADMIN_USERNAME"
iniset $conf neutron password "$SERVICE_PASSWORD"
iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
@@ -384,11 +381,6 @@
iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $conf neutron region_name "$REGION_NAME"
- if [[ "$Q_USE_SECGROUP" == "True" ]]; then
- LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
- iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
- fi
-
# optionally set options in nova_conf
neutron_plugin_create_nova_conf $conf
@@ -483,7 +475,7 @@
# Start the Neutron service
if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
enable_service neutron-api
- run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
enable_service neutron-rpc-server
run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
@@ -630,7 +622,7 @@
IP_UP="sudo ip link set $to_intf up"
if [[ "$af" == "inet" ]]; then
IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
- ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP "
+ ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
fi
fi
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 1f1b0e8..bdeaf0f 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -38,7 +38,7 @@
}
function neutron_plugin_install_agent_packages {
- install_package bridge-utils
+ :
}
function neutron_plugin_configure_dhcp_agent {
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index f39c7c4..8c75e15 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -11,8 +11,6 @@
local conf="$1"
NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE
- LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
- iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
}
function neutron_plugin_install_agent_packages {
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index b65a258..1009611 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -41,8 +41,10 @@
# Setup physical network bridge mappings. Override
# ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
# complex physical network configurations.
- if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
- OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+ if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+ if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then
+ OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+ fi
# Configure bridge manually with physical interface as port for multi-node
_neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
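A minimal localrc sketch for the multi-node case this restructuring targets (network and bridge names are examples); the physical bridge is now created even when OVS_BRIDGE_MAPPINGS is pre-seeded:

    PHYSICAL_NETWORK=public
    OVS_PHYSICAL_BRIDGE=br-ex
    # optional override; defaults to public:br-ex when left unset
    #OVS_BRIDGE_MAPPINGS=public:br-ex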
diff --git a/lib/nova b/lib/nova
index 7557a51..c1354e7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -96,10 +96,6 @@
# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
-# Nova supports pluggable schedulers. The default ``FilterScheduler``
-# should work in most cases.
-SCHEDULER=${SCHEDULER:-filter_scheduler}
-
# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with
# the default filters.
NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
@@ -259,6 +255,7 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
+ LIBVIRT_CPU_MODE=none
if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
@@ -296,13 +293,9 @@
fi
fi
- if is_fedora && [[ $DISTRO =~ f[0-9][0-9] ]]; then
- # There is an iscsi-initiator bug where it inserts
- # different whitespace that causes a bunch of output
- # matching to fail. We have not been able to get
- # fixed, yet :/ Exists in fedora 29 & 30 at least
- # https://bugzilla.redhat.com/show_bug.cgi?id=1676365
- sudo dnf copr enable -y iwienand/iscsi-initiator-utils
+ if is_fedora && [[ $DISTRO =~ f3[0-1] ]]; then
+ # For f30 and f31 use the rebased 2.1.0 version of the package.
+ sudo dnf copr enable -y lyarwood/iscsi-initiator-utils
sudo dnf update -y
fi
@@ -401,11 +394,8 @@
fi
iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI"
iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
- iniset $NOVA_CONF scheduler driver "$SCHEDULER"
iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
- if [[ $SCHEDULER == "filter_scheduler" ]]; then
- iniset $NOVA_CONF scheduler workers "$API_WORKERS"
- fi
+ iniset $NOVA_CONF scheduler workers "$API_WORKERS"
iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
if [[ $SERVICE_IP_VERSION == 6 ]]; then
iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
@@ -867,7 +857,7 @@
start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
fi
else
- run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
+ run_process "n-api" "$(which uwsgi) --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
fi
@@ -958,7 +948,7 @@
if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
else
- run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
+ run_process n-api-meta "$(which uwsgi) --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
fi
export PATH=$old_path
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 3566639..d3827c3 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -24,6 +24,10 @@
# Currently fairly specific to OpenStackCI hosts
DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS)
+# Enable the Fedora Virtualization Preview Copr repo that provides the latest
+# rawhide builds of QEMU, Libvirt and other virt tools.
+ENABLE_FEDORA_VIRT_PREVIEW_REPO=$(trueorfalse False ENABLE_FEDORA_VIRT_PREVIEW_REPO)
+
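A one-line localrc sketch to opt in (it stays disabled by default, as above):

    ENABLE_FEDORA_VIRT_PREVIEW_REPO=True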
# Enable coredumps for libvirt
# Bug: https://bugs.launchpad.net/nova/+bug/1643911
function _enable_coredump {
@@ -55,12 +59,21 @@
if is_ubuntu; then
install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev
+ if is_arch "aarch64"; then
+ install_package qemu-efi
+ fi
# uninstall in case the libvirt version changed
pip_uninstall libvirt-python
pip_install_gr libvirt-python
#pip_install_gr <there-is-no-guestfs-in-pypi>
elif is_fedora || is_suse; then
+ # Optionally enable the virt-preview repo when on Fedora
+ if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then
+ # https://copr.fedorainfracloud.org/coprs/g/virtmaint-sig/virt-preview/
+ sudo dnf copr enable -y @virtmaint-sig/virt-preview
+ fi
+
# Note that in CentOS/RHEL this needs to come from the RDO
# repositories (qemu-kvm-ev ... which provides this package)
# as the base system version is too old. We should have
@@ -68,6 +81,10 @@
install_package qemu-kvm
install_package libvirt libvirt-devel
+ if is_arch "aarch64"; then
+ install_package edk2.git-aarch64
+ fi
+
pip_uninstall libvirt-python
pip_install_gr libvirt-python
fi
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index adcc278..bda6ef6 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -39,16 +39,14 @@
if ! is_ironic_hardware; then
configure_libvirt
fi
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
- iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
# ironic section
iniset $NOVA_CONF ironic auth_type password
iniset $NOVA_CONF ironic username admin
iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
- iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI
+ iniset $NOVA_CONF ironic auth_url $KEYSTONE_SERVICE_URI
iniset $NOVA_CONF ironic project_domain_id default
iniset $NOVA_CONF ironic user_domain_id default
iniset $NOVA_CONF ironic project_name demo
@@ -70,13 +68,6 @@
return
fi
install_libvirt
- if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] && is_ubuntu; then
- # Ubuntu packaging+apparmor issue prevents libvirt from loading
- # the ROM from /usr/share/misc. Workaround by installing it directly
- # to a directory that it can read from. (LP: #1393548)
- sudo rm -rf /usr/share/qemu/sgabios.bin
- sudo cp /usr/share/misc/sgabios.bin /usr/share/qemu/sgabios.bin
- fi
}
# start_nova_hypervisor - Start any required external services
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 3d676b9..b25bc0c 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -39,14 +39,12 @@
function configure_nova_hypervisor {
configure_libvirt
iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
- iniset $NOVA_CONF libvirt cpu_mode "none"
+ iniset $NOVA_CONF libvirt cpu_mode "$LIBVIRT_CPU_MODE"
# Do not enable USB tablet input devices to avoid QEMU CPU overhead.
iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
# Power architecture currently does not support graphical consoles.
if is_arch "ppc64"; then
iniset $NOVA_CONF vnc enabled "false"
@@ -54,8 +52,6 @@
# arm64-specific configuration
if is_arch "aarch64"; then
- # arm64 architecture currently does not support graphical consoles.
- iniset $NOVA_CONF vnc enabled "false"
iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
fi
@@ -119,7 +115,7 @@
# Workaround for missing dependencies in python-libguestfs
install_package python-libguestfs guestfs-data augeas augeas-lenses
elif is_fedora; then
- install_package python-libguestfs
+ install_package python3-libguestfs
fi
fi
}
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index 58ab5c1..57dc45c 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -38,8 +38,6 @@
function configure_nova_hypervisor {
iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
iniset $NOVA_CONF DEFAULT connection_type "openvz"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
}
# install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index ccab18d..511ec1b 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -61,9 +61,6 @@
iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER"
iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD"
iniset $NOVA_CONF DEFAULT flat_injected "False"
- # Need to avoid crash due to new firewall support
- XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
- iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
local dom0_ip
dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
diff --git a/lib/placement b/lib/placement
index 785b0dd..2a449bf 100644
--- a/lib/placement
+++ b/lib/placement
@@ -144,7 +144,7 @@
# start_placement_api() - Start the API processes ahead of other things
function start_placement_api {
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
+ run_process "placement-api" "$(which uwsgi) --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
else
enable_apache_site placement-api
restart_apache_server
diff --git a/lib/swift b/lib/swift
index 5be9e35..a981dfc 100644
--- a/lib/swift
+++ b/lib/swift
@@ -428,10 +428,13 @@
swift_pipeline+=" s3api"
fi
if is_service_enabled keystone; then
+ swift_pipeline+=" authtoken"
if is_service_enabled s3api;then
swift_pipeline+=" s3token"
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token auth_uri ${KEYSTONE_AUTH_URI_V3}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:s3token delay_auth_decision true
fi
- swift_pipeline+=" authtoken keystoneauth"
+ swift_pipeline+=" keystoneauth"
fi
swift_pipeline+=" tempauth "
@@ -524,7 +527,7 @@
else
iniset ${testfile} func_test auth_port 80
fi
- iniset ${testfile} func_test auth_uri ${KEYSTONE_AUTH_URI}
+ iniset ${testfile} func_test auth_uri ${KEYSTONE_SERVICE_URI}
if [[ "$auth_vers" == "3" ]]; then
iniset ${testfile} func_test auth_prefix /identity/v3/
else
diff --git a/lib/tempest b/lib/tempest
index 2208470..05fcb1f 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -107,7 +107,7 @@
function image_size_in_gib {
local size
size=$(openstack image show $1 -c size -f value)
- echo $size | python -c "import math; import six; print(int(math.ceil(float(int(six.moves.input()) / 1024.0 ** 3))))"
+ echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
}
# configure_tempest() - Set config files, create data dirs, etc
@@ -203,13 +203,13 @@
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
# Determine the flavor disk size based on the image size.
disk=$(image_size_in_gib $image_uuid)
- openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
+ openstack flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
fi
flavor_ref=42
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
# Determine the alt flavor disk size based on the alt image size.
disk=$(image_size_in_gib $image_uuid_alt)
- openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
+ openstack flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
fi
flavor_ref_alt=84
else
@@ -472,6 +472,11 @@
TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
fi
iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
+ # Only turn on TEMPEST_VOLUME_REVERT_TO_SNAPSHOT by default for "lvm" backends
+ if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]]; then
+ TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
+ fi
+ iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
# Reset microversions to None where v2 is running which does not support microversion.
@@ -570,17 +575,22 @@
iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
else
+ iniset $TEMPEST_CONFIG compute-feature-enabled stable_rescue True
iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
fi
fi
# ``service_available``
#
- # this tempest service list needs to be all the services that
- # tempest supports, otherwise we can have an erroneous set of
+ # this tempest service list needs to be the services that
+ # tempest owns, otherwise we can have an erroneous set of
# defaults (something defaulting true in Tempest, but not listed here).
+ # Services tested by tempest plugins need to be set on the service's
+ # devstack plugin side, as devstack cannot keep track of all the tempest
+ # plugin services. Refer to Bug#1743688 for more details.
+ # 'horizon' is also kept here as there is no devstack plugin for horizon.
local service
- local tempest_services="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove"
+ local tempest_services="key,glance,nova,neutron,cinder,swift,horizon"
for service in ${tempest_services//,/ }; do
if is_service_enabled $service ; then
iniset $TEMPEST_CONFIG service_available $service "True"
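A hedged sketch of what a tempest-plugin service's devstack plugin is now expected to do on its own (the service name is an example; the exact hook point varies per plugin):

    if is_service_enabled tempest; then
        iniset $TEMPEST_CONFIG service_available heat True
    fi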
@@ -617,9 +627,9 @@
iniset $TEMPEST_CONFIG auth tempest_roles "member"
if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
- tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+ tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
else
- tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+ tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
fi
iniset $TEMPEST_CONFIG auth use_dynamic_credentials False
iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
@@ -686,6 +696,11 @@
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
pip_install 'tox!=2.8.0'
pushd $TEMPEST_DIR
+ # NOTE(gmann): check out TEMPEST_BRANCH explicitly in case it is a tag
+ # name rather than master. git_clone does not check out a tag when
+ # TEMPEST_DIR already exists, unless RECLONE is true.
+ git checkout $TEMPEST_BRANCH
+
tox -r --notest -efull
# NOTE(mtreinish) Respect constraints in the tempest full venv, things that
# are using a tox job other than full will not be respecting constraints but
diff --git a/lib/tls b/lib/tls
index 65ffeb9..baafb59 100644
--- a/lib/tls
+++ b/lib/tls
@@ -227,13 +227,7 @@
function init_cert {
if [[ ! -r $DEVSTACK_CERT ]]; then
if [[ -n "$TLS_IP" ]]; then
- if python3_enabled; then
- TLS_IP="IP:$TLS_IP"
- else
- # Lie to let incomplete match routines work with python2
- # see https://bugs.python.org/issue23239
- TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
- fi
+ TLS_IP="IP:$TLS_IP"
if [[ -n "$HOST_IPV6" ]]; then
TLS_IP="$TLS_IP,IP:$HOST_IPV6"
fi
@@ -369,8 +363,7 @@
function fix_system_ca_bundle_path {
if is_service_enabled tls-proxy; then
local capath
- local python_cmd=${1:-python}
- capath=$($python_cmd -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+ capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
if is_fedora; then
diff --git a/openrc b/openrc
index 99d3351..28f388b 100644
--- a/openrc
+++ b/openrc
@@ -86,10 +86,10 @@
#
# If you don't have a working .stackenv, this is the backup position
-KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
-KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP}
+KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST/identity
+KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP}
-export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_AUTH_URI}
+export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI}
# Currently, in order to use openstackclient with Identity API v3,
# we need to set the domain which the user and project belong to.
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index 60f365a..ff97a1f 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -26,7 +26,6 @@
set_fact:
external_bridge_mtu: "{{ local_mtu | int - 50 }}"
roles:
- - test-matrix
- configure-swap
- setup-stack-user
- setup-tempest-user
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index cbec444..ef839ed 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -14,7 +14,7 @@
name=""
for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do
name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
- journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz
+ journalctl -o short-precise --unit $u > {{ stage_dir }}/logs/$name.txt
done
- name: Export legacy syslog.txt
@@ -29,7 +29,7 @@
-t sudo \
--no-pager \
--since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
- | gzip - > {{ stage_dir }}/logs/syslog.txt.gz
+ > {{ stage_dir }}/logs/syslog.txt
# TODO: convert this to ansible
# - make a list of the above units
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
index 598eb7f..fe36653 100644
--- a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -10,7 +10,7 @@
$ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
Note this binary is not in the regular path. On Debian/Ubuntu
-platforms, you will need to have the "sytemd-journal-remote" package
+platforms, you will need to have the "systemd-journal-remote" package
installed.
It should result in something like:
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml
index fea05c8..77a74d7 100644
--- a/roles/setup-devstack-source-dirs/defaults/main.yaml
+++ b/roles/setup-devstack-source-dirs/defaults/main.yaml
@@ -1 +1,9 @@
devstack_base_dir: /opt/stack
+devstack_source_dirs:
+ - src/opendev.org/opendev
+ - src/opendev.org/openstack
+ - src/opendev.org/openstack-dev
+ - src/opendev.org/openstack-infra
+ - src/opendev.org/starlingx
+ - src/opendev.org/x
+ - src/opendev.org/zuul
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index 160757e..294c29c 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -1,13 +1,6 @@
- name: Find all OpenStack source repos used by this job
find:
- paths:
- - src/opendev.org/opendev
- - src/opendev.org/openstack
- - src/opendev.org/openstack-dev
- - src/opendev.org/openstack-infra
- - src/opendev.org/starlingx
- - src/opendev.org/x
- - src/opendev.org/zuul
+ paths: "{{ devstack_source_dirs }}"
file_type: directory
register: found_repos
diff --git a/setup.cfg b/setup.cfg
index 4e27ad8..146f010 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,6 +10,3 @@
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
-
-[wheel]
-universal = 1
diff --git a/stack.sh b/stack.sh
index d0206eb..37e7518 100755
--- a/stack.sh
+++ b/stack.sh
@@ -221,7 +221,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (bionic|focal|f30|f31|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel8) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -283,74 +283,20 @@
# to pick up required packages.
function _install_epel {
- # NOTE: We always remove and install latest -- some environments
- # use snapshot images, and if EPEL version updates they break
- # unless we update them to latest version.
- if sudo yum repolist enabled epel | grep -q 'epel'; then
- uninstall_package epel-release || true
- fi
+ # epel-release is in the extras repo, which is enabled by default
+ install_package epel-release
- # This trick installs the latest epel-release from a bootstrap
- # repo, then removes itself (as epel-release installed the
- # "real" repo).
- #
- # You would think that rather than this, you could use
- # $releasever directly in .repo file we create below. However
- # RHEL gives a $releasever of "6Server" which breaks the path;
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
- cat <<EOF | sudo tee /etc/yum.repos.d/epel-bootstrap.repo
-[epel-bootstrap]
-name=Bootstrap EPEL
-mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=\$basearch
-failovermethod=priority
-enabled=0
-gpgcheck=0
-EOF
- # Enable a bootstrap repo. It is removed after finishing
- # the epel-release installation.
- is_package_installed yum-utils || install_package yum-utils
- sudo yum-config-manager --enable epel-bootstrap
- yum_install epel-release || \
- die $LINENO "Error installing EPEL repo, cannot continue"
- sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
+ # RDO repos are not tested with epel and may have incompatibilities, so
+ # let's limit the packages fetched from epel to the ones not in the RDO repos.
+ sudo dnf config-manager --save --setopt=includepkgs=debootstrap,dpkg epel
}
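A quick sanity check of the narrowed repo (assuming dnf-plugins-core's config-manager --dump is available, which the code above already relies on):

    sudo dnf config-manager --dump epel | grep -i includepkgs
    # expected to show includepkgs limited to debootstrap and dpkg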
function _install_rdo {
- # There are multiple options for this, including using CloudSIG
- # repositories (centos-release-*), trunk versions, etc. Since
- # we're not interested in the actual openstack distributions
- # (since we're using git to run!) but only peripherial packages
- # like kvm or ovs, this has been reliable.
-
- # TODO(ianw): figure out how to best mirror -- probably use infra
- # mirror RDO reverse proxy. We could either have test
- # infrastructure set it up disabled like EPEL, or fiddle it here.
- # Per the point above, it's a bunch of repos so starts getting a
- # little messy...
- if ! is_package_installed rdo-release ; then
- if [[ "$TARGET_BRANCH" == "master" ]]; then
- yum_install https://rdoproject.org/repos/rdo-release.rpm
- else
- # Get latest rdo-release-$rdo_release RPM package version
- rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
- yum_install https://rdoproject.org/repos/openstack-$rdo_release/rdo-release-$rdo_release.rpm
- fi
- fi
-
- # Also enable optional for RHEL7 proper. Note this is a silent
- # no-op on other platforms.
- sudo yum-config-manager --enable rhel-7-server-optional-rpms
-
- # Enable the Software Collections (SCL) repository for CentOS.
- # This repository includes useful software (e.g. the Go Toolset)
- # which is not present in the main repository.
- if [[ "$os_VENDOR" =~ (CentOS) ]]; then
- yum_install centos-release-scl
- fi
-
- if is_oraclelinux; then
- sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
- fi
+ # NOTE(ianw) 2020-04-30 : when we have future branches, we
+ # probably want to install the relevant branch RDO release as
+ # well. But for now it's all master.
+ sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+ sudo dnf -y update
}
@@ -395,15 +341,19 @@
# to speed things up
SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
-if [[ $DISTRO == "rhel7" ]]; then
+if [[ $DISTRO == "rhel8" ]]; then
# If we have /etc/ci/mirror_info.sh assume we're on a OpenStack CI
# node, where EPEL is installed (but disabled) and already
# pointing at our internal mirror
if [[ -f /etc/ci/mirror_info.sh ]]; then
SKIP_EPEL_INSTALL=True
- sudo yum-config-manager --enable epel
+ sudo dnf config-manager --set-enabled epel
fi
+ # The PowerTools repo provides libyaml-devel, which is required by devstack
+ # itself, and EPEL packages assume that the PowerTools repository is enabled.
+ sudo dnf config-manager --set-enabled PowerTools
+
if [[ ${SKIP_EPEL_INSTALL} != True ]]; then
_install_epel
fi
@@ -411,11 +361,17 @@
# available in RDO repositories (e.g. OVS, or later versions of
# kvm) to run.
_install_rdo
+
+ # NOTE(cgoncalves): workaround RHBZ#1154272
+ # dnf fails for non-privileged users when expired_repos.json doesn't exist.
+ # RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=1154272
+ # Patch: https://github.com/rpm-software-management/dnf/pull/1448
+ echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
fi
# Ensure python is installed
# --------------------------
-is_package_installed python || install_package python
+install_python
# Configure Logging
@@ -494,14 +450,14 @@
_of_args="$_of_args --no-timestamp"
fi
# Set fd 1 and 2 to write the log file
- exec 1> >( $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
+ exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
# Set fd 6 to summary log file
- exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
else
# Set fd 1 and 2 to primary logfile
- exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
+ exec 1> >( $PYTHON $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
# Set fd 6 to summary logfile and stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
fi
echo_summary "stack.sh log $LOGFILE"
@@ -518,7 +474,7 @@
exec 1>/dev/null 2>&1
fi
# Always send summary fd to original stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
+ exec 6> >( $PYTHON $TOP_DIR/tools/outfilter.py -v >&3 )
fi
# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
@@ -554,9 +510,9 @@
generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
fi
if [[ -z $LOGDIR ]]; then
- $TOP_DIR/tools/worlddump.py
+ ${PYTHON} $TOP_DIR/tools/worlddump.py
else
- $TOP_DIR/tools/worlddump.py -d $LOGDIR
+ ${PYTHON} $TOP_DIR/tools/worlddump.py -d $LOGDIR
fi
else
# If we error before we've installed os-testr, this will fail.
@@ -796,19 +752,6 @@
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
fi
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
-
-# Install Python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]]; then
- echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
- pip_install -U virtualenv
-
- rm -rf $DEST/.venv
- virtualenv --system-site-packages $DEST/.venv
- source $DEST/.venv/bin/activate
- $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
-fi
-
# Do the ugly hacks for broken packages and distros
source $TOP_DIR/tools/fixup_stuff.sh
fixup_all
@@ -816,13 +759,11 @@
# Install subunit for the subunit output stream
pip_install -U os-testr
-if [[ "$USE_SYSTEMD" == "True" ]]; then
- pip_install_gr systemd-python
- # the default rate limit of 1000 messages / 30 seconds is not
- # sufficient given how verbose our logging is.
- iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
- sudo systemctl restart systemd-journald
-fi
+pip_install_gr systemd-python
+# the default rate limit of 1000 messages / 30 seconds is not
+# sufficient given how verbose our logging is.
+iniset -sudo /etc/systemd/journald.conf "Journal" "RateLimitBurst" "0"
+sudo systemctl restart systemd-journald
# Virtual Environment
# -------------------
@@ -880,6 +821,13 @@
init_cert
fi
+# Dstat
+# -----
+
+# Install dstat services prerequisites
+install_dstat
+
+
# Check Out and Install Source
# ----------------------------
@@ -974,9 +922,6 @@
if is_service_enabled tls-proxy; then
fix_system_ca_bundle_path
- if python3_enabled ; then
- fix_system_ca_bundle_path python3
- fi
fi
# Extras Install
@@ -997,17 +942,6 @@
# osc commands. Alias dies with stack.sh.
install_oscwrap
-if [[ $TRACK_DEPENDS = True ]]; then
- $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
- if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
- echo "Detect some changes for installed packages of pip, in depend tracking mode"
- cat $DEST/requires.diff
- fi
- echo "Ran stack.sh in depend tracking mode, bailing out now"
- exit 0
-fi
-
-
# Syslog
# ------
@@ -1119,7 +1053,7 @@
# Set up password auth credentials now that Keystone is bootstrapped
export OS_IDENTITY_API_VERSION=3
-export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_AUTH_URL=$KEYSTONE_SERVICE_URI
export OS_USERNAME=admin
export OS_USER_DOMAIN_ID=default
export OS_PASSWORD=$ADMIN_PASSWORD
@@ -1179,7 +1113,8 @@
# Glance
# ------
-if is_service_enabled g-reg; then
+# NOTE(yoctozepto): limited to the node hosting the database, which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
echo_summary "Configuring Glance"
init_glance
fi
@@ -1304,8 +1239,8 @@
# scripts as userdata.
# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
-if is_service_enabled g-reg; then
-
+# NOTE(yoctozepto): limited to the node hosting the database, which is the controller
+if is_service_enabled $DATABASE_BACKENDS && is_service_enabled glance; then
echo_summary "Uploading images"
for image_url in ${IMAGE_URLS//,/ }; do
@@ -1545,14 +1480,11 @@
echo
fi
-# If USE_SYSTEMD is enabled, tell the user about using it.
-if [[ "$USE_SYSTEMD" == "True" ]]; then
- echo
- echo "Services are running under systemd unit files."
- echo "For more information see: "
- echo "https://docs.openstack.org/devstack/latest/systemd.html"
- echo
-fi
+echo
+echo "Services are running under systemd unit files."
+echo "For more information see: "
+echo "https://docs.openstack.org/devstack/latest/systemd.html"
+echo
# Useful info on current state
cat /etc/devstack-version
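
The stack.sh hunks above route the script's own output through tools/outfilter.py under the interpreter selected by $PYTHON. As a rough illustration only, not the real tools/outfilter.py, the sketch below shows the shape of such a stdin-to-logfile filter; the -o and -v flags mirror the calls in the diff, everything else is assumed.

    #!/usr/bin/env python3
    # Illustrative sketch only -- not the real tools/outfilter.py.
    # Read lines from stdin, timestamp them, append them to the file given
    # with -o, and echo them to stdout when -v is passed.
    import argparse
    import datetime
    import sys

    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument('-o', dest='outfile', default=None)
        parser.add_argument('-v', dest='verbose', action='store_true')
        args = parser.parse_args()

        out = open(args.outfile, 'a') if args.outfile else None
        for line in sys.stdin:
            stamped = '%s | %s' % (datetime.datetime.utcnow().isoformat(), line)
            if out:
                out.write(stamped)
                out.flush()
            if args.verbose or out is None:
                sys.stdout.write(stamped)
                sys.stdout.flush()

    if __name__ == '__main__':
        main()
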
diff --git a/stackrc b/stackrc
index 2d3a599..4ffd537 100644
--- a/stackrc
+++ b/stackrc
@@ -69,7 +69,7 @@
# Placement service needed for Nova
ENABLED_SERVICES+=,placement-api,placement-client
# Glance services needed for Nova
- ENABLED_SERVICES+=,g-api,g-reg
+ ENABLED_SERVICES+=,g-api
# Cinder
ENABLED_SERVICES+=,c-sch,c-api,c-vol
# Neutron
@@ -109,9 +109,7 @@
# Set the root URL for Horizon
HORIZON_APACHE_ROOT="/dashboard"
-# Whether to use SYSTEMD to manage services, we only do this from
-# Queens forward.
-USE_SYSTEMD="True"
+# Whether to use user-specific units for running services or global ones.
USER_UNITS=$(trueorfalse False USER_UNITS)
if [[ "$USER_UNITS" == "True" ]]; then
SYSTEMD_DIR="$HOME/.local/share/systemd/user"
@@ -136,28 +134,17 @@
fi
# Control whether Python 3 should be used at all.
-export USE_PYTHON3=$(trueorfalse True USE_PYTHON3)
+# TODO(frickler): Drop this when all consumers are fixed
+export USE_PYTHON3=True
-# Explicitly list services not to run under Python 3. See
-# disable_python3_package to edit this variable.
-export DISABLED_PYTHON3_PACKAGES=""
-
-# When Python 3 is supported by an application, adding the specific
-# version of Python 3 to this variable will install the app using that
-# version of the interpreter instead of 2.7.
+# Adding a specific version of Python 3 to this variable will install
+# the app using that interpreter instead of the unversioned python3.
_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
-export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
-
-# Just to be more explicit on the Python 2 version to use.
-_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
-export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3}}
# Create a virtualenv with this
-if [[ ${USE_PYTHON3} == True ]]; then
- export VIRTUALENV_CMD="virtualenv -p python3"
-else
- export VIRTUALENV_CMD="virtualenv "
-fi
+# Use the built-in venv to avoid more dependencies
+export VIRTUALENV_CMD="python3 -m venv"
# Default for log coloring is based on interactive-or-not.
# Baseline assumption is that non-interactive invocations are for CI,
@@ -258,7 +245,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="ussuri"
+DEVSTACK_SERIES="victoria"
##############
#
@@ -286,10 +273,6 @@
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH}
-# neutron fwaas service
-NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
-NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH}
-
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH}
@@ -603,7 +586,7 @@
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.1.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -625,6 +608,7 @@
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+ LIBVIRT_CPU_MODE=${LIBVIRT_CPU_MODE:-none}
if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
@@ -678,7 +662,7 @@
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
-CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -815,15 +799,6 @@
# Service graceful shutdown timeout
WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
-# Choose DNF on RedHat/Fedora platforms with it, or otherwise default
-# to YUM. Can remove this when only dnf is supported (i.e. centos7
-# disappears)
-if [[ -e /usr/bin/dnf ]]; then
- YUM=${YUM:-dnf}
-else
- YUM=${YUM:-yum}
-fi
-
# Common Configuration
# --------------------
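
The stackrc hunk above swaps the external virtualenv package for the interpreter's built-in venv module (VIRTUALENV_CMD="python3 -m venv"). A minimal sketch of the equivalent stdlib call, assuming an illustrative target path:

    # Roughly what "python3 -m venv <dir>" does, via the stdlib venv module.
    # The target directory here is an example, not a DevStack path.
    import venv

    builder = venv.EnvBuilder(system_site_packages=False, with_pip=True, clear=True)
    builder.create('/tmp/devstack-demo-venv')
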
diff --git a/tests/test_python.sh b/tests/test_python.sh
deleted file mode 100755
index 1f5453c..0000000
--- a/tests/test_python.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Tests for DevStack INI functions
-
-TOP=$(cd $(dirname "$0")/.. && pwd)
-
-source $TOP/functions-common
-source $TOP/inc/python
-
-source $TOP/tests/unittest.sh
-
-echo "Testing Python 3 functions"
-
-# Initialize variables manipulated by functions under test.
-export DISABLED_PYTHON3_PACKAGES=""
-
-assert_true "should be enabled by default" python3_enabled_for testpackage1
-
-assert_false "should not be disabled yet" python3_disabled_for testpackage2
-
-disable_python3_package testpackage2
-assert_equal "$DISABLED_PYTHON3_PACKAGES" "testpackage2" "unexpected result"
-assert_true "should be disabled" python3_disabled_for testpackage2
-
-report_results
diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh
index f407d40..9196525 100755
--- a/tests/test_worlddump.sh
+++ b/tests/test_worlddump.sh
@@ -8,7 +8,7 @@
OUT_DIR=$(mktemp -d)
-$TOP/tools/worlddump.py -d $OUT_DIR
+${PYTHON} $TOP/tools/worlddump.py -d $OUT_DIR
if [[ $? -ne 0 ]]; then
fail "worlddump failed"
diff --git a/tools/cap-pip.txt b/tools/cap-pip.txt
deleted file mode 100644
index f5278d7..0000000
--- a/tools/cap-pip.txt
+++ /dev/null
@@ -1 +0,0 @@
-pip!=8,<10
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index f4a4edc..c7bea4a 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -152,7 +152,7 @@
fi
if [ -z "$OS_AUTH_URL" ]; then
- export OS_AUTH_URL=http://localhost:5000/v3/
+ export OS_AUTH_URL=http://localhost/identity/v3/
fi
if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index eb8a76f..2ac8a47 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -5,16 +5,6 @@
# fixup_stuff.sh
#
# All distro and package specific hacks go in here
-#
-# - prettytable 0.7.2 permissions are 600 in the package and
-# pip 1.4 doesn't fix it (1.3 did)
-#
-# - httplib2 0.8 permissions are 600 in the package and
-# pip 1.4 doesn't fix it (1.3 did)
-#
-# - Fedora:
-# - set selinux not enforcing
-# - uninstall firewalld (f20 only)
# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
@@ -36,39 +26,6 @@
FILES=$TOP_DIR/files
fi
-# Keystone Port Reservation
-# -------------------------
-# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
-# being used as ephemeral ports by the system. The default(s) are 35357 and
-# 35358 which are in the Linux defined ephemeral port range (in disagreement
-# with the IANA ephemeral port range). This is a workaround for bug #1253482
-# where Keystone will try and bind to the port and the port will already be
-# in use as an ephemeral port by another process. This places an explicit
-# exception into the Kernel for the Keystone AUTH ports.
-function fixup_keystone {
- keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-
- # Only do the reserved ports when available, on some system (like containers)
- # where it's not exposed we are almost pretty sure these ports would be
- # exclusive for our DevStack.
- if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
- # Get any currently reserved ports, strip off leading whitespace
- reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
-
- if [[ -z "${reserved_ports}" ]]; then
- # If there are no currently reserved ports, reserve the keystone ports
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
- else
- # If there are currently reserved ports, keep those and also reserve the
- # Keystone specific ports. Duplicate reservations are merged into a single
- # reservation (or range) automatically by the kernel.
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
- fi
- else
- echo_summary "WARNING: unable to reserve keystone ports"
- fi
-}
-
# Ubuntu Repositories
#--------------------
# Enable universe for bionic since it is missing when installing from ISO.
@@ -82,6 +39,16 @@
# Enable universe
sudo add-apt-repository -y universe
+
+ # Since pip 10, pip refuses to uninstall files from packages that
+ # were installed with distutils (rather than the more modern
+ # setuptools), because it has no manifest of what to remove.
+ # However, in most cases simply overwriting works, so this hacks
+ # around those packages that have been dragged in by some other
+ # system dependency.
+ sudo rm -rf /usr/lib/python3/dist-packages/httplib2-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/pyasn1_modules-*.egg-info
+ sudo rm -rf /usr/lib/python3/dist-packages/PyYAML-*.egg-info
}
# Python Packages
@@ -93,32 +60,6 @@
echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])")
}
-
-# Pre-install affected packages so we can fix the permissions
-# These can go away once we are confident that pip 1.4.1+ is available everywhere
-
-function fixup_python_packages {
- # Fix prettytable 0.7.2 permissions
- # Don't specify --upgrade so we use the existing package if present
- pip_install 'prettytable>=0.7'
- PACKAGE_DIR=$(get_package_path prettytable)
- # Only fix version 0.7.2
- dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
- if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
- fi
-
- # Fix httplib2 0.8 permissions
- # Don't specify --upgrade so we use the existing package if present
- pip_install httplib2
- PACKAGE_DIR=$(get_package_path httplib2)
- # Only fix version 0.8
- dir=$(echo $PACKAGE_DIR-0.8*)
- if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
- fi
-}
-
function fixup_fedora {
if ! is_fedora; then
return
@@ -226,42 +167,15 @@
# have been dragged in by some other system dependency
sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info
-}
-# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
-# connection issues under proxy so re-install the latest version using
-# pip. To avoid having pip's virtualenv overwritten by the distro's
-# package (e.g. due to installing a distro package with a dependency
-# on python-virtualenv), first install the distro python-virtualenv
-# to satisfy any dependencies then use pip to overwrite it.
-
-# ... but, for infra builds, the pip-and-virtualenv [1] element has
-# already done this to ensure the latest pip, virtualenv and
-# setuptools on the base image for all platforms. It has also added
-# the packages to the yum/dnf ignore list to prevent them being
-# overwritten with old versions. F26 and dnf 2.0 has changed
-# behaviour that means re-installing python-virtualenv fails [2].
-# Thus we do a quick check if we're in the infra environment by
-# looking for the mirror config script before doing this, and just
-# skip it if so.
-
-# [1] https://opendev.org/openstack/diskimage-builder/src/branch/master/ \
-# diskimage_builder/elements/pip-and-virtualenv/ \
-# install.d/pip-and-virtualenv-source-install/04-install-pip
-# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
-
-function fixup_virtualenv {
- if [[ ! -f /etc/ci/mirror_info.sh ]]; then
- install_package python-virtualenv
- pip_install -U --force-reinstall virtualenv
- fi
+ # Ensure trusted CA certificates are up to date
+ # See https://bugzilla.suse.com/show_bug.cgi?id=1154871
+ # May be removed once a new opensuse-15 image is available in nodepool
+ sudo zypper up -y p11-kit ca-certificates-mozilla
}
function fixup_all {
- fixup_keystone
fixup_ubuntu
- fixup_python_packages
fixup_fedora
fixup_suse
- fixup_virtualenv
}
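
fixup_ubuntu above removes stale .egg-info metadata so pip >= 10 can overwrite distutils-installed packages. A rough Python equivalent of that cleanup, with the package names and dist-packages path taken from the diff (root privileges are needed to actually delete anything):

    # Sketch of the egg-info cleanup performed by fixup_ubuntu; removing the
    # stale metadata lets pip overwrite the distutils-installed packages.
    import glob
    import shutil

    DIST_PACKAGES = '/usr/lib/python3/dist-packages'
    for name in ('httplib2', 'pyasn1_modules', 'PyYAML'):
        for egg_info in glob.glob('%s/%s-*.egg-info' % (DIST_PACKAGES, name)):
            shutil.rmtree(egg_info, ignore_errors=True)
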
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index d39b801..1cacd06 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
@@ -45,9 +45,14 @@
def is_in_wanted_namespace(proj):
# only interested in openstack or x namespace (e.g. not retired
- # stackforge, etc)
+ # stackforge, etc).
+ #
+ # openstack/openstack "super-repo" of openstack projects as
+ # submodules, that can cause gitea to 500 timeout and thus stop
+ # this script. Skip it.
if proj.startswith('stackforge/') or \
- proj.startswith('stackforge-attic/'):
+ proj.startswith('stackforge-attic/') or \
+ proj == "openstack/openstack":
return False
else:
return True
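
The filter above now also drops the openstack/openstack super-repo. A self-contained restatement with a couple of illustrative checks (the sample project names are examples only):

    # Stand-alone copy of the namespace filter for quick experimentation.
    def is_in_wanted_namespace(proj):
        if proj.startswith(('stackforge/', 'stackforge-attic/')) or \
                proj == 'openstack/openstack':
            return False
        return True

    assert is_in_wanted_namespace('openstack/devstack')
    assert not is_in_wanted_namespace('stackforge/foo')
    assert not is_in_wanted_namespace('openstack/openstack')
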
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 2b6aa4c..f3fd1e2 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -5,7 +5,7 @@
# Update pip and friends to a known common version
# Assumptions:
-# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed
+# - PYTHON3_VERSION refers to a version already installed
set -o errexit
@@ -53,6 +53,8 @@
else
echo "pip: Not Installed"
fi
+ # Show the pip version seen by the python3 interpreter
+ python${PYTHON3_VERSION} -m pip --version
}
@@ -89,10 +91,7 @@
die $LINENO "Download of get-pip.py failed"
touch $LOCAL_PIP.downloaded
fi
- sudo -H -E python $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
- if python3_enabled; then
- sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP -c $TOOLS_DIR/cap-pip.txt
- fi
+ sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
}
@@ -125,7 +124,14 @@
# Show starting versions
get_versions
-# Do pip
+if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
+ configure_pypi_alternative_url
+fi
+
+# Just use system pkgs on Focal
+if [[ "$DISTRO" == focal ]]; then
+ exit 0
+fi
# Eradicate any and all system packages
@@ -133,16 +139,13 @@
# results in a nonfunctional system. pip on fedora installs to /usr so pip
# can safely override the system pip for all versions of fedora
if ! is_fedora && ! is_suse; then
- uninstall_package python-pip
- uninstall_package python3-pip
+ if is_package_installed python3-pip ; then
+ uninstall_package python3-pip
+ fi
fi
install_get_pip
-if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
- configure_pypi_alternative_url
-fi
-
set -x
# Note setuptools is part of requirements.txt and we want to make sure
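
get_versions above gains a check of the pip module as seen by the python3 interpreter (python${PYTHON3_VERSION} -m pip --version). The same check done from Python, as a sketch in which sys.executable stands in for the versioned interpreter:

    # Report the pip version bundled with the running interpreter, mirroring
    # the "python3 -m pip --version" call added to get_versions.
    import subprocess
    import sys

    result = subprocess.run([sys.executable, '-m', 'pip', '--version'],
                            capture_output=True, text=True, check=False)
    print(result.stdout.strip() or 'pip: Not Installed')
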
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index da59093..a7c03d2 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -81,12 +81,6 @@
fi
fi
-if python3_enabled; then
- install_python3
- export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
-else
- export PYTHON=$(which python 2>/dev/null)
-fi
# Mark end of run
# ---------------
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 63f25ca..6c36534 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,7 @@
set -o errexit
-PYTHON=${PYTHON:-python}
+PYTHON=${PYTHON:-python3}
# time to sleep between checks
SLEEP_TIME=20
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
old mode 100755
new mode 100644
index 07716b0..b15a0bf
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# This tool lists processes that lock memory pages from swapping to disk.
import re
diff --git a/tools/outfilter.py b/tools/outfilter.py
old mode 100755
new mode 100644
index cf09124..e910f79
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-#
+#!/usr/bin/env python3
+
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 9187c66..7be995e 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
diff --git a/tools/worlddump.py b/tools/worlddump.py
index d5ff5d1..6a618f5 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
@@ -17,14 +17,12 @@
"""Dump the state of the world for post mortem."""
-from __future__ import print_function
-
import argparse
import datetime
from distutils import spawn
import fnmatch
+import io
import os
-import os.path
import shutil
import subprocess
import sys
@@ -109,9 +107,10 @@
# This method gets max version searching 'OpenFlow versions 0x1:0x'.
# And return a version value converted to an integer type.
def _get_ofp_version():
- process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE)
+ process = subprocess.Popen(['ovs-ofctl', '--version'],
+ stdout=subprocess.PIPE)
stdout, _ = process.communicate()
- find_str = 'OpenFlow versions 0x1:0x'
+ find_str = b'OpenFlow versions 0x1:0x'
offset = stdout.find(find_str)
return int(stdout[offset + len(find_str):-1]) - 1
@@ -165,15 +164,13 @@
_header("Network Dump")
_dump_cmd("bridge link")
- if _find_cmd("brctl"):
- _dump_cmd("brctl show")
_dump_cmd("ip link show type bridge")
ip_cmds = ["neigh", "addr", "link", "route"]
for cmd in ip_cmds + ['netns']:
_dump_cmd("ip %s" % cmd)
for netns_ in _netns_list():
for cmd in ip_cmds:
- args = {'netns': netns_, 'cmd': cmd}
+ args = {'netns': bytes.decode(netns_), 'cmd': cmd}
_dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args)
@@ -194,7 +191,7 @@
_dump_cmd("sudo ovs-vsctl show")
for ofctl_cmd in ofctl_cmds:
for bridge in bridges:
- args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge}
+ args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bytes.decode(bridge)}
_dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args)
@@ -206,7 +203,7 @@
def compute_consoles():
_header("Compute consoles")
- for root, dirnames, filenames in os.walk('/opt/stack'):
+ for root, _, filenames in os.walk('/opt/stack'):
for filename in fnmatch.filter(filenames, 'console.log'):
fullpath = os.path.join(root, filename)
_dump_cmd("sudo cat %s" % fullpath)
@@ -234,12 +231,22 @@
# tools out there that can do that sort of thing though.
_dump_cmd("ls -ltrah /var/core")
+
+def disable_stdio_buffering():
+ # re-open STDOUT as binary, then wrap it in a
+ # TextIOWrapper, and write through everything.
+ binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0)
+ sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True)
+
+
def main():
opts = get_options()
fname = filename(opts.dir, opts.name)
print("World dumping... see %s for details" % fname)
- sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
- with open(fname, 'w') as f:
+
+ disable_stdio_buffering()
+
+ with io.open(fname, 'w') as f:
os.dup2(f.fileno(), sys.stdout.fileno())
disk_space()
process_list()
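
The worlddump.py port above replaces the Python 2 trick of re-opening stdout unbuffered with an io.TextIOWrapper(write_through=True) wrapper, and handles the byte strings that subprocess returns under Python 3. A small self-contained illustration of both pieces; the echoed OpenFlow string is just sample input:

    # Demonstrates the two Python 3 fixes from the diff: unbuffered text
    # stdout via TextIOWrapper(write_through=True) and byte-string handling
    # of subprocess output.
    import io
    import subprocess
    import sys

    binary_stdout = io.open(sys.stdout.fileno(), 'wb', 0)
    sys.stdout = io.TextIOWrapper(binary_stdout, write_through=True)

    process = subprocess.Popen(['echo', 'OpenFlow versions 0x1:0x6'],
                               stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    find_str = b'OpenFlow versions 0x1:0x'
    offset = stdout.find(find_str)
    print('max OpenFlow version: %d' % (int(stdout[offset + len(find_str):-1]) - 1))
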
diff --git a/tox.ini b/tox.ini
index 26baa2a..ed28636 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,15 +5,14 @@
[testenv]
usedevelop = False
-install_command = pip install {opts} {packages}
+basepython = python3
[testenv:bashate]
-basepython = python3
# if you want to test out some changes you have made to bashate
# against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
# modified bashate tree
deps =
- {env:BASHATE_INSTALL_PATH:bashate==0.5.1}
+ {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \
@@ -35,7 +34,6 @@
-print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
-basepython = python3
deps = -r{toxinidir}/doc/requirements.txt
whitelist_externals = bash
setenv =
@@ -44,7 +42,6 @@
sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
[testenv:pdf-docs]
-basepython = python3
deps = {[testenv:docs]deps}
whitelist_externals =
make
@@ -53,6 +50,5 @@
make -C doc/build/pdf
[testenv:venv]
-basepython = python3
deps = -r{toxinidir}/doc/requirements.txt
commands = {posargs}