Merge "Explicitly set scheduler_available_filters"
diff --git a/.gitignore b/.gitignore
index 8553b3f..8fe56ad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
*~
.*.sw?
*.log
-*.log.[1-9]
+*-log
+*.log.*
+*-log.*
*.pem
*.pyc
.localrc.auto
@@ -26,7 +28,7 @@
files/ir-deploy*
files/ironic-inspector*
files/etcd*
-local.conf
+/local.conf
local.sh
localrc
proto
diff --git a/.gitreview b/.gitreview
index 570d31a..e1bf63b 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
-host=review.openstack.org
+host=review.opendev.org
port=29418
-project=openstack-dev/devstack.git
+project=openstack/devstack.git
diff --git a/.zuul.yaml b/.zuul.yaml
index 57cbf88..8c0ce2f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -9,6 +9,26 @@
- controller
- nodeset:
+ name: openstack-single-node-bionic
+ nodes:
+ - name: controller
+ label: ubuntu-bionic
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: openstack-single-node-xenial
+ nodes:
+ - name: controller
+ label: ubuntu-xenial
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
name: devstack-single-node-centos-7
nodes:
- name: controller
@@ -19,30 +39,10 @@
- controller
- nodeset:
- name: devstack-single-node-opensuse-423
+ name: devstack-single-node-opensuse-150
nodes:
- name: controller
- label: opensuse-423
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
- name: devstack-single-node-opensuse-tumbleweed
- nodes:
- - name: controller
- label: opensuse-tumbleweed
- groups:
- - name: tempest
- nodes:
- - controller
-
-- nodeset:
- name: devstack-single-node-fedora-27
- nodes:
- - name: controller
- label: fedora-27
+ label: opensuse-150
groups:
- name: tempest
nodes:
@@ -88,6 +88,101 @@
nodes:
- compute1
+- nodeset:
+ name: openstack-two-node-bionic
+ nodes:
+ - name: controller
+ label: ubuntu-bionic
+ - name: compute1
+ label: ubuntu-bionic
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
+ name: openstack-two-node-xenial
+ nodes:
+ - name: controller
+ label: ubuntu-xenial
+ - name: compute1
+ label: ubuntu-xenial
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- nodeset:
+ name: openstack-three-node-bionic
+ nodes:
+ - name: controller
+ label: ubuntu-bionic
+ - name: compute1
+ label: ubuntu-bionic
+ - name: compute2
+ label: ubuntu-bionic
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ - compute2
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ - compute2
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+ - compute2
+
- job:
name: devstack-base
parent: multinode
@@ -103,10 +198,10 @@
job.group-vars.peers, which is what is used by multi node jobs for subnode
nodes (everything but the controller).
required-projects:
- - git.openstack.org/openstack-dev/devstack
+ - opendev.org/openstack/devstack
roles:
- - zuul: git.openstack.org/openstack-infra/devstack-gate
- - zuul: git.openstack.org/openstack-infra/openstack-zuul-jobs
+ - zuul: opendev.org/openstack/devstack-gate
+ - zuul: opendev.org/openstack/openstack-zuul-jobs
vars:
devstack_localrc:
DATABASE_PASSWORD: secretdatabase
@@ -130,46 +225,48 @@
# Ignore any default set by devstack. Emit a "disable_all_services".
base: false
zuul_copy_output:
- '{{ devstack_conf_dir }}/local.conf': 'logs'
- '{{ devstack_conf_dir }}/localrc': 'logs'
- '{{ devstack_conf_dir }}/.localrc.auto': 'logs'
- '{{ devstack_conf_dir }}/.stackenv': 'logs'
- '{{ devstack_log_dir }}/dstat-csv.log': 'logs'
- '{{ devstack_log_dir }}/devstacklog.txt': 'logs'
- '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs'
- '{{ devstack_full_log}}': 'logs'
- '{{ stage_dir }}/verify_tempest_conf.log': 'logs'
- '{{ stage_dir }}/apache': 'logs'
- '{{ stage_dir }}/apache_config': 'logs'
- '{{ stage_dir }}/etc': 'logs'
- '/var/log/rabbitmq': 'logs'
- '/var/log/postgresql': 'logs'
- '/var/log/mysql.err': 'logs'
- '/var/log/mysql.log': 'logs'
- '/var/log/libvirt': 'logs'
- '/etc/sudoers': 'logs'
- '/etc/sudoers.d': 'logs'
- '{{ stage_dir }}/iptables.txt': 'logs'
- '{{ stage_dir }}/df.txt': 'logs'
- '{{ stage_dir }}/pip2-freeze.txt': 'logs'
- '{{ stage_dir }}/pip3-freeze.txt': 'logs'
- '{{ stage_dir }}/dpkg-l.txt': 'logs'
- '{{ stage_dir }}/rpm-qa.txt': 'logs'
- '{{ stage_dir }}/core': 'logs'
- '{{ stage_dir }}/listen53.txt': 'logs'
- '{{ stage_dir }}/deprecations.log': 'logs'
- '/var/log/ceph': 'logs'
- '/var/log/openvswitch': 'logs'
- '/var/log/glusterfs': 'logs'
- '/etc/glusterfs/glusterd.vol': 'logs'
- '/etc/resolv.conf': 'logs'
- '/var/log/unbound.log': 'logs'
+ '{{ devstack_conf_dir }}/local.conf': logs
+ '{{ devstack_conf_dir }}/localrc': logs
+ '{{ devstack_conf_dir }}/.localrc.auto': logs
+ '{{ devstack_conf_dir }}/.stackenv': logs
+ '{{ devstack_log_dir }}/dstat-csv.log': logs
+ '{{ devstack_log_dir }}/devstacklog.txt': logs
+ '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
+ '{{ devstack_log_dir }}/tcpdump.pcap': logs
+ '{{ devstack_full_log}}': logs
+ '{{ stage_dir }}/verify_tempest_conf.log': logs
+ '{{ stage_dir }}/apache': logs
+ '{{ stage_dir }}/apache_config': logs
+ '{{ stage_dir }}/etc': logs
+ /var/log/rabbitmq: logs
+ /var/log/postgresql: logs
+ /var/log/mysql.err: logs
+ /var/log/mysql.log: logs
+ /var/log/libvirt: logs
+ /etc/sudoers: logs
+ /etc/sudoers.d: logs
+ '{{ stage_dir }}/iptables.txt': logs
+ '{{ stage_dir }}/df.txt': logs
+ '{{ stage_dir }}/pip2-freeze.txt': logs
+ '{{ stage_dir }}/pip3-freeze.txt': logs
+ '{{ stage_dir }}/dpkg-l.txt': logs
+ '{{ stage_dir }}/rpm-qa.txt': logs
+ '{{ stage_dir }}/core': logs
+ '{{ stage_dir }}/listen53.txt': logs
+ '{{ stage_dir }}/deprecations.log': logs
+ '{{ stage_dir }}/audit.log': logs
+ /var/log/ceph: logs
+ /var/log/openvswitch: logs
+ /var/log/glusterfs: logs
+ /etc/glusterfs/glusterd.vol: logs
+ /etc/resolv.conf: logs
+ /var/log/unbound.log: logs
extensions_to_txt:
- conf: True
- log: True
- localrc: True
- stackenv: True
- auto: True
+ conf: true
+ log: true
+ localrc: true
+ stackenv: true
+ auto: true
group-vars:
subnode:
devstack_localrc:
@@ -209,15 +306,15 @@
description: |
Minimal devstack base job, intended for use by jobs that need
less than the normal minimum set of required-projects.
- nodeset: openstack-single-node
+ nodeset: openstack-single-node-bionic
required-projects:
- - git.openstack.org/openstack/requirements
+ - opendev.org/openstack/requirements
vars:
devstack_localrc:
# Multinode specific settings
SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
- PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+ PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
devstack_services:
# Shared services
dstat: true
@@ -235,7 +332,7 @@
# Multinode specific settings
HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
- PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+ PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
# Subnode specific settings
DATABASE_TYPE: mysql
RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
@@ -251,8 +348,8 @@
This base job can be used for single node and multinode devstack jobs.
With a single node nodeset, this job sets up an "all-in-one" (aio)
- devstack with the six OpenStack services included in the devstack tree:
- keystone, glance, cinder, neutron, nova and swift.
+ devstack with the seven OpenStack services included in the devstack tree:
+ keystone, glance, cinder, neutron, nova, placement, and swift.
With a two node nodeset, this job sets up an aio + compute node.
The controller can be customised using host-vars.controller, the
@@ -268,14 +365,14 @@
The run playbook consists of a single role, so it can be easily rewritten
and extended.
- nodeset: openstack-single-node
required-projects:
- - git.openstack.org/openstack/cinder
- - git.openstack.org/openstack/glance
- - git.openstack.org/openstack/keystone
- - git.openstack.org/openstack/neutron
- - git.openstack.org/openstack/nova
- - git.openstack.org/openstack/swift
+ - opendev.org/openstack/cinder
+ - opendev.org/openstack/glance
+ - opendev.org/openstack/keystone
+ - opendev.org/openstack/neutron
+ - opendev.org/openstack/nova
+ - opendev.org/openstack/placement
+ - opendev.org/openstack/swift
timeout: 7200
vars:
devstack_localrc:
@@ -284,7 +381,7 @@
SWIFT_START_ALL_SERVICES: false
SWIFT_HASH: 1234123412341234
CINDER_PERIODIC_INTERVAL: 10
- DEBUG_LIBVIRT_COREDUMPS: True
+ DEBUG_LIBVIRT_COREDUMPS: true
NOVA_VNC_ENABLED: true
VNCSERVER_LISTEN: 0.0.0.0
VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
@@ -292,7 +389,7 @@
post-config:
$NEUTRON_CONF:
DEFAULT:
- global_physnet_mtu: "{{ external_bridge_mtu }}"
+ global_physnet_mtu: '{{ external_bridge_mtu }}'
devstack_services:
# Core services enabled for this branch.
# This list replaces the test-matrix.
@@ -386,13 +483,43 @@
VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
- job:
+ name: devstack-ipv6
+ parent: devstack
+ description: |
+ Devstack single node job for integration gate with IPv6.
+ vars:
+ devstack_localrc:
+ SERVICE_IP_VERSION: 6
+ SERVICE_HOST: ""
+ # IPv6 and certificates have a known issue with python2
+ # https://bugs.launchpad.net/devstack/+bug/1794929
+ USE_PYTHON3: true
+
+- job:
+ name: devstack-xenial
+ parent: devstack
+ nodeset: openstack-single-node-xenial
+ description: |
+ Simple single node test to verify functionality on devstack
+ side running on Xenial.
+
+- job:
name: devstack-multinode
parent: devstack
- nodeset: openstack-two-node
+ nodeset: openstack-two-node-bionic
description: |
Simple multinode test to verify multinode functionality on devstack side.
This is not meant to be used as a parent job.
+- job:
+ name: devstack-multinode-xenial
+ parent: devstack
+ nodeset: openstack-two-node-xenial
+ description: |
+ Simple multinode test to verify multinode functionality on devstack
+ side running on Xenial.
+ This is not meant to be used as a parent job.
+
# NOTE(ianw) Platform tests have traditionally been non-voting because
# we often have to rush things through devstack to stabilise the gate,
# and these platforms don't have the round-the-clock support to avoid
@@ -405,17 +532,10 @@
voting: false
- job:
- name: devstack-platform-opensuse-423
+ name: devstack-platform-opensuse-150
parent: tempest-full
- description: openSUSE 43.2 platform test
- nodeset: devstack-single-node-opensuse-423
- voting: false
-
-- job:
- name: devstack-platform-opensuse-tumbleweed
- parent: tempest-full
- description: openSUSE Tumbleweed platform test
- nodeset: devstack-single-node-opensuse-tumbleweed
+ description: openSUSE 15.0 platform test
+ nodeset: devstack-single-node-opensuse-150
voting: false
- job:
@@ -426,6 +546,13 @@
voting: false
- job:
+ name: devstack-platform-xenial
+ parent: tempest-full
+ description: Ubuntu Xenial platform test
+ nodeset: openstack-single-node-xenial
+ voting: false
+
+- job:
name: devstack-tox-base
parent: devstack
description: |
@@ -489,28 +616,95 @@
run: playbooks/unit-tests/run.yaml
- project:
+ templates:
+ - integrated-gate
+ - integrated-gate-py3
+ - publish-openstack-docs-pti
check:
jobs:
- devstack
+ - devstack-xenial
+ - devstack-ipv6:
+ voting: false
- devstack-platform-centos-7
- - devstack-platform-opensuse-423
- - devstack-platform-opensuse-tumbleweed
+ - devstack-platform-opensuse-150
- devstack-platform-fedora-latest
+ - devstack-platform-xenial
- devstack-multinode
+ - devstack-multinode-xenial
- devstack-unit-tests
+ - openstack-tox-bashate
+ - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
+ voting: false
+ - swift-dsvm-functional:
+ voting: false
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-grenade:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-grenade-multinode:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-tempest-linuxbridge:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - tempest-multinode-full:
+ voting: false
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - openstacksdk-functional-devstack:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
gate:
jobs:
- devstack
+ - devstack-xenial
+ - devstack-multinode
+ - devstack-multinode-xenial
- devstack-unit-tests
+ - openstack-tox-bashate
+ - neutron-grenade-multinode:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-tempest-linuxbridge:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-grenade:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - openstacksdk-functional-devstack:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
# Please add a note on each job and conditions for the job not
# being experimental any more, so we can keep this list somewhat
# pruned.
#
# * nova-cells-v1: maintained by nova for cells v1 (nova-cells service);
- # nova gates on this job, it's in experimental for testing cells v1
+ # it's in experimental here (and in nova) for testing cells v1
# changes to devstack w/o gating on it for all devstack changes.
# * nova-next: maintained by nova for unreleased/undefaulted
# things like cellsv2 and placement-api
+ # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test
+ # when neutron-api is served by uwsgi; it's in experimental for testing.
+ # The next cycle we can remove this job if things turn out to be
+ # stable enough.
+ # * neutron-functional-with-uwsgi: maintained by neutron for functional
+ # test. Next cycle we can remove this one if things turn out to be
+ # stable enough with uwsgi.
+ # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test.
+ # Next cycle we can remove this if everything turns out stable enough.
+
experimental:
jobs:
- nova-cells-v1:
@@ -518,3 +712,34 @@
- ^.*\.rst$
- ^doc/.*$
- nova-next
+ - neutron-fullstack-with-uwsgi
+ - neutron-functional-with-uwsgi
+ - neutron-tempest-with-uwsgi
+ - devstack-plugin-ceph-tempest:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - devstack-plugin-ceph-tempest-py3:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-tempest-dvr:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - legacy-tempest-dsvm-neutron-dvr-multinode-full:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - neutron-tempest-dvr-ha-multinode-full:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - legacy-tempest-dsvm-lvm-multibackend:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - tempest-pg-full:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
diff --git a/HACKING.rst b/HACKING.rst
index d5d6fbc..3853eed 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -47,12 +47,7 @@
level.
``doc`` - Contains the Sphinx source for the documentation.
-``tools/build_docs.sh`` is used to generate the HTML versions of the
-DevStack scripts. A complete doc build can be run with ``tox -edocs``.
-
-``exercises`` - Contains the test scripts used to sanity-check and
-demonstrate some OpenStack functions. These scripts know how to exit
-early or skip services that are not enabled.
+A complete doc build can be run with ``tox -edocs``.
``extras.d`` - Contains the dispatch scripts called by the hooks in
``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins
@@ -183,88 +178,6 @@
OpenStack project standard.
-Exercises
----------
-
-The scripts in the exercises directory are meant to 1) perform basic operational
-checks on certain aspects of OpenStack; and b) document the use of the
-OpenStack command-line clients.
-
-In addition to the guidelines above, exercise scripts MUST follow the structure
-outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines.
-These scripts are executed serially by ``exercise.sh`` in testing situations.
-
-* Begin and end with a banner that stands out in a sea of script logs to aid
- in debugging failures, particularly in automated testing situations. If the
- end banner is not displayed, the script ended prematurely and can be assumed
- to have failed.
-
- ::
-
- echo "**************************************************"
- echo "Begin DevStack Exercise: $0"
- echo "**************************************************"
- ...
- set +o xtrace
- echo "**************************************************"
- echo "End DevStack Exercise: $0"
- echo "**************************************************"
-
-* The scripts will generally have the shell ``xtrace`` attribute set to display
- the actual commands being executed, and the ``errexit`` attribute set to exit
- the script on non-zero exit codes::
-
- # This script exits on an error so that errors don't compound and you see
- # only the first error that occurred.
- set -o errexit
-
- # Print the commands being run so that we can see the command that triggers
- # an error. It is also useful for following as the install occurs.
- set -o xtrace
-
-* Settings and configuration are stored in ``exerciserc``, which must be
- sourced after ``openrc`` or ``stackrc``::
-
- # Import exercise configuration
- source $TOP_DIR/exerciserc
-
-* There are a couple of helper functions in the common ``functions`` sub-script
- that will check for non-zero exit codes and unset environment variables and
- print a message and exit the script. These should be called after most client
- commands that are not otherwise checked to short-circuit long timeouts
- (instance boot failure, for example)::
-
- swift post $CONTAINER
- die_if_error "Failure creating container $CONTAINER"
-
- FLOATING_IP=`euca-allocate-address | cut -f2`
- die_if_not_set FLOATING_IP "Failure allocating floating IP"
-
-* If you want an exercise to be skipped when for example a service wasn't
- enabled for the exercise to be run, you can exit your exercise with the
- special exitcode 55 and it will be detected as skipped.
-
-* The exercise scripts should only use the various OpenStack client binaries to
- interact with OpenStack. This specifically excludes any ``*-manage`` tools
- as those assume direct access to configuration and databases, as well as direct
- database access from the exercise itself.
-
-* If specific configuration needs to be present for the exercise to complete,
- it should be staged in ``stack.sh``, or called from ``stack.sh``.
-
-* The ``OS_*`` environment variables should be the only ones used for all
- authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
-
-.. _CLIAuth: https://wiki.openstack.org/CLIAuth
-
-* The exercise MUST clean up after itself if successful. If it is not successful,
- it is assumed that state will be left behind; this allows a chance for developers
- to look around and attempt to debug the problem. The exercise SHOULD clean up
- or graciously handle possible artifacts left over from previous runs if executed
- again. It is acceptable to require a reboot or even a re-install of DevStack
- to restore a clean test environment.
-
-
Bash Style Guidelines
~~~~~~~~~~~~~~~~~~~~~
DevStack defines a bash set of best practices for maintaining large
diff --git a/README.rst b/README.rst
index 6885546..ad7ede4 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@
`stackrc` for the default set). Usually just before a release there will be
milestone-proposed branches that need to be tested::
- GLANCE_REPO=git://git.openstack.org/openstack/glance.git
+ GLANCE_REPO=https://git.openstack.org/openstack/glance.git
GLANCE_BRANCH=milestone-proposed
Start A Dev Cloud
diff --git a/clean.sh b/clean.sh
index a29ebd9..d6c6b40 100755
--- a/clean.sh
+++ b/clean.sh
@@ -123,7 +123,7 @@
sudo rm -rf $LOGDIR
fi
-# Clean out the sytemd user unit files if systemd was used.
+# Clean out the systemd user unit files if systemd was used.
if [[ "$USE_SYSTEMD" = "True" ]]; then
sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
# Make systemd aware of the deletion.
diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf
new file mode 120000
index 0000000..cfc2a4e
--- /dev/null
+++ b/doc/source/assets/local.conf
@@ -0,0 +1 @@
+../../../samples/local.conf
\ No newline at end of file
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 7efe4d6..022e6ba 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -446,6 +446,16 @@
ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
+Use python3
+------------
+
+By default ``stack.sh`` uses python2 (the exact version set by
+``PYTHON2_VERSION``). This can be overridden so devstack will run
+python3 (the exact version set by ``PYTHON3_VERSION``).
+
+ ::
+
+ USE_PYTHON3=True
A clean install every time
--------------------------
@@ -665,8 +675,7 @@
enable_service n-cell
Be aware that there are some features currently missing in cells, one
-notable one being security groups. The exercises have been patched to
-disable functionality not supported by cells.
+notable one being security groups.
Cinder
~~~~~~
@@ -729,44 +738,6 @@
ENABLE_IDENTITY_V2=False
-Exercises
-~~~~~~~~~
-
-``exerciserc`` is used to configure settings for the exercise scripts.
-The values shown below are the default values. These can all be
-overridden by setting them in the ``localrc`` section.
-
-* Max time to wait while vm goes from build to active state
-
- ::
-
- ACTIVE_TIMEOUT==30
-
-* Max time to wait for proper IP association and dis-association.
-
- ::
-
- ASSOCIATE_TIMEOUT=15
-
-* Max time till the vm is bootable
-
- ::
-
- BOOT_TIMEOUT=30
-
-* Max time from run instance command until it is running
-
- ::
-
- RUNNING_TIMEOUT=$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))
-
-* Max time to wait for a vm to terminate
-
- ::
-
- TERMINATE_TIMEOUT=30
-
-
.. _arch-configuration:
Architectures
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
new file mode 100644
index 0000000..fd0d9cd
--- /dev/null
+++ b/doc/source/debugging.rst
@@ -0,0 +1,46 @@
+=====================
+System-wide debugging
+=====================
+
+A lot can go wrong during a devstack run, and there are a few inbuilt
+tools to help you.
+
+dstat
+-----
+
+Enable the ``dstat`` service to produce performance logs during the
+devstack run. These will be logged to the journal and also as a CSV
+file.
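+
+One way to turn it on is with ``enable_service`` in ``local.conf``; a
+minimal sketch:
+
+.. code-block:: ini
+
+ [[local|localrc]]
+ # Collect dstat performance data during the run
+ enable_service dstat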
+
+memory_tracker
+--------------
+
+The ``memory_tracker`` service periodically monitors RAM usage and
+provides consumption output when available memory is seen to be
+falling (i.e. processes are consuming memory). It also provides
+output showing locked (unswappable) memory.
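+
+It can be enabled the same way; a minimal ``local.conf`` sketch:
+
+.. code-block:: ini
+
+ [[local|localrc]]
+ # Periodically log memory and locked-memory consumption
+ enable_service memory_tracker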
+
+tcpdump
+-------
+
+Enable the ``tcpdump`` service to run a background tcpdump. You must
+set the ``TCPDUMP_ARGS`` variable to something suitable (there is no
+default). For example, to trace iSCSI communication during a job in
+the OpenStack gate and copy the result into the log output, you might
+use:
+
+.. code-block:: yaml
+
+ job:
+ name: devstack-job
+ parent: devstack
+ vars:
+ devstack_services:
+ tcpdump: true
+ devstack_localrc:
+ TCPDUMP_ARGS: "-i any tcp port 3260"
+ zuul_copy_output:
+ '{{ devstack_log_dir }}/tcpdump.pcap': logs
+
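+Outside of a Zuul job, the same thing can be expressed directly in
+``local.conf``; a sketch reusing the illustrative iSCSI filter from the
+example above:
+
+.. code-block:: ini
+
+ [[local|localrc]]
+ # Run a background tcpdump; TCPDUMP_ARGS has no default and must be set
+ enable_service tcpdump
+ TCPDUMP_ARGS="-i any tcp port 3260"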
+
+
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index df3c7ce..b1d88cb 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -1,39 +1,54 @@
-Configure Load-Balancer Version 2
-=================================
+Devstack with Octavia Load Balancing
+====================================
-Starting in the OpenStack Liberty release, the
-`neutron LBaaS v2 API <https://developer.openstack.org/api-ref/network/v2/index.html>`_
-is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference
-driver is based on Octavia.
+Starting with the OpenStack Pike release, Octavia is now a standalone service
+providing load balancing services for OpenStack.
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://developer.openstack.org/api-ref/load-balancer/v2/index.html
Phase 1: Create DevStack + 2 nova instances
--------------------------------------------
First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find useful.
+make sure it is updated. Install git and any other developer tools you find
+useful.
Install devstack
::
git clone https://git.openstack.org/openstack-dev/devstack
- cd devstack
+ cd devstack/tools
+ sudo ./create-stack-user.sh
+ cd ../..
+ sudo mv devstack /opt/stack
+ sudo chown -R stack.stack /opt/stack/devstack
+This will clone the current devstack code locally, then set up the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location in /opt/stack/devstack.
-Edit your ``local.conf`` to look like
+Edit your ``/opt/stack/devstack/local.conf`` to look like
::
[[local|localrc]]
- # Load the external LBaaS plugin.
- enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_plugin octavia https://git.openstack.org/openstack/octavia
+ # If you are enabling horizon, include the octavia dashboard
+ # enable_plugin octavia-dashboard https://git.openstack.org/openstack/octavia-dashboard.git
+ # If you are enabling barbican for TLS offload in Octavia, include it here.
+ # enable_plugin barbican https://github.com/openstack/barbican.git
+
+ # If you have python3 available:
+ # USE_PYTHON3=True
# ===== BEGIN localrc =====
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=password
+ SERVICE_TOKEN=password
RABBIT_PASSWORD=password
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
@@ -41,27 +56,30 @@
LOG_COLOR=True
# Pre-requisite
ENABLED_SERVICES=rabbit,mysql,key
- # Horizon
- ENABLED_SERVICES+=,horizon
+ # Horizon - enable for the OpenStack web GUI
+ # ENABLED_SERVICES+=,horizon
# Nova
- ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch
+ ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
+ ENABLED_SERVICES+=,placement-api,placement-client
# Glance
ENABLED_SERVICES+=,g-api,g-reg
# Neutron
- ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
- # Enable LBaaS v2
- ENABLED_SERVICES+=,q-lbaasv2
+ ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
# Cinder
ENABLED_SERVICES+=,c-api,c-vol,c-sch
# Tempest
ENABLED_SERVICES+=,tempest
+ # Barbican - Optionally used for TLS offload in Octavia
+ # ENABLED_SERVICES+=,barbican
# ===== END localrc =====
Run stack.sh and do some sanity checks
::
+ sudo su - stack
+ cd /opt/stack/devstack
./stack.sh
. ./openrc
@@ -72,38 +90,59 @@
::
#create nova instances on private network
- nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
- nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
- nova list # should show the nova instances just created
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+ openstack server list # should show the nova instances just created
#add secgroup rules to allow ssh etc..
openstack security group rule create default --protocol icmp
openstack security group rule create default --protocol tcp --dst-port 22:22
openstack security group rule create default --protocol tcp --dst-port 80:80
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
+Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
::
MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-Phase 2: Create your load balancers
-------------------------------------
+Phase 2: Create your load balancer
+----------------------------------
+
+Make sure you have the 'openstack loadbalancer' commands:
::
- neutron lbaas-loadbalancer-create --name lb1 private-subnet
- neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE.
- neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
- sleep 10 # Sleep since LBaaS actions can take a few seconds depending on the environment.
- neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
- sleep 10
- neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1
- sleep 10
- neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1
+ pip install python-octaviaclient
-Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes
-(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be
-reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is
-"curl that-lb-ip", which should alternate between showing the IPs of the two nodes.
+Create your load balancer:
+
+::
+
+ openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: The <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note that, using the API directly, you can do all of the above commands
+in one API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+ openstack loadbalancer show lb1 # Note the vip_address
+ curl http://<vip_address>
+ curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst
index ec41141..4c54723 100644
--- a/doc/source/guides/devstack-with-ldap.rst
+++ b/doc/source/guides/devstack-with-ldap.rst
@@ -12,14 +12,14 @@
LDAP support in keystone is read-only. You can use it to back an entire
OpenStack deployment to a single LDAP server, or you can use it to back
separate LDAP servers to specific keystone domains. Users within those domains
-will can authenticate against keystone, assume role assignments, and interact
-with other OpenStack services.
+can authenticate against keystone, assume role assignments, and interact with
+other OpenStack services.
Configuration
=============
To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of
-``ENABLED_SERVICES``::
+``ENABLED_SERVICES`` in the ``local.conf`` file::
enable_service ldap
@@ -35,9 +35,9 @@
At this point, devstack should have everything it needs to deploy OpenLDAP,
bootstrap it with a minimal set of users, and configure it to back to a domain
-in keystone::
+in keystone. You can do this by running the ``stack.sh`` script::
- ./stack.sh
+ $ ./stack.sh
Once ``stack.sh`` completes, you should have a running keystone deployment with
a basic set of users. It is important to note that not all users will live
@@ -63,7 +63,7 @@
To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP
user bootstrapped by devstack::
- ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+ $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
-H ldap://localhost -b dc=openstack,dc=org
As you can see, devstack creates an OpenStack domain called ``openstack.org``
@@ -93,7 +93,7 @@
Now, we use the ``Manager`` user to create a user for Peter in LDAP::
- ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+ $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
-H ldap://localhost -c -f peter.ldif.in
We should be able to assign Peter roles on projects. After Peter has some level
@@ -125,7 +125,7 @@
We can use the same basic steps to remove users from LDAP, but instead of using
LDIFs, we can just pass the ``dn`` of the user we want to delete::
- ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+ $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
-H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org
Group Management
@@ -153,7 +153,7 @@
We can create the group using the same ``ldapadd`` command as we did with
users::
- ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+ $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
-H ldap://localhost -c -f guardian-group.ldif.in
If we check the group membership in Horizon, we'll see that only Peter is a
@@ -167,7 +167,7 @@
Just like users, groups can be deleted using the ``dn``::
- ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+ $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
-H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org
Note that this operation will not remove users within that group. It will only
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index b4e2891..c3574ac 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -177,7 +177,7 @@
GLANCE_HOSTPORT=$SERVICE_HOST:9292
ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client
NOVA_VNC_ENABLED=True
- NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
+ NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
VNCSERVER_LISTEN=$HOST_IP
VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
@@ -240,8 +240,8 @@
sudo rm -rf /etc/libvirt/qemu/inst*
sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy
-Options pimp your stack
-=======================
+Going further
+=============
Additional Users
----------------
@@ -302,10 +302,10 @@
DevStack will automatically use an existing LVM volume group named
``stack-volumes`` to store cloud-created volumes. If ``stack-volumes``
-doesn't exist, DevStack will set up a 10Gb loop-mounted file to contain
-it. This obviously limits the number and size of volumes that can be
-created inside OpenStack. The size can be overridden by setting
-``VOLUME_BACKING_FILE_SIZE`` in ``local.conf``.
+doesn't exist, DevStack will set up a loop-mounted file to contain
+it. If the default size is insufficient for the number and size of volumes
+required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in
+``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``).
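+
+For example, a sketch of the override in the ``[[local|localrc]]`` section
+of ``local.conf`` (the ``24G`` value is only illustrative):
+
+::
+
+ VOLUME_BACKING_FILE_SIZE=24G
+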
``stack-volumes`` can be pre-created on any physical volume supported by
Linux's LVM. The name of the volume group can be changed by setting
@@ -369,17 +369,6 @@
Notes stuff you might need to know
==================================
-Reset the Bridge
-----------------
-
-How to reset the bridge configuration:
-
-::
-
- sudo brctl delif br100 eth0.926
- sudo ip link set dev br100 down
- sudo brctl delbr br100
-
Set MySQL Password
------------------
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 7f360c6..80b2f85 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -376,8 +376,8 @@
## Neutron options
Q_USE_SECGROUP=True
- ENABLE_PROJECT_VLANS=True
- PROJECT_VLAN_RANGE=3001:4000
+ ENABLE_TENANT_VLANS=True
+ TENANT_VLAN_RANGE=3001:4000
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-ex
@@ -567,7 +567,7 @@
Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
Q_USE_PROVIDER_NETWORKING=True
- enable_plugin neutron git://git.openstack.org/openstack/neutron
+ enable_plugin neutron https://git.openstack.org/openstack/neutron
## MacVTap agent options
Q_AGENT=macvtap
@@ -622,7 +622,7 @@
# Services that a compute node runs
disable_all_services
- enable_plugin neutron git://git.openstack.org/openstack/neutron
+ enable_plugin neutron https://git.openstack.org/openstack/neutron
ENABLED_SERVICES+=n-cpu,q-agt
## MacVTap agent options
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 0f105d7..65491d1 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -68,3 +68,69 @@
For more information on OpenStack configuration see the `OpenStack
Compute Service Configuration Reference
<https://docs.openstack.org/nova/latest/admin/configuration/index.html>`_
+
+
+Fake virt driver
+================
+
+Nova has a `fake virt driver`_ which can be used for scale testing the control
+plane services or testing "move" operations between fake compute nodes, for
+example cold/live migration, evacuate and unshelve.
+
+The fake virt driver does not communicate with any hypervisor; it just reports
+some fake resource inventory values and keeps track of the state of the
+"guests" created, moved and deleted. It is not feature-complete with the
+compute API but is good enough for most API testing, and is also used within
+the nova functional tests themselves so is fairly robust.
+
+.. _fake virt driver: http://git.openstack.org/cgit/openstack/nova/tree/nova/virt/fake.py
+
+Configuration
+-------------
+
+Set the following in your devstack ``local.conf``:
+
+.. code-block:: ini
+
+ [[local|localrc]]
+ VIRT_DRIVER=fake
+ NUMBER_FAKE_NOVA_COMPUTE=<number>
+
+The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake
+``nova-compute`` services to run and defaults to 1.
+
+When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in
+nova and neutron automatically. However, other services, like cinder, will
+still enforce quota limits by default.
+
+Scaling
+-------
+
+The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors
+such as:
+
+* The size of the host (physical or virtualized) on which devstack is running.
+* The number of API workers. By default, devstack will run ``max($nproc/2, 2)``
+ workers per API service. If you are running several fake compute services on
+ a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``,
+ as in the sketch after this list.
+
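+For example, a sketch of a scaled-up configuration combining the settings
+above (the value ``10`` is only illustrative):
+
+.. code-block:: ini
+
+ [[local|localrc]]
+ VIRT_DRIVER=fake
+ # Number of fake nova-compute services to run (the default is 1)
+ NUMBER_FAKE_NOVA_COMPUTE=10
+ # Keep the per-service API process count low on a single host
+ API_WORKERS=1
+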
+In addition, while quota will be disabled in neutron, there is no fake ML2
+backend for neutron so creating fake VMs will still result in real ports being
+created. To create servers without networking, you can specify ``--nic=none``
+when creating the server, for example:
+
+.. code-block:: shell
+
+ $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
+ --image cirros-0.3.5-x86_64-disk --nic none --wait test-server
+
+.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
+ required to use ``--nic=none``.
+
+To avoid overhead from other services which you may not need, disable them in
+your ``local.conf``, for example:
+
+.. code-block:: ini
+
+ disable_service horizon
+ disable_service tempest
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 48a4fa8..168172c 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -45,31 +45,37 @@
install you can skip this step and just give the user sudo privileges
below)
-::
+.. code-block:: console
- useradd -s /bin/bash -d /opt/stack -m stack
+ $ sudo useradd -s /bin/bash -d /opt/stack -m stack
Since this user will be making many changes to your system, it will need
to have sudo privileges:
-::
+.. code-block:: console
- apt-get install sudo -y || yum install -y sudo
- echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+ $ apt-get install sudo -y || yum install -y sudo
+ $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+.. note:: On some systems you may need to use ``sudo visudo``.
From here on you should use the user you created. **Logout** and
-**login** as that user.
+**login** as that user:
+
+.. code-block:: console
+
+ $ sudo su stack && cd ~
Download DevStack
-----------------
We'll grab the latest version of DevStack via https:
-::
+.. code-block:: console
- sudo apt-get install git -y || sudo yum install -y git
- git clone https://git.openstack.org/openstack-dev/devstack
- cd devstack
+ $ sudo apt-get install git -y || sudo yum install -y git
+ $ git clone https://git.openstack.org/openstack-dev/devstack
+ $ cd devstack
Run DevStack
------------
@@ -97,7 +103,7 @@
``local.conf`` should look something like this:
-::
+.. code-block:: ini
[[local|localrc]]
FLOATING_RANGE=192.168.1.224/27
@@ -109,11 +115,14 @@
RABBIT_PASSWORD=flopsymopsy
SERVICE_PASSWORD=iheartksl
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+ under the *samples* directory in the devstack repository.
+
Run DevStack:
-::
+.. code-block:: console
- ./stack.sh
+ $ ./stack.sh
A seemingly endless stream of activity ensues. When complete you will
see a summary of ``stack.sh``'s work, including the relevant URLs,
@@ -127,7 +136,3 @@
http://192.168.1.201/ for the dashboard (aka Horizon). Launch VMs and if
you give them floating IPs and security group access those VMs will be
accessible from other machines on your network.
-
-Some examples of using the OpenStack command-line clients ``nova`` and
-``glance`` are in the shakedown scripts in ``devstack/exercises``.
-``exercise.sh`` will run all of those scripts and report on the results.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 2ff4ff0..1ea1c5d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,30 +38,32 @@
Install Linux
-------------
-Start with a clean and minimal install of a Linux system. Devstack
-attempts to support Ubuntu 16.04/17.04, Fedora 24/25, CentOS/RHEL 7,
-as well as Debian and OpenSUSE.
+Start with a clean and minimal install of a Linux system. DevStack
+attempts to support the two latest LTS releases of Ubuntu, the
+latest/current Fedora version, CentOS/RHEL 7, as well as Debian and
+OpenSUSE.
-If you do not have a preference, Ubuntu 16.04 is the most tested, and
-will probably go the smoothest.
+If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the
+most tested, and will probably go the smoothest.
-Add Stack User
---------------
+Add Stack User (optional)
+-------------------------
-Devstack should be run as a non-root user with sudo enabled
+DevStack should be run as a non-root user with sudo enabled
(standard logins to cloud images such as "ubuntu" or "cloud-user"
are usually fine).
-You can quickly create a separate `stack` user to run DevStack with
+If you are not using a cloud image, you can create a separate `stack` user
+to run DevStack with
-::
+.. code-block:: console
$ sudo useradd -s /bin/bash -d /opt/stack -m stack
Since this user will be making many changes to your system, it should
have sudo privileges:
-::
+.. code-block:: console
$ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
$ sudo su - stack
@@ -69,20 +71,21 @@
Download DevStack
-----------------
-::
+.. code-block:: console
$ git clone https://git.openstack.org/openstack-dev/devstack
$ cd devstack
The ``devstack`` repo contains a script that installs OpenStack and
-templates for configuration files
+templates for configuration files.
Create a local.conf
-------------------
-Create a ``local.conf`` file with 4 passwords preset at the root of the
+Create a ``local.conf`` file with four passwords preset at the root of the
devstack git repo.
-::
+
+.. code-block:: ini
[[local|localrc]]
ADMIN_PASSWORD=secret
@@ -92,12 +95,15 @@
This is the minimum required config to get started with DevStack.
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+ under the *samples* directory in the devstack repository.
+
Start the install
-----------------
-::
+.. code-block:: console
- ./stack.sh
+ $ ./stack.sh
This will take a 15 - 20 minutes, largely depending on the speed of
your internet connection. Many git trees and packages will be
@@ -109,8 +115,8 @@
You now have a working DevStack! Congrats!
Your devstack will have installed ``keystone``, ``glance``, ``nova``,
-``cinder``, ``neutron``, and ``horizon``. Floating IPs will be
-available, guests have access to the external world.
+``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs
+will be available, guests have access to the external world.
You can access horizon to experience the web interface to
OpenStack, and manage vms, networks, volumes, and images from
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 814a2b1..a609333 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -64,7 +64,8 @@
The default services configured by DevStack are Identity (keystone),
Object Storage (swift), Image Service (glance), Block Storage
-(cinder), Compute (nova), Networking (neutron), Dashboard (horizon)
+(cinder), Compute (nova), Placement (placement),
+Networking (neutron), and Dashboard (horizon).
Additional services not included directly in DevStack can be tied in to
``stack.sh`` using the :doc:`plugin mechanism <plugins>` to call
@@ -75,11 +76,3 @@
- single node
- multi-node configurations as are tested by the gate
-
-Exercises
----------
-
-The DevStack exercise scripts are no longer used as integration and gate
-testing as that job has transitioned to Tempest. They are still
-maintained as a demonstrations of using OpenStack from the command line
-and for quick operational testing.
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 7ad65f7..93c16f4 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -24,173 +24,186 @@
====================================== ===
Plugin Name URL
====================================== ===
-almanach `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
-aodh `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
-apmec `git://git.openstack.org/openstack/apmec <https://git.openstack.org/cgit/openstack/apmec>`__
-astara `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
-barbican `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
-bilean `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
-blazar `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
-broadview-collector `git://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
-castellan-ui `git://git.openstack.org/openstack/castellan-ui <https://git.openstack.org/cgit/openstack/castellan-ui>`__
-ceilometer `git://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
-ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm <https://git.openstack.org/cgit/openstack/ceilometer-powervm>`__
-cloudkitty `git://git.openstack.org/openstack/cloudkitty <https://git.openstack.org/cgit/openstack/cloudkitty>`__
-collectd-openstack-plugins `git://git.openstack.org/openstack/collectd-openstack-plugins <https://git.openstack.org/cgit/openstack/collectd-openstack-plugins>`__
-congress `git://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
-cyborg `git://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
-designate `git://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
-devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
-devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
-devstack-plugin-bdd `git://git.openstack.org/openstack/devstack-plugin-bdd <https://git.openstack.org/cgit/openstack/devstack-plugin-bdd>`__
-devstack-plugin-ceph `git://git.openstack.org/openstack/devstack-plugin-ceph <https://git.openstack.org/cgit/openstack/devstack-plugin-ceph>`__
-devstack-plugin-container `git://git.openstack.org/openstack/devstack-plugin-container <https://git.openstack.org/cgit/openstack/devstack-plugin-container>`__
-devstack-plugin-glusterfs `git://git.openstack.org/openstack/devstack-plugin-glusterfs <https://git.openstack.org/cgit/openstack/devstack-plugin-glusterfs>`__
-devstack-plugin-hdfs `git://git.openstack.org/openstack/devstack-plugin-hdfs <https://git.openstack.org/cgit/openstack/devstack-plugin-hdfs>`__
-devstack-plugin-kafka `git://git.openstack.org/openstack/devstack-plugin-kafka <https://git.openstack.org/cgit/openstack/devstack-plugin-kafka>`__
-devstack-plugin-libvirt-qemu `git://git.openstack.org/openstack/devstack-plugin-libvirt-qemu <https://git.openstack.org/cgit/openstack/devstack-plugin-libvirt-qemu>`__
-devstack-plugin-mariadb `git://git.openstack.org/openstack/devstack-plugin-mariadb <https://git.openstack.org/cgit/openstack/devstack-plugin-mariadb>`__
-devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
-devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
-devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
-devstack-plugin-vmax `git://git.openstack.org/openstack/devstack-plugin-vmax <https://git.openstack.org/cgit/openstack/devstack-plugin-vmax>`__
-devstack-plugin-zmq `git://git.openstack.org/openstack/devstack-plugin-zmq <https://git.openstack.org/cgit/openstack/devstack-plugin-zmq>`__
-dragonflow `git://git.openstack.org/openstack/dragonflow <https://git.openstack.org/cgit/openstack/dragonflow>`__
-drbd-devstack `git://git.openstack.org/openstack/drbd-devstack <https://git.openstack.org/cgit/openstack/drbd-devstack>`__
-ec2-api `git://git.openstack.org/openstack/ec2-api <https://git.openstack.org/cgit/openstack/ec2-api>`__
-freezer `git://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
-freezer-api `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
-freezer-tempest-plugin `git://git.openstack.org/openstack/freezer-tempest-plugin <https://git.openstack.org/cgit/openstack/freezer-tempest-plugin>`__
-freezer-web-ui `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
-fuxi `git://git.openstack.org/openstack/fuxi <https://git.openstack.org/cgit/openstack/fuxi>`__
-gce-api `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
-glare `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
-group-based-policy `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
-heat `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
-heat-dashboard `git://git.openstack.org/openstack/heat-dashboard <https://git.openstack.org/cgit/openstack/heat-dashboard>`__
-horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
-ironic `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
-ironic-inspector `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
-ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
-ironic-ui `git://git.openstack.org/openstack/ironic-ui <https://git.openstack.org/cgit/openstack/ironic-ui>`__
-karbor `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
-karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
-keystone `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
-kingbird `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
-kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
-kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
-kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin <https://git.openstack.org/cgit/openstack/kuryr-tempest-plugin>`__
-magnum `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
-magnum-ui `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
-manila `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
-manila-ui `git://git.openstack.org/openstack/manila-ui <https://git.openstack.org/cgit/openstack/manila-ui>`__
-masakari `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
-meteos `git://git.openstack.org/openstack/meteos <https://git.openstack.org/cgit/openstack/meteos>`__
-meteos-ui `git://git.openstack.org/openstack/meteos-ui <https://git.openstack.org/cgit/openstack/meteos-ui>`__
-mistral `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
-mixmatch `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
-mogan `git://git.openstack.org/openstack/mogan <https://git.openstack.org/cgit/openstack/mogan>`__
-mogan-ui `git://git.openstack.org/openstack/mogan-ui <https://git.openstack.org/cgit/openstack/mogan-ui>`__
-monasca-analytics `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
-monasca-api `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
-monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
-monasca-events-api `git://git.openstack.org/openstack/monasca-events-api <https://git.openstack.org/cgit/openstack/monasca-events-api>`__
-monasca-log-api `git://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
-monasca-tempest-plugin `git://git.openstack.org/openstack/monasca-tempest-plugin <https://git.openstack.org/cgit/openstack/monasca-tempest-plugin>`__
-monasca-transform `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
-murano `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
-networking-6wind `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
-networking-ansible `git://git.openstack.org/openstack/networking-ansible <https://git.openstack.org/cgit/openstack/networking-ansible>`__
-networking-arista `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
-networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
-networking-baremetal `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
-networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
-networking-brocade `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
-networking-calico `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
-networking-cisco `git://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
-networking-cumulus `git://git.openstack.org/openstack/networking-cumulus <https://git.openstack.org/cgit/openstack/networking-cumulus>`__
-networking-dpm `git://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
-networking-fortinet `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
-networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
-networking-hpe `git://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
-networking-huawei `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
-networking-hyperv `git://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
-networking-infoblox `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
-networking-l2gw `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
-networking-lagopus `git://git.openstack.org/openstack/networking-lagopus <https://git.openstack.org/cgit/openstack/networking-lagopus>`__
-networking-midonet `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
-networking-mlnx `git://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
-networking-nec `git://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
-networking-odl `git://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
-networking-onos `git://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
-networking-opencontrail `git://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
-networking-ovn `git://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
-networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
-networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
-networking-powervm `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
-networking-sfc `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
-networking-spp `git://git.openstack.org/openstack/networking-spp <https://git.openstack.org/cgit/openstack/networking-spp>`__
-networking-vpp `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
-networking-vsphere `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
-neutron `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
-neutron-classifier `git://git.openstack.org/openstack/neutron-classifier <https://git.openstack.org/cgit/openstack/neutron-classifier>`__
-neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
-neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas>`__
-neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-fwaas-dashboard>`__
-neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
-neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
-neutron-tempest-plugin `git://git.openstack.org/openstack/neutron-tempest-plugin <https://git.openstack.org/cgit/openstack/neutron-tempest-plugin>`__
-neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
-neutron-vpnaas-dashboard `git://git.openstack.org/openstack/neutron-vpnaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-vpnaas-dashboard>`__
-nova-dpm `git://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
-nova-lxd `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
-nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
-nova-powervm `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
-oaktree `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
-octavia `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
-octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
-omni `git://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
-openstacksdk `git://git.openstack.org/openstack/openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`__
-os-xenapi `git://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
-osprofiler `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
-oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
-panko `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-patrole `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
-picasso `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
-qinling `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
-rally `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
-rally-openstack `git://git.openstack.org/openstack/rally-openstack <https://git.openstack.org/cgit/openstack/rally-openstack>`__
-sahara `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
-sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
-scalpels `git://git.openstack.org/openstack/scalpels <https://git.openstack.org/cgit/openstack/scalpels>`__
-searchlight `git://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
-searchlight-ui `git://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
-senlin `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
-slogging `git://git.openstack.org/openstack/slogging <https://git.openstack.org/cgit/openstack/slogging>`__
-solum `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
-stackube `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
-storlets `git://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
-tacker `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
-tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
-tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
-tatu `git://git.openstack.org/openstack/tatu <https://git.openstack.org/cgit/openstack/tatu>`__
-telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin <https://git.openstack.org/cgit/openstack/telemetry-tempest-plugin>`__
-tricircle `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
-trio2o `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
-trove `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
-trove-dashboard `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
-valet `git://git.openstack.org/openstack/valet <https://git.openstack.org/cgit/openstack/valet>`__
-vitrage `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
-vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard <https://git.openstack.org/cgit/openstack/vitrage-dashboard>`__
-vitrage-tempest-plugin `git://git.openstack.org/openstack/vitrage-tempest-plugin <https://git.openstack.org/cgit/openstack/vitrage-tempest-plugin>`__
-vmware-nsx `git://git.openstack.org/openstack/vmware-nsx <https://git.openstack.org/cgit/openstack/vmware-nsx>`__
-vmware-vspc `git://git.openstack.org/openstack/vmware-vspc <https://git.openstack.org/cgit/openstack/vmware-vspc>`__
-watcher `git://git.openstack.org/openstack/watcher <https://git.openstack.org/cgit/openstack/watcher>`__
-watcher-dashboard `git://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
-zaqar `git://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
-zaqar-ui `git://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
-zun `git://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
-zun-ui `git://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
+almanach `https://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
+aodh `https://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
+apmec `https://git.openstack.org/openstack/apmec <https://git.openstack.org/cgit/openstack/apmec>`__
+barbican `https://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
+bilean `https://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
+blazar `https://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
+broadview-collector `https://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
+castellan-ui `https://git.openstack.org/openstack/castellan-ui <https://git.openstack.org/cgit/openstack/castellan-ui>`__
+ceilometer `https://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
+ceilometer-powervm `https://git.openstack.org/openstack/ceilometer-powervm <https://git.openstack.org/cgit/openstack/ceilometer-powervm>`__
+cinderlib `https://git.openstack.org/openstack/cinderlib <https://git.openstack.org/cgit/openstack/cinderlib>`__
+cloudkitty `https://git.openstack.org/openstack/cloudkitty <https://git.openstack.org/cgit/openstack/cloudkitty>`__
+collectd-openstack-plugins `https://git.openstack.org/openstack/collectd-openstack-plugins <https://git.openstack.org/cgit/openstack/collectd-openstack-plugins>`__
+congress `https://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
+cyborg `https://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
+designate `https://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
+devstack-plugin-additional-pkg-repos `https://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
+devstack-plugin-amqp1 `https://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
+devstack-plugin-bdd `https://git.openstack.org/openstack/devstack-plugin-bdd <https://git.openstack.org/cgit/openstack/devstack-plugin-bdd>`__
+devstack-plugin-ceph `https://git.openstack.org/openstack/devstack-plugin-ceph <https://git.openstack.org/cgit/openstack/devstack-plugin-ceph>`__
+devstack-plugin-container `https://git.openstack.org/openstack/devstack-plugin-container <https://git.openstack.org/cgit/openstack/devstack-plugin-container>`__
+devstack-plugin-glusterfs `https://git.openstack.org/openstack/devstack-plugin-glusterfs <https://git.openstack.org/cgit/openstack/devstack-plugin-glusterfs>`__
+devstack-plugin-hdfs `https://git.openstack.org/openstack/devstack-plugin-hdfs <https://git.openstack.org/cgit/openstack/devstack-plugin-hdfs>`__
+devstack-plugin-kafka `https://git.openstack.org/openstack/devstack-plugin-kafka <https://git.openstack.org/cgit/openstack/devstack-plugin-kafka>`__
+devstack-plugin-libvirt-qemu `https://git.openstack.org/openstack/devstack-plugin-libvirt-qemu <https://git.openstack.org/cgit/openstack/devstack-plugin-libvirt-qemu>`__
+devstack-plugin-mariadb `https://git.openstack.org/openstack/devstack-plugin-mariadb <https://git.openstack.org/cgit/openstack/devstack-plugin-mariadb>`__
+devstack-plugin-nfs `https://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
+devstack-plugin-pika `https://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
+devstack-plugin-sheepdog `https://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
+devstack-plugin-vmax `https://git.openstack.org/openstack/devstack-plugin-vmax <https://git.openstack.org/cgit/openstack/devstack-plugin-vmax>`__
+devstack-plugin-zmq `https://git.openstack.org/openstack/devstack-plugin-zmq <https://git.openstack.org/cgit/openstack/devstack-plugin-zmq>`__
+dragonflow `https://git.openstack.org/openstack/dragonflow <https://git.openstack.org/cgit/openstack/dragonflow>`__
+drbd-devstack `https://git.openstack.org/openstack/drbd-devstack <https://git.openstack.org/cgit/openstack/drbd-devstack>`__
+ec2-api `https://git.openstack.org/openstack/ec2-api <https://git.openstack.org/cgit/openstack/ec2-api>`__
+freezer `https://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
+freezer-api `https://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
+freezer-tempest-plugin `https://git.openstack.org/openstack/freezer-tempest-plugin <https://git.openstack.org/cgit/openstack/freezer-tempest-plugin>`__
+freezer-web-ui `https://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
+gce-api `https://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
+glare `https://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
+group-based-policy `https://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
+gyan `https://git.openstack.org/openstack/gyan <https://git.openstack.org/cgit/openstack/gyan>`__
+heat `https://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
+heat-dashboard `https://git.openstack.org/openstack/heat-dashboard <https://git.openstack.org/cgit/openstack/heat-dashboard>`__
+horizon-mellanox `https://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
+ironic `https://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
+ironic-inspector `https://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
+ironic-staging-drivers `https://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
+ironic-ui `https://git.openstack.org/openstack/ironic-ui <https://git.openstack.org/cgit/openstack/ironic-ui>`__
+karbor `https://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
+karbor-dashboard `https://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
+keystone `https://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
+kingbird `https://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
+kuryr-kubernetes `https://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
+kuryr-libnetwork `https://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
+kuryr-tempest-plugin `https://git.openstack.org/openstack/kuryr-tempest-plugin <https://git.openstack.org/cgit/openstack/kuryr-tempest-plugin>`__
+magnum `https://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
+magnum-ui `https://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
+manila `https://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
+manila-tempest-plugin `https://git.openstack.org/openstack/manila-tempest-plugin <https://git.openstack.org/cgit/openstack/manila-tempest-plugin>`__
+manila-ui `https://git.openstack.org/openstack/manila-ui <https://git.openstack.org/cgit/openstack/manila-ui>`__
+masakari `https://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
+meteos `https://git.openstack.org/openstack/meteos <https://git.openstack.org/cgit/openstack/meteos>`__
+meteos-ui `https://git.openstack.org/openstack/meteos-ui <https://git.openstack.org/cgit/openstack/meteos-ui>`__
+mistral `https://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
+mixmatch `https://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
+mogan `https://git.openstack.org/openstack/mogan <https://git.openstack.org/cgit/openstack/mogan>`__
+mogan-ui `https://git.openstack.org/openstack/mogan-ui <https://git.openstack.org/cgit/openstack/mogan-ui>`__
+monasca-analytics `https://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
+monasca-api `https://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
+monasca-ceilometer `https://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
+monasca-events-api `https://git.openstack.org/openstack/monasca-events-api <https://git.openstack.org/cgit/openstack/monasca-events-api>`__
+monasca-log-api `https://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
+monasca-tempest-plugin `https://git.openstack.org/openstack/monasca-tempest-plugin <https://git.openstack.org/cgit/openstack/monasca-tempest-plugin>`__
+monasca-transform `https://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
+murano `https://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
+networking-6wind `https://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
+networking-ansible `https://git.openstack.org/openstack/networking-ansible <https://git.openstack.org/cgit/openstack/networking-ansible>`__
+networking-arista `https://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
+networking-bagpipe `https://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
+networking-baremetal `https://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
+networking-bgpvpn `https://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
+networking-brocade `https://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
+networking-calico `https://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
+networking-cisco `https://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
+networking-cumulus `https://git.openstack.org/openstack/networking-cumulus <https://git.openstack.org/cgit/openstack/networking-cumulus>`__
+networking-dpm `https://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
+networking-fortinet `https://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
+networking-generic-switch `https://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
+networking-hpe `https://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
+networking-huawei `https://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
+networking-hyperv `https://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
+networking-infoblox `https://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
+networking-l2gw `https://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
+networking-lagopus `https://git.openstack.org/openstack/networking-lagopus <https://git.openstack.org/cgit/openstack/networking-lagopus>`__
+networking-midonet `https://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
+networking-mlnx `https://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
+networking-nec `https://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
+networking-odl `https://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
+networking-omnipath `https://git.openstack.org/openstack/networking-omnipath <https://git.openstack.org/cgit/openstack/networking-omnipath>`__
+networking-onos `https://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
+networking-opencontrail `https://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
+networking-ovn `https://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
+networking-ovs-dpdk `https://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
+networking-plumgrid `https://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
+networking-powervm `https://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
+networking-sfc `https://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
+networking-spp `https://git.openstack.org/openstack/networking-spp <https://git.openstack.org/cgit/openstack/networking-spp>`__
+networking-vpp `https://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
+networking-vsphere `https://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
+neutron `https://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
+neutron-classifier `https://git.openstack.org/openstack/neutron-classifier <https://git.openstack.org/cgit/openstack/neutron-classifier>`__
+neutron-dynamic-routing `https://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
+neutron-fwaas `https://git.openstack.org/openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas>`__
+neutron-fwaas-dashboard `https://git.openstack.org/openstack/neutron-fwaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-fwaas-dashboard>`__
+neutron-lbaas `https://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
+neutron-lbaas-dashboard `https://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
+neutron-tempest-plugin `https://git.openstack.org/openstack/neutron-tempest-plugin <https://git.openstack.org/cgit/openstack/neutron-tempest-plugin>`__
+neutron-vpnaas `https://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
+neutron-vpnaas-dashboard `https://git.openstack.org/openstack/neutron-vpnaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-vpnaas-dashboard>`__
+nova-dpm `https://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
+nova-lxd `https://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
+nova-mksproxy `https://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
+nova-powervm `https://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
+oaktree `https://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
+octavia `https://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
+octavia-dashboard `https://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
+omni `https://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
+openstacksdk `https://git.openstack.org/openstack/openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`__
+os-faults `https://git.openstack.org/openstack/os-faults <https://git.openstack.org/cgit/openstack/os-faults>`__
+os-xenapi `https://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
+osprofiler `https://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
+oswin-tempest-plugin `https://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
+panko `https://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
+patrole `https://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
+picasso `https://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
+qinling `https://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
+qinling-dashboard `https://git.openstack.org/openstack/qinling-dashboard <https://git.openstack.org/cgit/openstack/qinling-dashboard>`__
+rally `https://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
+rally-openstack `https://git.openstack.org/openstack/rally-openstack <https://git.openstack.org/cgit/openstack/rally-openstack>`__
+rsd-virt-for-nova `https://git.openstack.org/openstack/rsd-virt-for-nova <https://git.openstack.org/cgit/openstack/rsd-virt-for-nova>`__
+sahara `https://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
+sahara-dashboard `https://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
+scalpels `https://git.openstack.org/openstack/scalpels <https://git.openstack.org/cgit/openstack/scalpels>`__
+searchlight `https://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
+searchlight-ui `https://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
+senlin `https://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
+slogging `https://git.openstack.org/openstack/slogging <https://git.openstack.org/cgit/openstack/slogging>`__
+solum `https://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
+stackube `https://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
+storlets `https://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
+stx-config `https://git.openstack.org/openstack/stx-config <https://git.openstack.org/cgit/openstack/stx-config>`__
+stx-fault `https://git.openstack.org/openstack/stx-fault <https://git.openstack.org/cgit/openstack/stx-fault>`__
+stx-ha `https://git.openstack.org/openstack/stx-ha <https://git.openstack.org/cgit/openstack/stx-ha>`__
+stx-integ `https://git.openstack.org/openstack/stx-integ <https://git.openstack.org/cgit/openstack/stx-integ>`__
+stx-metal `https://git.openstack.org/openstack/stx-metal <https://git.openstack.org/cgit/openstack/stx-metal>`__
+stx-nfv `https://git.openstack.org/openstack/stx-nfv <https://git.openstack.org/cgit/openstack/stx-nfv>`__
+stx-update `https://git.openstack.org/openstack/stx-update <https://git.openstack.org/cgit/openstack/stx-update>`__
+tacker `https://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
+tap-as-a-service `https://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
+tap-as-a-service-dashboard `https://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
+tatu `https://git.openstack.org/openstack/tatu <https://git.openstack.org/cgit/openstack/tatu>`__
+telemetry-tempest-plugin `https://git.openstack.org/openstack/telemetry-tempest-plugin <https://git.openstack.org/cgit/openstack/telemetry-tempest-plugin>`__
+tobiko `https://git.openstack.org/openstack/tobiko <https://git.openstack.org/cgit/openstack/tobiko>`__
+tricircle `https://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
+trio2o `https://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
+trove `https://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
+trove-dashboard `https://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
+valet `https://git.openstack.org/openstack/valet <https://git.openstack.org/cgit/openstack/valet>`__
+vitrage `https://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
+vitrage-dashboard `https://git.openstack.org/openstack/vitrage-dashboard <https://git.openstack.org/cgit/openstack/vitrage-dashboard>`__
+vitrage-tempest-plugin `https://git.openstack.org/openstack/vitrage-tempest-plugin <https://git.openstack.org/cgit/openstack/vitrage-tempest-plugin>`__
+vmware-nsx `https://git.openstack.org/openstack/vmware-nsx <https://git.openstack.org/cgit/openstack/vmware-nsx>`__
+vmware-vspc `https://git.openstack.org/openstack/vmware-vspc <https://git.openstack.org/cgit/openstack/vmware-vspc>`__
+watcher `https://git.openstack.org/openstack/watcher <https://git.openstack.org/cgit/openstack/watcher>`__
+watcher-dashboard `https://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
+zaqar `https://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
+zaqar-ui `https://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
+zun `https://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
+zun-ui `https://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
====================================== ===
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 89b9381..b1f2397 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -58,7 +58,7 @@
plugin's name, which is the name that should be used by users on
"enable_plugin" lines. It should generally be the last component of
the git repo path (e.g., if the plugin's repo is
- openstack/devstack-foo, then the name here should be "foo") ::
+ openstack/foo, then the name here should be "foo") ::
define_plugin <YOUR PLUGIN>
@@ -99,7 +99,7 @@
An example would be as follows::
- enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
+ enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api
plugin.sh contract
==================
@@ -148,7 +148,7 @@
``devstack/settings``::
- # settings file for template
+ # settings file for template
enable_service template
@@ -277,7 +277,7 @@
# note the actual url here is somewhat irrelevant because it
# caches in nodepool, however make it a valid url for
# documentation purposes.
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
+ export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api"
See Also
========
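
As a rough illustration of the https URL scheme used throughout the updated registry, a minimal ``local.conf`` fragment enabling the ``ec2-api`` plugin (the same example repository used in the hunk above) might look like::

    [[local|localrc]]
    enable_plugin ec2-api https://git.openstack.org/openstack/ec2-api
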
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
index c00f06e..633f951 100644
--- a/doc/source/zuul_ci_jobs_migration.rst
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -102,7 +102,6 @@
tox_envlist: 'all'
devstack_localrc:
KURYR_K8S_API_PORT: 8080
- TEMPEST_PLUGINS: '/opt/stack/kuryr-tempest-plugin'
devstack_services:
kubernetes-api: true
kubernetes-controller-manager: true
@@ -114,6 +113,8 @@
kuryr-kubernetes: https://git.openstack.org/openstack/kuryr
devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container
neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas
+ tempest_plugins:
+ - kuryr-tempest-plugin
(...)
Job variables
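
As a hedged sketch of the migration shown above (the job name and parent are illustrative; only the variables mirror the excerpt), a native Zuul job carries the Tempest plugin in ``tempest_plugins`` rather than in ``devstack_localrc``::

    - job:
        name: kuryr-kubernetes-tempest-example   # hypothetical job name
        parent: devstack-tempest                  # assumed base job
        vars:
          tox_envlist: 'all'
          devstack_localrc:
            KURYR_K8S_API_PORT: 8080
          devstack_plugins:
            devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container
          tempest_plugins:
            - kuryr-tempest-plugin
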
diff --git a/exercise.sh b/exercise.sh
deleted file mode 100755
index 9067033..0000000
--- a/exercise.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-# **exercise.sh**
-
-# Keep track of the current DevStack directory.
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-# Run everything in the exercises/ directory that isn't explicitly disabled
-
-# comma separated list of script basenames to skip
-# to refrain from exercising foo.sh use ``SKIP_EXERCISES=foo``
-SKIP_EXERCISES=${SKIP_EXERCISES:-""}
-
-# comma separated list of script basenames to run
-# to run only foo.sh use ``RUN_EXERCISES=foo``
-basenames=${RUN_EXERCISES:-""}
-
-EXERCISE_DIR=$TOP_DIR/exercises
-
-if [[ -z "${basenames}" ]]; then
- # Locate the scripts we should run
- basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
-else
- # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``.
- SKIP_EXERCISES=
-fi
-
-# Track the state of each script
-passes=""
-failures=""
-skips=""
-
-# Loop over each possible script (by basename)
-for script in $basenames; do
- if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then
- skips="$skips $script"
- else
- echo "====================================================================="
- echo Running $script
- echo "====================================================================="
- $EXERCISE_DIR/$script.sh
- exitcode=$?
- if [[ $exitcode == 55 ]]; then
- skips="$skips $script"
- elif [[ $exitcode -ne 0 ]]; then
- failures="$failures $script"
- else
- passes="$passes $script"
- fi
- fi
-done
-
-# Output status of exercise run
-echo "====================================================================="
-for script in $skips; do
- echo SKIP $script
-done
-for script in $passes; do
- echo PASS $script
-done
-for script in $failures; do
- echo FAILED $script
-done
-echo "====================================================================="
-
-if [[ -n "$failures" ]]; then
- exit 1
-fi
diff --git a/exerciserc b/exerciserc
deleted file mode 100644
index 978e0b3..0000000
--- a/exerciserc
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-# source exerciserc
-#
-# Configure the DevStack exercise scripts
-# For best results, source this _after_ stackrc/localrc as it will set
-# values only if they are not already set.
-
-# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
-
-# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
-
-# Max time from run instance command until it is running
-export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
-
-# Max time to wait for a vm to terminate
-export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
-
-# The size of the volume we want to boot from; some storage back-ends
-# do not allow a disk resize, so it's important that this can be tuned
-export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
deleted file mode 100755
index 8cbca54..0000000
--- a/exercises/aggregates.sh
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env bash
-
-# **aggregates.sh**
-
-# This script demonstrates how to use host aggregates:
-#
-# * Create an Aggregate
-# * Updating Aggregate details
-# * Testing Aggregate metadata
-# * Testing Aggregate delete
-# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
-# * Testing add/remove hosts (with one host)
-
-echo "**************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "**************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Test as the admin user
-# note this imports stackrc/functions, etc
-. $TOP_DIR/openrc admin admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Cells does not support aggregates.
-is_service_enabled n-cell && exit 55
-
-# Create an aggregate
-# ===================
-
-AGGREGATE_NAME=test_aggregate_$RANDOM
-AGGREGATE2_NAME=test_aggregate_$RANDOM
-AGGREGATE_A_ZONE=nova
-
-function exit_if_aggregate_present {
- aggregate_name=$1
-
- if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
- echo "SUCCESS $aggregate_name not present"
- else
- die $LINENO "found aggregate: $aggregate_name"
- exit -1
- fi
-}
-
-exit_if_aggregate_present $AGGREGATE_NAME
-
-AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
-
-AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
-die_if_not_set $LINENO AGGREGATE2_ID "Fail creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
-
-# check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
-
-
-# Ensure creating a duplicate fails
-# =================================
-
-if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
- die $LINENO "could create duplicate aggregate"
-fi
-
-
-# Test aggregate-update (and aggregate-details)
-# =============================================
-AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
-nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
-
-
-# Test aggregate-set-metadata
-# ===========================
-META_DATA_1_KEY=asdf
-META_DATA_2_KEY=foo
-META_DATA_3_KEY=bar
-
-#ensure no additional metadata is set
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep 123
-
-nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
-
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared"
-
-nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
-nova aggregate-details $AGGREGATE_ID | egrep "\|[{u ]*'availability_zone.+$AGGREGATE_A_ZONE'[ }]*\|"
-
-
-# Test aggregate-add/remove-host
-# ==============================
-if [ "$VIRT_DRIVER" == "xenserver" ]; then
- echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
-fi
-FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
-# Make sure can add two aggregates to same host
-nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
-nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
-if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
- die $LINENO "could add duplicate host to single aggregate"
-fi
-nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
-nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
-
-# Test aggregate-delete
-# =====================
-nova aggregate-delete $AGGREGATE_ID
-nova aggregate-delete $AGGREGATE2_ID
-exit_if_aggregate_present $AGGREGATE_NAME
-
-set +o xtrace
-echo "**************************************************"
-echo "End DevStack Exercise: $0"
-echo "**************************************************"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
deleted file mode 100755
index 7478bdf..0000000
--- a/exercises/boot_from_volume.sh
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/env bash
-
-# **boot_from_volume.sh**
-
-# This script demonstrates how to boot from a volume. It does the following:
-#
-# * Create a bootable volume
-# * Boot a volume-backed instance
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled cinder || exit 55
-
-# Ironic does not support boot from volume.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-boot_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-bfv-inst}
-VOL_NAME=${VOL_NAME:-ex-vol-bfv}
-
-
-# Launching a server
-# ==================
-
-# List servers for project:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
- # Cells does not support security groups, so force the use of "default"
- SECGROUP="default"
- echo "Using the default security group because of Cells."
-else
- # Create a secgroup
- if ! nova secgroup-list | grep -q $SECGROUP; then
- nova secgroup-create $SECGROUP "$SECGROUP description"
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
- fi
- fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
- nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
- nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
- # grab the first flavor in the list to launch if default doesn't exist
- INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
- echo "server didn't terminate!"
- exit 1
-fi
-
-# Setup Keypair
-KEY_NAME=test_key
-KEY_FILE=key.pem
-nova keypair-delete $KEY_NAME || true
-nova keypair-add $KEY_NAME > $KEY_FILE
-chmod 600 $KEY_FILE
-
-# Set up volume
-# -------------
-
-# Delete any old volume
-cinder delete $VOL_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
- echo "Volume $VOL_NAME not deleted"
- exit 1
-fi
-
-# Create the bootable volume
-start_time=$(date +%s)
-cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
- die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- echo "Volume $VOL_NAME not created"
- exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Boot instance
-# -------------
-
-# Boot using the --block-device-mapping param. The format of mapping is:
-# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
-# Leaving the middle two fields blank appears to do-the-right-thing
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server didn't become active!"
- exit 1
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Clean up
-# --------
-
-# Delete volume backed instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- echo "Server $VM_NAME not deleted"
- exit 1
-fi
-
-# Wait for volume to be released
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- echo "Volume $VOL_NAME not released"
- exit 1
-fi
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
- echo "Volume $VOL_NAME not deleted"
- exit 1
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-if [[ $SECGROUP = "default" ]] ; then
- echo "Skipping deleting default security group"
-else
- # Delete secgroup
- nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
deleted file mode 100755
index b380968..0000000
--- a/exercises/client-args.sh
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-args.sh**
-
-# Test OpenStack client authentication arguments handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-# Save the known variables for later
-export x_PROJECT_NAME=$OS_PROJECT_NAME
-export x_USERNAME=$OS_USERNAME
-export x_PASSWORD=$OS_PASSWORD
-export x_AUTH_URL=$OS_AUTH_URL
-
-# Unset the usual variables to force argument processing
-unset OS_PROJECT_NAME
-unset OS_USERNAME
-unset OS_PASSWORD
-unset OS_AUTH_URL
-
-# Common authentication args
-PROJECT_ARG="--os-project-name=$x_PROJECT_NAME"
-ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
- STATUS_KEYSTONE="Skipped"
- else
- echo -e "\nTest Keystone"
- if openstack $PROJECT_ARG $ARGS catalog show identity; then
- STATUS_KEYSTONE="Succeeded"
- else
- STATUS_KEYSTONE="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
- STATUS_NOVA="Skipped"
- else
- # Test OSAPI
- echo -e "\nTest Nova"
- if nova $PROJECT_ARG $ARGS flavor-list; then
- STATUS_NOVA="Succeeded"
- else
- STATUS_NOVA="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
- STATUS_CINDER="Skipped"
- else
- echo -e "\nTest Cinder"
- if cinder $PROJECT_ARG $ARGS list; then
- STATUS_CINDER="Succeeded"
- else
- STATUS_CINDER="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
- STATUS_GLANCE="Skipped"
- else
- echo -e "\nTest Glance"
- if openstack $PROJECT_ARG $ARGS image list; then
- STATUS_GLANCE="Succeeded"
- else
- STATUS_GLANCE="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Swift client
-# ------------
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
- STATUS_SWIFT="Skipped"
- else
- echo -e "\nTest Swift"
- if swift $PROJECT_ARG $ARGS stat; then
- STATUS_SWIFT="Succeeded"
- else
- STATUS_SWIFT="Failed"
- RETURN=1
- fi
- fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
- if [[ -n "$2" ]]; then
- echo "$1: $2"
- fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
- echo "*********************************************************************"
- echo "SUCCESS: End DevStack Exercise: $0"
- echo "*********************************************************************"
-fi
-
-exit $RETURN
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
deleted file mode 100755
index fff04df..0000000
--- a/exercises/client-env.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env bash
-
-# **client-env.sh**
-
-# Test OpenStack client environment variable handling
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc admin
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Unset all of the known NOVA_* vars
-unset NOVA_API_KEY
-unset NOVA_ENDPOINT_NAME
-unset NOVA_PASSWORD
-unset NOVA_PROJECT_ID
-unset NOVA_REGION_NAME
-unset NOVA_URL
-unset NOVA_USERNAME
-
-for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
- is_set $i
- if [[ $? -ne 0 ]]; then
- echo "$i expected to be set"
- ABORT=1
- fi
-done
-if [[ -n "$ABORT" ]]; then
- exit 1
-fi
-
-# Set global return
-RETURN=0
-
-# Keystone client
-# ---------------
-if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
- STATUS_KEYSTONE="Skipped"
- else
- echo -e "\nTest Keystone"
- if openstack endpoint show identity; then
- STATUS_KEYSTONE="Succeeded"
- else
- STATUS_KEYSTONE="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Nova client
-# -----------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
- STATUS_NOVA="Skipped"
- else
- # Test OSAPI
- echo -e "\nTest Nova"
- if nova flavor-list; then
- STATUS_NOVA="Succeeded"
- else
- STATUS_NOVA="Failed"
- RETURN=1
- fi
-
- fi
-fi
-
-# Cinder client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
- STATUS_CINDER="Skipped"
- else
- echo -e "\nTest Cinder"
- if cinder list; then
- STATUS_CINDER="Succeeded"
- else
- STATUS_CINDER="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Glance client
-# -------------
-
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
- STATUS_GLANCE="Skipped"
- else
- echo -e "\nTest Glance"
- if openstack image list; then
- STATUS_GLANCE="Succeeded"
- else
- STATUS_GLANCE="Failed"
- RETURN=1
- fi
- fi
-fi
-
-# Swift client
-# ------------
-
-
-if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
- if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
- STATUS_SWIFT="Skipped"
- else
- echo -e "\nTest Swift"
- if swift stat; then
- STATUS_SWIFT="Succeeded"
- else
- STATUS_SWIFT="Failed"
- RETURN=1
- fi
- fi
-fi
-
-set +o xtrace
-
-
-# Results
-# =======
-
-function report {
- if [[ -n "$2" ]]; then
- echo "$1: $2"
- fi
-}
-
-echo -e "\n"
-report "Keystone" $STATUS_KEYSTONE
-report "Nova" $STATUS_NOVA
-report "Cinder" $STATUS_CINDER
-report "Glance" $STATUS_GLANCE
-report "Swift" $STATUS_SWIFT
-
-if (( $RETURN == 0 )); then
- echo "*********************************************************************"
- echo "SUCCESS: End DevStack Exercise: $0"
- echo "*********************************************************************"
-fi
-
-exit $RETURN
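
client-env.sh leaned on DevStack's is_set helper to verify the credential variables before touching any client. A minimal standalone sketch of the same pre-flight check, using plain bash indirection instead of the functions library (the helper-free form is an assumption):

    #!/usr/bin/env bash
    # Abort early if any credential variable the clients need is unset.
    ABORT=0
    for var in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
        if [[ -z "${!var:-}" ]]; then    # indirect expansion: value of the variable named by $var
            echo "$var expected to be set"
            ABORT=1
        fi
    done
    [[ $ABORT -eq 0 ]] || exit 1
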
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
deleted file mode 100755
index 5abc713..0000000
--- a/exercises/floating_ips.sh
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/env bash
-
-# **floating_ips.sh** - using the cloud can be fun
-
-# Test instance connectivity with the ``nova`` command from ``python-novaclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-test_secgroup}
-
-# Default floating IP pool name
-DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-public}
-
-# Additional floating IP pool and range
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-
-# Instance name
-VM_NAME="ex-float"
-
-# Cells does not support floating IP API calls
-is_service_enabled n-cell && exit 55
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
- nova secgroup-create $SECGROUP "$SECGROUP description"
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- die $LINENO "Security group not created"
- fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
- nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
- nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
- # grab the first flavor in the list to launch if default doesn't exist
- INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
- die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
- die $LINENO "server didn't terminate!"
- exit 1
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Floating IPs
-# ------------
-
-# Allocate a floating IP from the default pool
-FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
-
-# List floating addresses
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
- die $LINENO "Floating IP not allocated"
-fi
-
-# Add floating IP to our server
-nova add-floating-ip $VM_UUID $FLOATING_IP || \
- die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
-
-# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
-ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME"
-
-if ! is_service_enabled neutron; then
- # Allocate an IP from second floating pool
- TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
- die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
-
- # list floating addresses
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
- die $LINENO "Floating IP not allocated"
- fi
-fi
-
-# Dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
- die $LINENO "Failure deleting security group rule from $SECGROUP"
-
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
- die $LINENO "Security group rule not deleted from $SECGROUP"
-fi
-
-# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
-    # Test that we aren't able to ping our floating IP within ASSOCIATE_TIMEOUT seconds
- ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail
-fi
-
-# Clean up
-# --------
-
-if ! is_service_enabled neutron; then
- # Delete second floating IP
- nova floating-ip-delete $TEST_FLOATING_IP || \
- die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
-fi
-
-# Delete the floating ip
-nova floating-ip-delete $FLOATING_IP || \
- die $LINENO "Failure deleting floating IP $FLOATING_IP"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-# Wait for termination
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- die $LINENO "Server $VM_NAME not deleted"
-fi
-
-# Delete secgroup
-nova secgroup-delete $SECGROUP || \
- die $LINENO "Failure deleting security group $SECGROUP"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
deleted file mode 100755
index e8c8f62..0000000
--- a/exercises/neutron-adv-test.sh
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright 2012, Cisco Systems
-# Copyright 2012, VMware, Inc.
-# Copyright 2012, NTT MCL, Inc.
-#
-# Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com
-#
-# **neutron-adv-test.sh**
-
-# Perform integration testing of Nova and other components with Neutron.
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-
-set -o errtrace
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-# Environment
-# -----------
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import neutron functions
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# If neutron is not enabled we exit with exitcode 55, which means the exercise is skipped.
-neutron_plugin_check_adv_test_requirements || exit 55
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Neutron Settings
-# ----------------
-
-PROJECTS="DEMO1"
-# TODO (nati) Test public network
-#PROJECTS="DEMO1,DEMO2"
-
-PUBLIC_NAME="admin"
-DEMO1_NAME="demo1"
-DEMO2_NAME="demo2"
-
-PUBLIC_NUM_NET=1
-DEMO1_NUM_NET=1
-DEMO2_NUM_NET=2
-
-PUBLIC_NET1_CIDR="200.0.0.0/24"
-DEMO1_NET1_CIDR="10.10.0.0/24"
-DEMO2_NET1_CIDR="10.20.0.0/24"
-DEMO2_NET2_CIDR="10.20.1.0/24"
-
-PUBLIC_NET1_GATEWAY="200.0.0.1"
-DEMO1_NET1_GATEWAY="10.10.0.1"
-DEMO2_NET1_GATEWAY="10.20.0.1"
-DEMO2_NET2_GATEWAY="10.20.1.1"
-
-PUBLIC_NUM_VM=1
-DEMO1_NUM_VM=1
-DEMO2_NUM_VM=2
-
-PUBLIC_VM1_NET='admin-net1'
-DEMO1_VM1_NET='demo1-net1'
-# Multinic settings. But this will fail without a NIC setting in the OS image
-DEMO2_VM1_NET='demo2-net1'
-DEMO2_VM2_NET='demo2-net2'
-
-PUBLIC_NUM_ROUTER=1
-DEMO1_NUM_ROUTER=1
-DEMO2_NUM_ROUTER=1
-
-PUBLIC_ROUTER1_NET="admin-net1"
-DEMO1_ROUTER1_NET="demo1-net1"
-DEMO2_ROUTER1_NET="demo2-net1"
-
-# Various functions
-# -----------------
-
-function foreach_project {
- COMMAND=$1
- for PROJECT in ${PROJECTS//,/ };do
- eval ${COMMAND//%PROJECT%/$PROJECT}
- done
-}
-
-function foreach_project_resource {
- COMMAND=$1
- RESOURCE=$2
- for PROJECT in ${PROJECTS//,/ };do
- eval 'NUM=$'"${PROJECT}_NUM_$RESOURCE"
- for i in `seq $NUM`;do
- local COMMAND_LOCAL=${COMMAND//%PROJECT%/$PROJECT}
- COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i}
- eval $COMMAND_LOCAL
- done
- done
-}
-
-function foreach_project_vm {
- COMMAND=$1
- foreach_project_resource "$COMMAND" 'VM'
-}
-
-function foreach_project_net {
- COMMAND=$1
- foreach_project_resource "$COMMAND" 'NET'
-}
-
-function get_image_id {
- local IMAGE_ID
- IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
- die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
- echo "$IMAGE_ID"
-}
-
-function get_project_id {
- local PROJECT_NAME=$1
- local PROJECT_ID
- PROJECT_ID=`openstack project list | grep " $PROJECT_NAME " | head -n 1 | get_field 1`
- die_if_not_set $LINENO PROJECT_ID "Failure retrieving PROJECT_ID for $PROJECT_NAME"
- echo "$PROJECT_ID"
-}
-
-function get_user_id {
- local USER_NAME=$1
- local USER_ID
- USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
- die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
- echo "$USER_ID"
-}
-
-function get_role_id {
- local ROLE_NAME=$1
- local ROLE_ID
- ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
- die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
- echo "$ROLE_ID"
-}
-
-function get_network_id {
- local NETWORK_NAME="$1"
- local NETWORK_ID
- NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
- echo $NETWORK_ID
-}
-
-function get_flavor_id {
- local INSTANCE_TYPE=$1
- local FLAVOR_ID
- FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
- die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
- echo "$FLAVOR_ID"
-}
-
-function confirm_server_active {
- local VM_UUID=$1
- if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server '$VM_UUID' did not become active!"
- false
- fi
-}
-
-function neutron_debug_admin {
- local os_username=$OS_USERNAME
- local os_project_id=$OS_PROJECT_ID
- source $TOP_DIR/openrc admin admin
- neutron-debug $@
- source $TOP_DIR/openrc $os_username $os_project_id
-}
-
-function add_project {
- openstack project create $1
- openstack user create $2 --password ${ADMIN_PASSWORD} --project $1
- openstack role add Member --project $1 --user $2
-}
-
-function remove_project {
- local PROJECT=$1
- local PROJECT_ID
- PROJECT_ID=$(get_project_id $PROJECT)
- openstack project delete $PROJECT_ID
-}
-
-function remove_user {
- local USER=$1
- local USER_ID
- USER_ID=$(get_user_id $USER)
- openstack user delete $USER_ID
-}
-
-function create_projects {
- source $TOP_DIR/openrc admin admin
- add_project demo1 demo1 demo1
- add_project demo2 demo2 demo2
- source $TOP_DIR/openrc demo demo
-}
-
-function delete_projects_and_users {
- source $TOP_DIR/openrc admin admin
- remove_user demo1
- remove_project demo1
- remove_user demo2
- remove_project demo2
- echo "removed all projects"
- source $TOP_DIR/openrc demo demo
-}
-
-function create_network {
- local PROJECT=$1
- local GATEWAY=$2
- local CIDR=$3
- local NUM=$4
- local EXTRA=$5
- local NET_NAME="${PROJECT}-net$NUM"
- local ROUTER_NAME="${PROJECT}-router${NUM}"
- source $TOP_DIR/openrc admin admin
- local PROJECT_ID
- PROJECT_ID=$(get_project_id $PROJECT)
- source $TOP_DIR/openrc $PROJECT $PROJECT
- local NET_ID
- NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
- die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
- openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
- neutron_debug_admin probe-create --device-owner compute $NET_ID
- source $TOP_DIR/openrc demo demo
-}
-
-function create_networks {
- foreach_project_net 'create_network ${%PROJECT%_NAME} ${%PROJECT%_NET%NUM%_GATEWAY} ${%PROJECT%_NET%NUM%_CIDR} %NUM% ${%PROJECT%_NET%NUM%_EXTRA}'
- #TODO(nati) test security group function
- # allow ICMP for both project's security groups
- #source $TOP_DIR/openrc demo1 demo1
- #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
- #source $TOP_DIR/openrc demo2 demo2
- #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_vm {
- local PROJECT=$1
- local NUM=$2
- local NET_NAMES=$3
- source $TOP_DIR/openrc $PROJECT $PROJECT
- local NIC=""
- for NET_NAME in ${NET_NAMES//,/ };do
- NIC="$NIC --nic net-id="`get_network_id $NET_NAME`
- done
- #TODO (nati) Add multi-nic test
- #TODO (nati) Add public-net test
- local VM_UUID
- VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
- --image $(get_image_id) \
- $NIC \
- $PROJECT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
- die_if_not_set $LINENO VM_UUID "Failure launching $PROJECT-server$NUM"
- confirm_server_active $VM_UUID
-}
-
-function create_vms {
- foreach_project_vm 'create_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function ping_ip {
- # Test agent connection. Assumes namespaces are disabled, and
- # that DHCP is in use, but not L3
- local VM_NAME=$1
- local NET_NAME=$2
- IP=$(get_instance_ip $VM_NAME $NET_NAME)
- ping_check $IP $BOOT_TIMEOUT $NET_NAME
-}
-
-function check_vm {
- local PROJECT=$1
- local NUM=$2
- local VM_NAME="$PROJECT-server$NUM"
- local NET_NAME=$3
- source $TOP_DIR/openrc $PROJECT $PROJECT
- ping_ip $VM_NAME $NET_NAME
- # TODO (nati) test ssh connection
- # TODO (nati) test inter connection between vm
- # TODO (nati) test dhcp host routes
- # TODO (nati) test multi-nic
-}
-
-function check_vms {
- foreach_project_vm 'check_vm ${%PROJECT%_NAME} %NUM% ${%PROJECT%_VM%NUM%_NET}'
-}
-
-function shutdown_vm {
- local PROJECT=$1
- local NUM=$2
- source $TOP_DIR/openrc $PROJECT $PROJECT
- VM_NAME=${PROJECT}-server$NUM
- nova delete $VM_NAME
-}
-
-function shutdown_vms {
- foreach_project_vm 'shutdown_vm ${%PROJECT%_NAME} %NUM%'
- if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
- die $LINENO "Some VMs failed to shutdown"
- fi
-}
-
-function delete_network {
- local PROJECT=$1
- local NUM=$2
- local NET_NAME="${PROJECT}-net$NUM"
- source $TOP_DIR/openrc admin admin
- local PROJECT_ID
- PROJECT_ID=$(get_project_id $PROJECT)
- #TODO(nati) comment out until l3-agent merged
- #for res in port subnet net router;do
- for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
- delete_probe $net_id
- openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
- openstack network delete $net_id
- done
- source $TOP_DIR/openrc demo demo
-}
-
-function delete_networks {
- foreach_project_net 'delete_network ${%PROJECT%_NAME} %NUM%'
-    # TODO(nati) add security group check after it is implemented
- # source $TOP_DIR/openrc demo1 demo1
- # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
- # source $TOP_DIR/openrc demo2 demo2
- # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
-}
-
-function create_all {
- create_projects
- create_networks
- create_vms
-}
-
-function delete_all {
- shutdown_vms
- delete_networks
- delete_projects_and_users
-}
-
-function all {
- create_all
- check_vms
- delete_all
-}
-
-# Test functions
-# --------------
-
-function test_functions {
- IMAGE=$(get_image_id)
- echo $IMAGE
-
- PROJECT_ID=$(get_project_id demo)
- echo $PROJECT_ID
-
- FLAVOR_ID=$(get_flavor_id m1.tiny)
- echo $FLAVOR_ID
-
- NETWORK_ID=$(get_network_id admin)
- echo $NETWORK_ID
-}
-
-# Usage and main
-# --------------
-
-function usage {
- echo "$0: [-h]"
- echo " -h, --help Display help message"
- echo " -t, --project Create projects"
- echo " -n, --net Create networks"
- echo " -v, --vm Create vms"
- echo " -c, --check Check connection"
- echo " -x, --delete-projects Delete projects"
- echo " -y, --delete-nets Delete networks"
- echo " -z, --delete-vms Delete vms"
- echo " -T, --test Test functions"
-}
-
-function main {
-
- echo Description
-
- if [ $# -eq 0 ] ; then
- # if no args are provided, run all tests
- all
- else
-
- while [ "$1" != "" ]; do
- case $1 in
- -h | --help ) usage
- exit
- ;;
- -n | --net ) create_networks
- exit
- ;;
- -v | --vm ) create_vms
- exit
- ;;
- -t | --project ) create_projects
- exit
- ;;
- -c | --check ) check_vms
- exit
- ;;
- -T | --test ) test_functions
- exit
- ;;
- -x | --delete-projects ) delete_projects_and_users
- exit
- ;;
- -y | --delete-nets ) delete_networks
- exit
- ;;
- -z | --delete-vms ) shutdown_vms
- exit
- ;;
- -a | --all ) all
- exit
- ;;
- * ) usage
- exit 1
- esac
- shift
- done
- fi
-}
-
-trap failed ERR
-function failed {
- local r=$?
- set +o errtrace
- set +o xtrace
- echo "Failed to execute"
- echo "Starting cleanup..."
- delete_all
- echo "Finished cleanup"
- exit $r
-}
-
-# Kick off script
-# ---------------
-
-echo $*
-main $*
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
deleted file mode 100755
index 2f78e39..0000000
--- a/exercises/sec_groups.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-# **sec_groups.sh**
-
-# Test security groups via the command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If nova api is not enabled we exit with exitcode 55 so that
-# the exercise is skipped
-is_service_enabled n-api || exit 55
-
-
-# Testing Security Groups
-# =======================
-
-# List security groups
-nova secgroup-list
-
-# Create random name for new sec group and create secgroup of said name
-SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
-nova secgroup-create $SEC_GROUP_NAME 'a test security group'
-
-# Add some rules to the secgroup
-RULES_TO_ADD=( 22 3389 5900 )
-
-for RULE in "${RULES_TO_ADD[@]}"; do
- nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Check to make sure rules were added
-SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
-die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
-for i in "${RULES_TO_ADD[@]}"; do
- skip=
- for j in "${SEC_GROUP_RULES[@]}"; do
- [[ $i == $j ]] && { skip=1; break; }
- done
- [[ -n $skip ]] || exit 1
-done
-
-# Delete rules and secgroup
-for RULE in "${RULES_TO_ADD[@]}"; do
- nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
-done
-
-# Delete secgroup
-nova secgroup-delete $SEC_GROUP_NAME || \
- die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
deleted file mode 100755
index 8aa376b..0000000
--- a/exercises/swift.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# **swift.sh**
-
-# Test swift via the ``python-openstackclient`` command line
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If swift is not enabled we exit with exitcode 55, which means
-# the exercise is skipped.
-is_service_enabled s-proxy || exit 55
-
-# Container name
-CONTAINER=ex-swift
-OBJECT=/etc/issue
-
-
-# Testing Swift
-# =============
-
-# Check that we have access to swift via keystone
-openstack object store account show || die $LINENO "Failure getting account status"
-
-# We start by creating a test container
-openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
-
-# add a file into it.
-openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER"
-
-# list the objects
-openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
-
-# delete the object first
-openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER"
-
-# delete the container
-openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
deleted file mode 100755
index e7c3560..0000000
--- a/exercises/volumes.sh
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/env bash
-
-# **volumes.sh**
-
-# Test cinder volumes with the ``cinder`` command from ``python-cinderclient``
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import project functions
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If cinder is not enabled we exit with exitcode 55, which means
-# the exercise is skipped.
-is_service_enabled cinder || exit 55
-
-# Ironic does not currently support volume attachment.
-[ "$VIRT_DRIVER" == "ironic" ] && exit 55
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Security group name
-SECGROUP=${SECGROUP:-vol_secgroup}
-
-# Instance and volume names
-VM_NAME=${VM_NAME:-ex-vol-inst}
-VOL_NAME="ex-vol-$(openssl rand -hex 4)"
-
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# List the images available
-openstack image list
-
-# Grab the id of the image to launch
-IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-
-# Security Groups
-# ---------------
-
-# List security groups
-nova secgroup-list
-
-if is_service_enabled n-cell; then
- # Cells does not support security groups, so force the use of "default"
- SECGROUP="default"
- echo "Using the default security group because of Cells."
-else
- # Create a secgroup
- if ! nova secgroup-list | grep -q $SECGROUP; then
- nova secgroup-create $SECGROUP "$SECGROUP description"
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
- fi
- fi
-fi
-
-# Configure Security Group Rules
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
- nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-fi
-if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
- nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
-fi
-
-# List secgroup rules
-nova secgroup-list-rules $SECGROUP
-
-# Set up instance
-# ---------------
-
-# List flavors
-nova flavor-list
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
- # grab the first flavor in the list to launch if default doesn't exist
- INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
- die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
- die $LINENO "server didn't terminate!"
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
-
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
-
-# Volumes
-# -------
-
-# Verify it doesn't exist
-if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
- die $LINENO "Volume $VOL_NAME already exists"
-fi
-
-# Create a new volume
-start_time=$(date +%s)
-cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
- die $LINENO "Failure creating volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- die $LINENO "Volume $VOL_NAME not created"
-fi
-end_time=$(date +%s)
-echo "Completed cinder create in $((end_time - start_time)) seconds"
-
-# Get volume ID
-VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
-
-# Attach to server
-DEVICE=/dev/vdb
-start_time=$(date +%s)
-nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
- die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
- die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-attach in $((end_time - start_time)) seconds"
-
-VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
-if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
- die $LINENO "Volume not attached to correct instance"
-fi
-
-# Clean up
-# --------
-
-# Detach volume
-start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
-fi
-end_time=$(date +%s)
-echo "Completed volume-detach in $((end_time - start_time)) seconds"
-
-# Delete volume
-start_time=$(date +%s)
-cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
- die $LINENO "Volume $VOL_NAME not deleted"
-fi
-end_time=$(date +%s)
-echo "Completed cinder delete in $((end_time - start_time)) seconds"
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- die $LINENO "Server $VM_NAME not deleted"
-fi
-
-if [[ $SECGROUP = "default" ]] ; then
- echo "Skipping deleting default security group"
-else
- # Delete secgroup
- nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index bfd7567..efcfc03 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,5 +1,5 @@
<VirtualHost *:80>
- WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
+ WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi.py
WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP}
WSGIApplicationGroup %{GLOBAL}
diff --git a/files/apache-neutron.template b/files/apache-neutron.template
new file mode 100644
index 0000000..c7796b9
--- /dev/null
+++ b/files/apache-neutron.template
@@ -0,0 +1,36 @@
+Listen %PUBLICPORT%
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" neutron_combined
+
+<Directory %NEUTRON_BIN%>
+ Require all granted
+</Directory>
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess neutron-server processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup neutron-server
+ WSGIScriptAlias / %NEUTRON_BIN%/neutron-api
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ ErrorLogFormat "%M"
+ ErrorLog /var/log/%APACHE_NAME%/neutron.log
+ CustomLog /var/log/%APACHE_NAME%/neutron_access.log neutron_combined
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+</VirtualHost>
+
+
+%SSLLISTEN%<VirtualHost *:443>
+%SSLLISTEN% %SSLENGINE%
+%SSLLISTEN% %SSLCERTFILE%
+%SSLLISTEN% %SSLKEYFILE%
+%SSLLISTEN%</VirtualHost>
+
+Alias /networking %NEUTRON_BIN%/neutron-api
+<Location /networking>
+ SetHandler wsgi-script
+ Options +ExecCGI
+ WSGIProcessGroup neutron-server
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+</Location>
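
The %UPPERCASE% tokens in this vhost template are placeholders that DevStack substitutes when installing the site. Purely as an illustration (the values, target path, and sed invocation below are assumptions, not the exact code DevStack runs), rendering it could look like:

    sudo cp files/apache-neutron.template /etc/apache2/sites-available/neutron-api.conf
    sudo sed -i \
        -e "s|%PUBLICPORT%|9696|g" \
        -e "s|%NEUTRON_BIN%|/usr/local/bin|g" \
        -e "s|%APIWORKERS%|2|g" \
        -e "s|%USER%|stack|g" \
        -e "s|%APACHE_NAME%|apache2|g" \
        -e "s|%VIRTUALENV%||g" \
        -e "s|%SSLENGINE%||g" -e "s|%SSLCERTFILE%||g" -e "s|%SSLKEYFILE%||g" \
        -e "s|%SSLLISTEN%|#|g" \
        /etc/apache2/sites-available/neutron-api.conf
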
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index c11e9f0..9c724cb 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,8 +1,9 @@
+cdrkit-cdrtools-compat # dist:sle12
cryptsetup
dosfstools
libosinfo
lvm2
-mkisofs
+mkisofs # not:sle12
open-iscsi
sg3_utils
# Stuff for diablo volumes
diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 4103a40..1d58121 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,3 +1,4 @@
+cdrkit-cdrtools-compat # dist:sle12
conntrack-tools
curl
dnsmasq
@@ -11,7 +12,8 @@
libvirt # NOPRIME
libvirt-python # NOPRIME
mariadb # NOPRIME
-mkisofs # required for config_drive
+# mkisofs is required for config_drive
+mkisofs # not:sle12
parted
polkit
# qemu as fallback if kvm cannot be used
diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/n-spice b/files/rpms/n-spice
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/nova b/files/rpms/nova
index 4140cd7..8d73644 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -13,7 +13,6 @@
m2crypto
mysql-devel
mysql-server # NOPRIME
-numpy # needed by websockify for spice console
parted
polkit
rabbitmq-server # NOPRIME
diff --git a/functions b/functions
index f63595d..187ad23 100644
--- a/functions
+++ b/functions
@@ -282,7 +282,6 @@
image create \
"$image_name" --public \
--container-format=bare --disk-format=ploop \
- --property hypervisor_type=vz \
--property vm_mode=$vm_mode < "${image}"
return
fi
@@ -740,7 +739,7 @@
# Mount the disk with mount options to make it as efficient as possible
if ! egrep -q ${storage_data_dir} /proc/mounts; then
- sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+ sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
${disk_image} ${storage_data_dir}
fi
}
diff --git a/functions-common b/functions-common
index fae936a..bace9e0 100644
--- a/functions-common
+++ b/functions-common
@@ -92,7 +92,6 @@
--file $CLOUDS_YAML \
--os-cloud devstack \
--os-region-name $REGION_NAME \
- --os-identity-api-version 3 \
$CA_CERT_ARG \
--os-auth-url $KEYSTONE_SERVICE_URI \
--os-username demo \
@@ -104,7 +103,6 @@
--file $CLOUDS_YAML \
--os-cloud devstack-alt \
--os-region-name $REGION_NAME \
- --os-identity-api-version 3 \
$CA_CERT_ARG \
--os-auth-url $KEYSTONE_SERVICE_URI \
--os-username alt_demo \
@@ -116,13 +114,23 @@
--file $CLOUDS_YAML \
--os-cloud devstack-admin \
--os-region-name $REGION_NAME \
- --os-identity-api-version 3 \
$CA_CERT_ARG \
--os-auth-url $KEYSTONE_SERVICE_URI \
--os-username admin \
--os-password $ADMIN_PASSWORD \
--os-project-name admin
+ # admin with a system-scoped token -> devstack-system
+ $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-system-admin \
+ --os-region-name $REGION_NAME \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_SERVICE_URI \
+ --os-username admin \
+ --os-password $ADMIN_PASSWORD \
+ --os-system-scope all
+
# CLean up any old clouds.yaml files we had laying around
rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml
}
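
The new devstack-system-admin entry stores the admin credentials with a system scope (--os-system-scope all) rather than a project scope. Once clouds.yaml is written, any client run can select it; a plausible check (the exact command is just an example):

    # Use the system-scoped admin credentials from clouds.yaml
    openstack --os-cloud devstack-system-admin endpoint list
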
@@ -228,9 +236,9 @@
xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
- echo $msg 1>&2;
+ echo "$msg" 1>&2;
if [[ -n ${LOGDIR} ]]; then
- echo $msg >> "${LOGDIR}/error.log"
+ echo "$msg" >> "${LOGDIR}/error.log"
fi
$xtrace
return $exitcode
@@ -283,7 +291,7 @@
xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
- echo $msg
+ echo "$msg"
$xtrace
return $exitcode
}
@@ -371,12 +379,14 @@
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
# For Fedora, just use 'f' and the release
DISTRO="f$os_RELEASE"
- elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
+ elif is_opensuse; then
DISTRO="opensuse-$os_RELEASE"
# Tumbleweed uses "n/a" as a codename, and the release is a datestring
- # like 20180218, so not very useful.
- [ "$os_CODENAME" = "n/a" ] && DISTRO="opensuse-tumbleweed"
- elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+ # like 20180218, so not very useful. Leap however uses a release
+ # with a "dot", so for example 15.0
+ [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \
+ DISTRO="opensuse-tumbleweed"
+ elif is_suse_linux_enterprise; then
# just use major release
DISTRO="sle${os_RELEASE%.*}"
elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
@@ -450,11 +460,30 @@
# (openSUSE, SLE).
# is_suse
function is_suse {
+ is_opensuse || is_suse_linux_enterprise
+}
+
+
+# Determine if current distribution is an openSUSE distribution
+# is_opensuse
+function is_opensuse {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
- [[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]]
+ [[ "$os_VENDOR" =~ (openSUSE) ]]
+}
+
+
+# Determine if current distribution is a SUSE Linux Enterprise (SLE)
+# distribution
+# is_suse_linux_enterprise
+function is_suse_linux_enterprise {
+ if [[ -z "$os_VENDOR" ]]; then
+ GetOSVersion
+ fi
+
+ [[ "$os_VENDOR" =~ (^SUSE) ]]
}
@@ -1376,7 +1405,36 @@
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
- zypper --non-interactive install --auto-agree-with-licenses "$@"
+ zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@"
+}
+
+# Run bindep and install packages it outputs
+#
+# Usage:
+# install_bindep <path-to-bindep.txt> [profile,profile]
+#
+# Note that unlike the bindep command itself, the profile(s) specified should
+# be a single, comma-separated string with no spaces.
+function install_bindep {
+ local file=$1
+ local profiles=${2:-""}
+ local pkgs
+
+ if [[ ! -f $file ]]; then
+ die $LINENO "Can not find bindep file: $file"
+ fi
+
+ # converting here makes it much easier to work with passing
+ # arguments
+    profiles=${profiles//,/ }
+
+    # Note bindep returns 1 when packages need to be installed, so we
+    # have to ignore its return code for "-e"
+ pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true)
+
+ if [[ -n "${pkgs}" ]]; then
+ install_package ${pkgs}
+ fi
}
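
Following the usage notes above, a caller passes the bindep file path plus an optional single comma-separated profile string; for example (the path and profiles are illustrative):

    # Install the binary dependencies from a project's bindep.txt,
    # selecting the "test" and "doc" profiles in one comma-separated argument.
    install_bindep /opt/stack/nova/bindep.txt test,doc
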
function write_user_unit_file {
@@ -1439,24 +1497,24 @@
# do some sanity checks on $cmd to see things we don't expect to work
if [[ "$cmd" =~ "sudo" ]]; then
- local msg=<<EOF
+ read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here
You are trying to use run_process with sudo, this is not going to work under systemd.
-If you need to run a service as a user other than $STACK_USER call it with:
+If you need to run a service as a user other than \$STACK_USER call it with:
run_process \$name \$cmd \$group \$user
EOF
- die $LINENO $msg
+ die $LINENO "$msg"
fi
if [[ ! "$cmd" =~ ^/ ]]; then
- local msg=<<EOF
+ read -r -d '' msg << EOF || true # read returns 1 for EOF, but it is ok here
The cmd="$cmd" does not start with an absolute path. It will fail to
start under systemd.
Please update your run_process stanza to have an absolute path.
EOF
- die $LINENO $msg
+ die $LINENO "$msg"
fi
}
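
The read -r -d '' ... << EOF || true construction above is a compact way to capture a multi-line message into a variable: read returns 1 when it hits EOF, so the || true keeps errexit-style callers from dying. A tiny standalone example:

    # Capture a multi-line message; read exits non-zero at EOF, hence "|| true".
    read -r -d '' msg << EOF || true
    first line of the message
    second line of the message
    EOF
    echo "$msg"
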
diff --git a/inc/ini-config b/inc/ini-config
index 6fe7788..7993682 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -88,17 +88,22 @@
}
 # Determine if the given option is present in the INI file
-# ini_has_option config-file section option
+# ini_has_option [-sudo] config-file section option
function ini_has_option {
local xtrace
xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
local line
- line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+ line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
$xtrace
[ -n "$line" ]
}
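
With the new -sudo flag, ini_has_option can probe root-owned configuration files without changing their ownership first. A sketch of a call (the file path is illustrative):

    # Check a root-readable config file for an option, reading it via sudo
    if ini_has_option -sudo /etc/neutron/neutron.conf DEFAULT debug; then
        echo "debug is already set"
    fi
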
@@ -173,8 +178,10 @@
xtrace=$(set +o | grep xtrace)
set +o xtrace
local sudo=""
+ local sudo_option=""
if [ $1 == "-sudo" ]; then
sudo="sudo "
+ sudo_option="-sudo "
shift
fi
local file=$1
@@ -187,11 +194,11 @@
return
fi
- if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+ if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then
# Add section at the end
echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
fi
- if ! ini_has_option "$file" "$section" "$option"; then
+ if ! ini_has_option $sudo_option "$file" "$section" "$option"; then
# Add it
$sudo sed -i -e "/^\[$section\]/ a\\
$option = $value
@@ -228,7 +235,7 @@
# the reverse order. Do a reverse here to keep the original order.
values="$v ${values}"
done
- if ! grep -q "^\[$section\]" "$file"; then
+ if ! $sudo grep -q "^\[$section\]" "$file"; then
# Add section at the end
echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
else
diff --git a/inc/python b/inc/python
index 96be107..19e1228 100644
--- a/inc/python
+++ b/inc/python
@@ -49,15 +49,9 @@
fi
$xtrace
- if python3_enabled && [ "$os_VENDOR" = "Fedora" -a $os_RELEASE -gt 26 ]; then
- # Default Python 3 install prefix changed to /usr/local in Fedora 27:
- # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe
- echo "/usr/local/bin"
- elif is_fedora || is_suse; then
- echo "/usr/bin"
- else
- echo "/usr/local/bin"
- fi
+ local PYTHON_PATH=/usr/local/bin
+ ( is_fedora && ! python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin
+ echo $PYTHON_PATH
}
# Wrapper for ``pip install`` that only installs versions of libraries
@@ -87,41 +81,12 @@
pip_install $clean_name[$extras]
}
-# Determine the python versions supported by a package
-function get_python_versions_for_package {
- local name=$1
- cd $name && python setup.py --classifiers \
- | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' '
-}
-
-# Check for python3 classifier in local directory
-function check_python3_support_for_package_local {
- local name=$1
- cd $name
- set +e
- classifier=$(python setup.py --classifiers \
- | grep 'Programming Language :: Python :: 3$')
- set -e
- echo $classifier
-}
-
-# Check for python3 classifier on pypi
-function check_python3_support_for_package_remote {
- local name=$1
- set +e
- classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \
- | grep '"Programming Language :: Python :: 3"')
- set -e
- echo $classifier
-}
-
-# python3_enabled_for() checks if the service(s) specified as arguments are
-# enabled by the user in ``ENABLED_PYTHON3_PACKAGES``.
+# python3_enabled_for() assumes the service(s) specified as arguments are
+# enabled for python 3 unless explicitly disabled. See python3_disabled_for().
#
# Multiple services specified as arguments are ``OR``'ed together; the test
# is a short-circuit boolean, i.e it returns on the first match.
#
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
# python3_enabled_for dir [dir ...]
function python3_enabled_for {
local xtrace
@@ -132,7 +97,9 @@
local dirs=$@
local dir
for dir in ${dirs}; do
- [[ ,${ENABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
+ if ! python3_disabled_for "${dir}"; then
+ enabled=0
+ fi
done
$xtrace
@@ -163,42 +130,29 @@
return $enabled
}
-# enable_python3_package() adds the repositories passed as argument to the
-# ``ENABLED_PYTHON3_PACKAGES`` list, if they are not already present.
+# enable_python3_package() -- no-op for backwards compatibility
#
# For example:
# enable_python3_package nova
#
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
# enable_python3_package dir [dir ...]
function enable_python3_package {
local xtrace
xtrace=$(set +o | grep xtrace)
set +o xtrace
- local tmpsvcs="${ENABLED_PYTHON3_PACKAGES}"
- local python3
- for dir in $@; do
- if [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]]; then
- warn $LINENO "Attempt to enable_python3_package ${dir} when it has been disabled"
- continue
- fi
- if ! python3_enabled_for $dir; then
- tmpsvcs+=",$dir"
- fi
- done
- ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$tmpsvcs")
+ echo "It is no longer necessary to call enable_python3_package()."
$xtrace
}
-# disable_python3_package() prepares the services passed as argument to be
-# removed from the ``ENABLED_PYTHON3_PACKAGES`` list, if they are present.
+# disable_python3_package() adds the services passed as argument to
+# the ``DISABLED_PYTHON3_PACKAGES`` list.
#
# For example:
# disable_python3_package swift
#
-# Uses globals ``ENABLED_PYTHON3_PACKAGES`` and ``DISABLED_PYTHON3_PACKAGES``
+# Uses global ``DISABLED_PYTHON3_PACKAGES``
# disable_python3_package dir [dir ...]
function disable_python3_package {
local xtrace
@@ -206,16 +160,11 @@
set +o xtrace
local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}"
- local enabled_svcs=",${ENABLED_PYTHON3_PACKAGES},"
local dir
for dir in $@; do
disabled_svcs+=",$dir"
- if python3_enabled_for $dir; then
- enabled_svcs=${enabled_svcs//,$dir,/,}
- fi
done
DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs")
- ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$enabled_svcs")
$xtrace
}
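
With this change everything is assumed to support Python 3 unless it appears in DISABLED_PYTHON3_PACKAGES, so opting a service out is now a blacklist entry rather than a whitelist addition. One way that could look in local.conf (a sketch; swift is just the example used in the docstring above):

    [[local|localrc]]
    # Keep swift on Python 2 by blacklisting it from the python3 default
    DISABLED_PYTHON3_PACKAGES=swift
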
@@ -282,48 +231,20 @@
cmd_pip=$(get_pip_command $PYTHON2_VERSION)
local sudo_pip="sudo -H"
if python3_enabled; then
- # Look at the package classifiers to find the python
- # versions supported, and if we find the version of
- # python3 we've been told to use, use that instead of the
- # default pip
- local python_versions
-
# Special case some services that have experimental
# support for python3 in progress, but don't claim support
# in their classifier
echo "Check python version for : $package_dir"
if python3_disabled_for ${package_dir##*/}; then
echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
- elif python3_enabled_for ${package_dir##*/}; then
- echo "Explicitly using $PYTHON3_VERSION version to install $package_dir based on ENABLED_PYTHON3_PACKAGES"
+ else
+ # For everything that is not explicitly blacklisted with
+ # DISABLED_PYTHON3_PACKAGES, assume it supports python3
+ # and we will let pip sort out the install, regardless of
+ # the package being local or remote.
+ echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior"
sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- elif [[ -d "$package_dir" ]]; then
- python_versions=$(get_python_versions_for_package $package_dir)
- if [[ $python_versions =~ $PYTHON3_VERSION ]]; then
- echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on classifiers"
- sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
- cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- else
- # The package may not have yet advertised python3.5
- # support so check for just python3 classifier and log
- # a warning.
- python3_classifier=$(check_python3_support_for_package_local $package_dir)
- if [[ ! -z "$python3_classifier" ]]; then
- echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings"
- sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
- cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- fi
- fi
- else
- # Check pypi as we don't have the package on disk
- package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*')
- python3_classifier=$(check_python3_support_for_package_remote $package)
- if [[ ! -z "$python3_classifier" ]]; then
- echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings"
- sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
- cmd_pip=$(get_pip_command $PYTHON3_VERSION)
- fi
fi
fi
fi
@@ -445,7 +366,14 @@
# another project.
#
# use this for non namespaced libraries
+#
+# setup_dev_lib [-bindep] <name>
function setup_dev_lib {
+ local bindep
+ if [[ $1 == -bindep* ]]; then
+ bindep="${1}"
+ shift
+ fi
local name=$1
local dir=${GITDIR[$name]}
if python3_enabled; then
@@ -455,10 +383,10 @@
# of Python.
echo "Installing $name again without Python 3 enabled"
USE_PYTHON3=False
- setup_develop $dir
+ setup_develop $bindep $dir
USE_PYTHON3=True
fi
- setup_develop $dir
+ setup_develop $bindep $dir
}
# this should be used if you want to install globally, all libraries should
@@ -469,11 +397,17 @@
# extras: comma-separated list of optional dependencies to install
# (e.g., ldap,memcache).
# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+# bindep: Set "-bindep" as first argument to install bindep.txt packages
# The command is like "pip install <project_dir>[<extras>]"
function setup_install {
+ local bindep
+ if [[ $1 == -bindep* ]]; then
+ bindep="${1}"
+ shift
+ fi
local project_dir=$1
local extras=$2
- _setup_package_with_constraints_edit $project_dir "" $extras
+ _setup_package_with_constraints_edit $bindep $project_dir "" $extras
}
# this should be used for projects which run services, like all services
@@ -485,20 +419,14 @@
# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install -e <project_dir>[<extras>]"
function setup_develop {
+ local bindep
+ if [[ $1 == -bindep* ]]; then
+ bindep="${1}"
+ shift
+ fi
local project_dir=$1
local extras=$2
- _setup_package_with_constraints_edit $project_dir -e $extras
-}
-
-# determine if a project as specified by directory is in
-# projects.txt. This will not be an exact match because we throw away
-# the namespacing when we clone, but it should be good enough in all
-# practical ways.
-function is_in_projects_txt {
- local project_dir=$1
- local project_name
- project_name=$(basename $project_dir)
- grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
+ _setup_package_with_constraints_edit $bindep $project_dir -e $extras
}
# ``pip install -e`` the package, which processes the dependencies
@@ -517,6 +445,11 @@
# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install <flags> <project_dir>[<extras>]"
function _setup_package_with_constraints_edit {
+ local bindep
+ if [[ $1 == -bindep* ]]; then
+ bindep="${1}"
+ shift
+ fi
local project_dir=$1
local flags=$2
local extras=$3
@@ -537,7 +470,7 @@
"$flags file://$project_dir#egg=$name"
fi
- setup_package $project_dir "$flags" $extras
+ setup_package $bindep $project_dir "$flags" $extras
# If this project is in LIBS_FROM_GIT, verify it was actually installed
# correctly. This helps catch errors caused by constraints mismatches.
@@ -549,17 +482,30 @@
}
# ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
+# using pip before running `setup.py develop`. The command is like
+# "pip install <flags> <project_dir>[<extras>]"
#
# Uses globals ``STACK_USER``
-# setup_package project_dir [flags] [extras]
-# project_dir: directory of project repo (e.g., /opt/stack/keystone)
-# flags: pip CLI options/flags
-# extras: comma-separated list of optional dependencies to install
-# (e.g., ldap,memcache).
-# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
-# The command is like "pip install <flags> <project_dir>[<extras>]"
+#
+# Usage:
+# setup_package [-bindep[=profile,profile]] <project_dir> <flags> [extras]
+#
+# -bindep : Use bindep to install dependencies; select extra profiles
+# as comma separated arguments after "="
+# project_dir : directory of project repo (e.g., /opt/stack/keystone)
+# flags : pip CLI options/flags
+# extras : comma-separated list of optional dependencies to install
+# (e.g., ldap,memcache).
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
function setup_package {
+ local bindep=0
+ local bindep_flag=""
+ local bindep_profiles=""
+ if [[ $1 == -bindep* ]]; then
+ bindep=1
+ IFS="=" read bindep_flag bindep_profiles <<< ${1}
+ shift
+ fi
local project_dir=$1
local flags=$2
local extras=$3
@@ -575,6 +521,11 @@
extras="[$extras]"
fi
+ # install any bindep packages
+ if [[ $bindep == 1 ]]; then
+ install_bindep $project_dir/bindep.txt $bindep_profiles
+ fi
+
pip_install $flags "$project_dir$extras"
# ensure that further actions can do things like setup.py sdist
if [[ "$flags" == "-e" ]]; then
diff --git a/lib/cinder b/lib/cinder
index 92d0295..047b25b 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -96,9 +96,9 @@
# https://bugs.launchpad.net/cinder/+bug/1180976
CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
-# Centos7 switched to using LIO and that's all that's supported,
-# although the tgt bits are in EPEL we don't want that for CI
-if is_fedora; then
+# Centos7 and OpenSUSE switched to using LIO and that's all that's supported,
+# although the tgt bits are in EPEL and OpenSUSE, we don't want those for CI
+if is_fedora || is_suse; then
CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
die "lioadm is the only valid Cinder target_helper config on this platform"
@@ -268,7 +268,12 @@
configure_cinder_image_volume_cache
fi
- if is_service_enabled swift; then
+ if is_service_enabled c-bak; then
+ # NOTE(mriedem): The default backup driver uses swift and if we're
+ # on a subnode we might not know if swift is enabled, but chances are
+ # good that it is on the controller so configure the backup service
+ # to use it. If we want to configure the backup service to use
+ # a non-swift driver, we'll likely need environment variables.
iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
fi
@@ -344,18 +349,12 @@
# block-storage is the official service type
get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
- get_or_create_service "cinder" "volume" "Cinder Volume Service"
if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
get_or_create_endpoint \
"block-storage" \
"$REGION_NAME" \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
- get_or_create_endpoint \
- "volume" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s"
-
get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
get_or_create_endpoint \
"volumev2" \
@@ -373,11 +372,6 @@
"$REGION_NAME" \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
- get_or_create_endpoint \
- "volume" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s"
-
get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
get_or_create_endpoint \
"volumev2" \
@@ -440,7 +434,14 @@
if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
install_package tgt
elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
- install_package targetcli
+ if [[ ${DISTRO} == "bionic" ]]; then
+ # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
+ sudo mkdir -p /etc/target
+
+ install_package targetcli-fb
+ else
+ install_package targetcli
+ fi
fi
}
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 00a0bb3..33c9706 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -65,7 +65,7 @@
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
- iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
diff --git a/lib/databases/mysql b/lib/databases/mysql
index cf61056..4d0f5f3 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -18,6 +18,12 @@
MYSQL_SERVICE_NAME=mysql
if is_fedora && ! is_oraclelinux; then
MYSQL_SERVICE_NAME=mariadb
+elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
+ # Older mariadb packages on SLES 12 provided mysql.service. The
+ # newer ones on SLES 12 and 15 use mariadb.service; they also
+ # provide a mysql.service symlink for backwards-compatibility, but
+ # let's not rely on that.
+ MYSQL_SERVICE_NAME=mariadb
fi
# Functions
diff --git a/lib/etcd3 b/lib/etcd3
index 26d07fd..4f3a7a4 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -27,6 +27,10 @@
ETCD_DATA_DIR="$DATA_DIR/etcd"
ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
ETCD_BIN_DIR="$DEST/bin"
+# The option below will mount ETCD_DATA_DIR as a ramdisk, which is useful for
+# running etcd-heavy services in the gate VMs, e.g. Kubernetes.
+ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK)
+ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512}
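+# Both can be overridden in local.conf, e.g. (illustrative values):
+#   ETCD_USE_RAMDISK=False   # keep etcd data on disk
+#   ETCD_RAMDISK_MB=1024     # or use a larger ramdisk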
if is_ubuntu ; then
UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1`
@@ -46,6 +50,9 @@
cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT "
fi
cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
+ if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
+ cmd+=" --debug"
+ fi
local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root"
@@ -86,6 +93,9 @@
$SYSTEMCTL daemon-reload
+ if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+ sudo umount $ETCD_DATA_DIR
+ fi
sudo rm -rf $ETCD_DATA_DIR
}
@@ -95,6 +105,9 @@
# Create the necessary directories
sudo mkdir -p $ETCD_BIN_DIR
sudo mkdir -p $ETCD_DATA_DIR
+ if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+ sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR
+ fi
# Download and cache the etcd tgz for subsequent use
local etcd_file
diff --git a/lib/glance b/lib/glance
index 94f6a22..65487cb 100644
--- a/lib/glance
+++ b/lib/glance
@@ -236,8 +236,8 @@
CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
- iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
- iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+ iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
+ iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
fi
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
@@ -345,7 +345,7 @@
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
else
- run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+ run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
fi
echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
diff --git a/lib/keystone b/lib/keystone
index 57cb24d..02e2822 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -217,7 +217,7 @@
iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
- iniset_rpc_backend keystone $KEYSTONE_CONF
+ iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
local service_port=$KEYSTONE_SERVICE_PORT
local auth_port=$KEYSTONE_AUTH_PORT
diff --git a/lib/lvm b/lib/lvm
index f047181..d9e78a0 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -99,8 +99,15 @@
if ! sudo vgs $vg; then
# Only create if the file doesn't already exists
[[ -f $backing_file ]] || truncate -s $size $backing_file
+
+ local directio=""
+ # Check to see if we can do direct-io
+ if losetup -h | grep -q direct-io; then
+ directio="--direct-io=on"
+ fi
+
local vg_dev
- vg_dev=`sudo losetup -f --show $backing_file`
+ vg_dev=$(sudo losetup -f --show $directio $backing_file)
# Only create volume group if it doesn't already exist
if ! sudo vgs $vg; then
diff --git a/lib/neutron b/lib/neutron
index 9f9b132..1066d8e 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -28,6 +28,12 @@
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be
+# stable enough
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
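+# For example, adding NEUTRON_DEPLOY_MOD_WSGI=True to local.conf (illustrative)
+# opts in to the uwsgi deployment described above.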
NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
NEUTRON_DIR=$DEST/neutron
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -58,6 +64,8 @@
NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+
# By default, use the ML2 plugin
NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
@@ -87,9 +95,6 @@
NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
-# This is needed because _neutron_ovs_base_configure_l3_agent will set
-# external_network_bridge
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
# an external network bridge
PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
@@ -175,9 +180,14 @@
# Neutron API server & Neutron plugin
if is_service_enabled neutron-api; then
local policy_file=$NEUTRON_CONF_DIR/policy.json
- cp $NEUTRON_DIR/etc/policy.json $policy_file
# Allow neutron user to administer neutron to match neutron account
- sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file
+ # NOTE(amotoki): This is required for nova to work correctly with neutron.
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+ cp $NEUTRON_DIR/etc/policy.json $policy_file
+ sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $policy_file
+ else
+ echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $policy_file
+ fi
cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
@@ -286,7 +296,7 @@
# Format logging
setup_logging $NEUTRON_CONF
- if is_service_enabled tls-proxy; then
+ if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
# Set the service port for a proxy to take the original
iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -357,6 +367,15 @@
# create_neutron_accounts() - Create required service accounts
function create_neutron_accounts_new {
+ local neutron_url
+
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+ else
+ neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+ fi
+
+
if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
create_service_user "neutron"
@@ -364,8 +383,7 @@
neutron_service=$(get_or_create_service "neutron" \
"network" "Neutron Service")
get_or_create_endpoint $neutron_service \
- "$REGION_NAME" \
- "$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/"
+ "$REGION_NAME" "$neutron_url"
fi
}
@@ -427,6 +445,7 @@
function start_neutron_api {
local service_port=$NEUTRON_SERVICE_PORT
local service_protocol=$NEUTRON_SERVICE_PROTOCOL
+ local neutron_url
if is_service_enabled tls-proxy; then
service_port=$NEUTRON_SERVICE_PORT_INT
service_protocol="http"
@@ -440,17 +459,24 @@
opts+=" --config-file $cfg_file"
done
- # Start the Neutron service
- # TODO(sc68cal) Stop hard coding this
- run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
-
- if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$NEUTRON_SERVICE_HOST:$service_port; then
- die $LINENO "neutron-api did not start"
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
+ enable_service neutron-rpc-server
+ run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
+ else
+ # Start the Neutron service
+ # TODO(sc68cal) Stop hard coding this
+ run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
+ neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
+ # Start proxy if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+ fi
fi
- # Start proxy if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
+ if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
+ die $LINENO "neutron-api did not start"
fi
}
@@ -497,6 +523,10 @@
stop_process $serv
done
+ if is_service_enabled neutron-rpc-server; then
+ stop_process neutron-rpc-server
+ fi
+
if is_service_enabled neutron-dhcp; then
stop_process neutron-dhcp
pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
@@ -551,6 +581,13 @@
# neutron-legacy is removed.
# TODO(sc68cal) Remove when neutron-legacy is no more.
function cleanup_neutron {
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ stop_process neutron-api
+ stop_process neutron-rpc-server
+ remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
+ sudo rm -f $(apache_site_config_for neutron-api)
+ fi
+
if is_neutron_legacy_enabled; then
# Call back to old function
cleanup_mutnauq "$@"
@@ -566,6 +603,10 @@
else
configure_neutron_new "$@"
fi
+
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
+ fi
}
function configure_neutron_nova {
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 15bcfe3..8257115 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -86,6 +86,15 @@
NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
+# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
+# - False (default) : Run neutron under Eventlet
+# - True : Run neutron under uwsgi
+# TODO(annp): Switch to uwsgi in the next cycle if things turn out to be
+# stable enough
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
+
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
+
# Agent binaries. Note, binary paths for other agents are set in per-service
# scripts in lib/neutron_plugins/services/
AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -402,6 +411,13 @@
# Migrated from keystone_data.sh
function create_mutnauq_accounts {
+ local neutron_url
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
+ else
+ neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
+ fi
+
if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
create_service_user "neutron"
@@ -409,8 +425,7 @@
get_or_create_service "neutron" "network" "Neutron Service"
get_or_create_endpoint \
"network" \
- "$REGION_NAME" \
- "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
+ "$REGION_NAME" "$neutron_url"
fi
}
@@ -460,6 +475,7 @@
local service_port=$Q_PORT
local service_protocol=$Q_PROTOCOL
local cfg_file_options
+ local neutron_url
cfg_file_options="$(determine_config_files neutron-server)"
@@ -468,16 +484,24 @@
service_protocol="http"
fi
# Start the Neutron service
- run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ enable_service neutron-api
+ run_process neutron-api "$NEUTRON_BIN_DIR/uwsgi --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+ neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
+ enable_service neutron-rpc-server
+ run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+ else
+ run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+ neutron_url=$service_protocol://$Q_HOST:$service_port
+ # Start proxy if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+ fi
+ fi
echo "Waiting for Neutron to start..."
- local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port"
+ local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-
- # Start proxy if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
- fi
}
# Control of the l2 agent is separated out to make it easier to test partial
@@ -532,7 +556,12 @@
[ ! -z "$pid" ] && sudo kill -9 $pid
fi
- stop_process q-svc
+ if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+ stop_process neutron-rpc-server
+ stop_process neutron-api
+ else
+ stop_process q-svc
+ fi
if is_service_enabled q-l3; then
sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
@@ -604,7 +633,7 @@
IP_UP="sudo ip link set $to_intf up"
if [[ "$af" == "inet" ]]; then
IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
- ARP_CMD="arping -A -c 3 -w 4.5 -I $to_intf $IP "
+ ARP_CMD="sudo arping -A -c 3 -w 4.5 -I $to_intf $IP "
fi
fi
@@ -670,10 +699,15 @@
cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
- cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
# allow neutron user to administer neutron to match neutron account
- sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+ # NOTE(amotoki): This is required for nova to work correctly with neutron.
+ if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+ cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+ sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+ else
+ echo '{"context_is_admin": "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+ fi
# Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
# For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
@@ -715,7 +749,7 @@
# Format logging
setup_logging $NEUTRON_CONF
- if is_service_enabled tls-proxy; then
+ if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
# Set the service port for a proxy to take the original
iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 52c6ad5..d3f5bd5 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Neuton Big Switch/FloodLight plugin
+# Neutron Big Switch/FloodLight plugin
# ------------------------------------
# Save trace setting
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index f2302e3..fa3f862 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -8,21 +8,23 @@
set +o xtrace
function neutron_lb_cleanup {
- sudo ip link set $PUBLIC_BRIDGE down
- sudo brctl delbr $PUBLIC_BRIDGE
+ sudo ip link delete $PUBLIC_BRIDGE
+ bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/`
+ if [[ -z "$bridge_list" ]]; then
+ return
+ fi
if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then
- for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
+ for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
sudo ip link delete $port
done
elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then
- for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
+ for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
sudo ip link delete $port
done
fi
- for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do
- sudo ip link set $bridge down
- sudo brctl delbr $bridge
+ for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do
+ sudo ip link delete $bridge
done
}
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 523024e..2e63fe3 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -69,7 +69,7 @@
restart_service openvswitch
sudo systemctl enable openvswitch
elif is_suse; then
- if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+ if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
restart_service openvswitch-switch
else
# workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
@@ -96,10 +96,6 @@
}
function _neutron_ovs_base_configure_l3_agent {
- if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
- fi
-
neutron-ovs-cleanup --config-file $NEUTRON_CONF
if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then
ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 9be32b7..ec289f6 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -103,7 +103,7 @@
default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
-default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}')
+default_v6_route_devs=$(ip -6 route list match default table all | grep via | awk '{print $5}')
function _determine_config_l3 {
local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
@@ -395,6 +395,10 @@
# This logic is specific to using the l3-agent for layer 3
if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+ # Ensure IPv6 forwarding is enabled on the host
+ sudo sysctl -w net.ipv6.conf.all.forwarding=1
+ # If the Linux host considers itself to be a router, it will
+ # ignore all router advertisements.
# Ensure IPv6 RAs are accepted on interfaces with a default route.
# This is needed for neutron-based devstack clouds to work in
# IPv6-only clouds in the gate. Please do not remove this without
@@ -405,8 +409,6 @@
# device name would be reinterpreted as a slash, causing an error.
sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
done
- # Ensure IPv6 forwarding is enabled on the host
- sudo sysctl -w net.ipv6.conf.all.forwarding=1
# Configure and enable public bridge
# Override global IPV6_ROUTER_GW_IP with the true value from neutron
IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
diff --git a/lib/nova b/lib/nova
index 0003179..1fb50df 100644
--- a/lib/nova
+++ b/lib/nova
@@ -183,6 +183,10 @@
# and Glance.
NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
+# Enable debugging levels for iscsid service (goes from 0-8)
+ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG)
+ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4}
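+# For example, ISCSID_DEBUG=True with ISCSID_DEBUG_LEVEL=8 in local.conf
+# (illustrative) gives the most verbose iscsid output.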
+
# Functions
# ---------
@@ -303,17 +307,6 @@
# to simulate multiple systems.
if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
if is_ubuntu; then
- if [[ ! "$DISTRO" > natty ]]; then
- local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
- sudo mkdir -p /cgroup
- if ! grep -q cgroup /etc/fstab; then
- echo "$cgline" | sudo tee -a /etc/fstab
- fi
- if ! mount -n | grep -q cgroup; then
- sudo mount /cgroup
- fi
- fi
-
# enable nbd for lxc unless you're using an lvm backend
# otherwise you can't boot instances
if [[ "$NOVA_BACKEND" != "LVM" ]]; then
@@ -338,10 +331,22 @@
sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
fi
fi
- if is_suse; then
- # iscsid is not started by default
- start_service iscsid
+
+ if [[ ${ISCSID_DEBUG} == "True" ]]; then
+ # Install an override that starts iscsid with debugging
+ # enabled.
+ cat > /tmp/iscsid.override <<EOF
+[Service]
+ExecStart=
+ExecStart=/usr/sbin/iscsid -d${ISCSID_DEBUG_LEVEL}
+EOF
+ sudo mkdir -p /etc/systemd/system/iscsid.service.d
+ sudo mv /tmp/iscsid.override /etc/systemd/system/iscsid.service.d/override.conf
+ sudo systemctl daemon-reload
fi
+
+ # ensure that iscsid is started, even when disabled by default
+ restart_service iscsid
fi
# Rebuild the config file from scratch
@@ -608,11 +613,35 @@
fi
}
+# Configure access to placement from a nova service, usually
+# compute, but sometimes conductor.
+function configure_placement_nova_compute {
+ # Use the provided config file path or default to $NOVA_CONF.
+ local conf=${1:-$NOVA_CONF}
+ iniset $conf placement auth_type "password"
+ iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $conf placement username placement
+ iniset $conf placement password "$SERVICE_PASSWORD"
+ iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement project_name "$SERVICE_TENANT_NAME"
+ iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement region_name "$REGION_NAME"
+}
+
function configure_console_compute {
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
if is_service_enabled n-cpu; then
- NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then
+ # Use the old URL when installing novnc packages.
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ elif vercmp ${NOVNC_BRANCH} "<" "1.0.0"; then
+ # Use the old URL when installing older novnc source.
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ else
+ # Use the new URL when building >=v1.0.0 from source.
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_lite.html"}
+ fi
iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
@@ -663,6 +692,22 @@
sudo mkdir -p /etc/pki/nova-novnc
deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem
deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem
+ # OpenSSL 1.1.0 generates the key file with permissions: 600, by
+ # default, and the deploy_int* methods use 'sudo cp' to copy the
+ # files, making them owned by root:root.
+ # Change ownership of everything under /etc/pki/nova-novnc to
+ # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read
+ # the key file.
+ sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc
+ # This is needed to enable TLS in the proxy itself, example log:
+ # WebSocket server settings:
+ # - Listen on 0.0.0.0:6080
+ # - Flash security policy server
+ # - Web server (no directory listings). Web root: /usr/share/novnc
+ # - SSL/TLS support
+ # - proxying from 0.0.0.0:6080 to None:None
+ iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem"
+ iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem"
fi
fi
@@ -908,14 +953,14 @@
local compute_cell_conf=$NOVA_CONF
fi
+ cp $compute_cell_conf $NOVA_CPU_CONF
+
if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
# NOTE(danms): Grenade doesn't setup multi-cell rabbit, so
# skip these bits and use the normal config.
- NOVA_CPU_CONF=$compute_cell_conf
echo "Skipping multi-cell conductor fleet setup"
else
# "${CELLSV2_SETUP}" is "superconductor"
- cp $compute_cell_conf $NOVA_CPU_CONF
# FIXME(danms): Should this be configurable?
iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
# Since the nova-compute service cannot reach nova-scheduler over
@@ -924,6 +969,10 @@
iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
fi
+ # Make sure we nuke any database config
+ inidelete $NOVA_CPU_CONF database connection
+ inidelete $NOVA_CPU_CONF api_database connection
+
# Console proxies were configured earlier in create_nova_conf. Now that the
# nova-cpu.conf has been created, configure the console settings required
# by the compute process.
@@ -1161,7 +1210,7 @@
if is_service_enabled n-api; then
if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then
# Note that danms hates these flavors and apologizes for sdague
- openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256
+ openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 cirros256
openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M
openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G
openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index fcb4777..4639869 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -155,9 +155,15 @@
echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
sudo mkdir -p /etc/pki/libvirt-vnc
- sudo chown libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
+ # OpenSSL 1.1.0 generates the key file with permissions: 600, by
+ # default and the deploy_int* methods use 'sudo cp' to copy the
+ # files, making them owned by root:root.
+ # Change ownership of everything under /etc/pki/libvirt-vnc to
+ # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key
+ # file.
+ sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
fi
fi
diff --git a/lib/placement b/lib/placement
index a1602ba..a89cd26 100644
--- a/lib/placement
+++ b/lib/placement
@@ -3,9 +3,6 @@
# lib/placement
# Functions to control the configuration and operation of the **Placement** service
#
-# Currently the placement service is embedded in nova. Eventually we
-# expect this to change so this file is started as a separate entity
-# despite making use of some *NOVA* variables and files.
# Dependencies:
#
@@ -29,23 +26,21 @@
# Defaults
# --------
-PLACEMENT_CONF_DIR=/etc/nova
-PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf
-PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement}
-# Nova virtual environment
+PLACEMENT_DIR=$DEST/placement
+PLACEMENT_CONF_DIR=/etc/placement
+PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf
+PLACEMENT_AUTH_CACHE_DIR=${PLACEMENT_AUTH_CACHE_DIR:-/var/cache/placement}
+PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone}
+# Placement virtual environment
if [[ ${USE_VENV} = True ]]; then
- PROJECT_VENV["nova"]=${NOVA_DIR}.venv
- PLACEMENT_BIN_DIR=${PROJECT_VENV["nova"]}/bin
+ PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv
+ PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin
else
PLACEMENT_BIN_DIR=$(get_python_exec_prefix)
fi
-PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/nova-placement-api
+PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api
PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini
-# The placement service can optionally use a separate database
-# connection. Set PLACEMENT_DB_ENABLED to True to use it.
-PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED)
-
if is_service_enabled tls-proxy; then
PLACEMENT_SERVICE_PROTOCOL="https"
fi
@@ -67,30 +62,28 @@
# cleanup_placement() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_placement {
- sudo rm -f $(apache_site_config_for nova-placement-api)
sudo rm -f $(apache_site_config_for placement-api)
remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
+ sudo rm -f $PLACEMENT_AUTH_CACHE_DIR/*
}
# _config_placement_apache_wsgi() - Set WSGI config files
function _config_placement_apache_wsgi {
local placement_api_apache_conf
local venv_path=""
- local nova_bin_dir=""
- nova_bin_dir=$(get_python_exec_prefix)
+ local placement_bin_dir=""
+ placement_bin_dir=$(get_python_exec_prefix)
placement_api_apache_conf=$(apache_site_config_for placement-api)
- # reuse nova's venv if there is one as placement code lives
- # there
if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
- nova_bin_dir=${PROJECT_VENV["nova"]}/bin
+ venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages"
+ placement_bin_dir=${PROJECT_VENV["placement"]}/bin
fi
sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
sudo sed -e "
s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
+ s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g;
s|%SSLENGINE%|$placement_ssl|g;
s|%SSLCERTFILE%|$placement_certfile|g;
s|%SSLKEYFILE%|$placement_keyfile|g;
@@ -100,29 +93,20 @@
" -i $placement_api_apache_conf
}
-function configure_placement_nova_compute {
- # Use the provided config file path or default to $NOVA_CONF.
- local conf=${1:-$NOVA_CONF}
- iniset $conf placement auth_type "password"
- iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf placement username placement
- iniset $conf placement password "$SERVICE_PASSWORD"
- iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $conf placement project_name "$SERVICE_TENANT_NAME"
- iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
- # TODO(cdent): auth_strategy, which is common to see in these
- # blocks is not currently used here. For the time being the
- # placement api uses the auth_strategy configuration setting
- # established by the nova api. This avoids, for the time, being,
- # creating redundant configuration items that are just used for
- # testing.
+# create_placement_conf() - Write config
+function create_placement_conf {
+ rm -f $PLACEMENT_CONF
+ iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+ iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
+ iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY
+ configure_auth_token_middleware $PLACEMENT_CONF placement $PLACEMENT_AUTH_CACHE_DIR
+ setup_logging $PLACEMENT_CONF
}
# configure_placement() - Set config files, create data dirs, etc
function configure_placement {
- if [ "$PLACEMENT_DB_ENABLED" != False ]; then
- iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
- fi
+ sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR
+ create_placement_conf
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement"
@@ -143,25 +127,28 @@
"$placement_api_url"
}
+# create_placement_cache_dir() - Create directories for keystone cache
+function create_placement_cache_dir {
+ # Create cache dir
+ sudo install -d -o $STACK_USER $PLACEMENT_AUTH_CACHE_DIR
+ rm -f $PLACEMENT_AUTH_CACHE_DIR/*
+}
+
# init_placement() - Create service user and endpoints
-# If PLACEMENT_DB_ENABLED is true, create the separate placement db
-# using, for now, the api_db migrations.
function init_placement {
- if [ "$PLACEMENT_DB_ENABLED" != False ]; then
- recreate_database placement
- # Database migration will be handled when nova does an api_db sync
- # TODO(cdent): When placement is extracted we'll do our own sync
- # here.
- fi
+ recreate_database placement
+ $PLACEMENT_BIN_DIR/placement-manage db sync
create_placement_accounts
+ create_placement_cache_dir
}
# install_placement() - Collect source and prepare
function install_placement {
install_apache_wsgi
# Install the openstackclient placement client plugin for CLI
- # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r.
- pip_install osc-placement
+ pip_install_gr osc-placement
+ git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH
+ setup_develop $PLACEMENT_DIR
}
# start_placement_api() - Start the API processes ahead of other things
diff --git a/lib/swift b/lib/swift
index 3b3e608..e2ee0cb 100644
--- a/lib/swift
+++ b/lib/swift
@@ -607,7 +607,7 @@
# Mount the disk with mount options to make it as efficient as possible
mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
- sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+ sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8 \
${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
fi
diff --git a/lib/tcpdump b/lib/tcpdump
new file mode 100644
index 0000000..16e8269
--- /dev/null
+++ b/lib/tcpdump
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# lib/tcpdump
+# Functions to start and stop a tcpdump
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - start_tcpdump
+# - stop_tcpdump
+
+# Save trace setting
+_XTRACE_TCPDUMP=$(set +o | grep xtrace)
+set +o xtrace
+
+TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap}
+
+# e.g. for iscsi
+# "-i any tcp port 3260"
+TCPDUMP_ARGS=${TCPDUMP_ARGS:-""}
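+# A minimal local.conf sketch to capture iSCSI traffic (illustrative,
+# reusing the example arguments above):
+#   enable_service tcpdump
+#   TCPDUMP_ARGS="-i any tcp port 3260"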
+
+# start_tcpdump() - Start running processes
+function start_tcpdump {
+ # Run a tcpdump with given arguments and save the packet capture
+ if is_service_enabled tcpdump; then
+ if [[ -z "${TCPDUMP_ARGS}" ]]; then
+ die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set"
+ fi
+ touch ${TCPDUMP_OUTPUT}
+ run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root
+ fi
+}
+
+# stop_tcpdump() stop tcpdump process
+function stop_tcpdump {
+ stop_process tcpdump
+}
+
+# Restore xtrace
+$_XTRACE_TCPDUMP
diff --git a/lib/tempest b/lib/tempest
index b8b313e..8c6fa01 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -102,6 +102,14 @@
remove_disabled_services "$extensions_list" "$disabled_exts"
}
+# image_size_in_gib - converts an image size from bytes to GiB, rounded up
+# Takes an image ID parameter as input
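+# For example, a cirros image of 13267968 bytes (an illustrative size)
+# yields ceil(13267968 / 1024^3) = 1 GiB.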
+function image_size_in_gib {
+ local size
+ size=$(openstack image show $1 -c size -f value)
+ echo $size | python -c "import math; print int(math.ceil(float(int(raw_input()) / 1024.0 ** 3)))"
+}
+
# configure_tempest() - Set config files, create data dirs, etc
function configure_tempest {
if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -125,6 +133,7 @@
local public_network_id
local public_router_id
local ssh_connect_method="floating"
+ local disk
# Save IFS
ifs=$IFS
@@ -190,11 +199,15 @@
available_flavors=$(nova flavor-list)
if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
- openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
+ # Determine the flavor disk size based on the image size.
+ disk=$(image_size_in_gib $image_uuid)
+ openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 m1.nano
fi
flavor_ref=42
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
- openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
+ # Determine the alt flavor disk size based on the alt image size.
+ disk=$(image_size_in_gib $image_uuid_alt)
+ openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 m1.micro
fi
flavor_ref_alt=84
else
@@ -242,6 +255,9 @@
# and the public_network_id should not be set.
if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
+ # make sure the shared network's presence does not confuse the tempest tests
+ openstack network create --share shared
+ openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
fi
iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -263,8 +279,6 @@
iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION
iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
- # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation
- iniset $TEMPEST_CONFIG identity admin_domain_scope True
if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
iniset $TEMPEST_CONFIG auth admin_username $admin_username
iniset $TEMPEST_CONFIG auth admin_password "$password"
@@ -279,8 +293,8 @@
iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
fi
iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
- if [[ "$TEMPEST_AUTH_VERSION" != "v2.0" ]]; then
- # we're going to disable v2 admin unless we're using v2.0 by default.
+ if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
+ # we're going to disable v2 admin unless we're using v2 by default.
iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
fi
@@ -461,9 +475,6 @@
TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
fi
iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
- # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
- iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
- iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1)
local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
# Reset microversions to None where v2 is running which does not support microversion.
@@ -582,11 +593,11 @@
fi
# The requirements might be on a different branch, while tempest needs master requirements.
- (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt
+ (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt
# Auth:
- iniset $TEMPEST_CONFIG auth tempest_roles "Member"
+ iniset $TEMPEST_CONFIG auth tempest_roles "member"
if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
@@ -668,7 +679,9 @@
function install_tempest_plugins {
pushd $TEMPEST_DIR
if [[ $TEMPEST_PLUGINS != 0 ]] ; then
- tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt $TEMPEST_PLUGINS
+ # The requirements might be on a different branch, while tempest & tempest plugins needs master requirements.
+ (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
+ tox -evenv-tempest -- pip install -c u-c-m.txt $TEMPEST_PLUGINS
echo "Checking installed Tempest plugins:"
tox -evenv-tempest -- tempest list-plugins
fi
diff --git a/lib/tls b/lib/tls
index e3ed3cc..0032449 100644
--- a/lib/tls
+++ b/lib/tls
@@ -227,9 +227,13 @@
function init_cert {
if [[ ! -r $DEVSTACK_CERT ]]; then
if [[ -n "$TLS_IP" ]]; then
- # Lie to let incomplete match routines work
- # see https://bugs.python.org/issue23239
- TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
+ if python3_enabled; then
+ TLS_IP="IP:$TLS_IP"
+ else
+ # Lie to let incomplete match routines work with python2
+ # see https://bugs.python.org/issue23239
+ TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
+ fi
fi
make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
@@ -543,6 +547,9 @@
LimitRequestFieldSize $f_header_size
RequestHeader set X-Forwarded-Proto "https"
+ # Avoid races (at the cost of performance) where a pooled connection
+ # is re-used after the backend has already closed it (bug 1807518).
+ SetEnv proxy-initial-not-pooled
<Location />
ProxyPass http://$b_host:$b_port/ retry=0 nocanon
ProxyPassReverse http://$b_host:$b_port/
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index afbf11d..bd44153 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -49,7 +49,7 @@
function _check_elasticsearch_ready {
# poll elasticsearch to see if it's started
- if ! wait_for_service 30 http://localhost:9200; then
+ if ! wait_for_service 120 http://localhost:9200; then
die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
fi
}
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index de4f8ed..905806d 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,17 @@
rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
fi
+ # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
+ # failed to start due to denials from SELinux — useful for CentOS
+ # and Fedora machines. For Ubuntu (which runs AppArmor), DevStack
+ # already captures the contents of /var/log/kern.log (via
+ # `journalctl -t kernel` redirected into syslog.txt.gz), which
+ # contains AppArmor-related messages.
+ if [ -f /var/log/audit/audit.log ] ; then
+ sudo cp /var/log/audit/audit.log {{ stage_dir }}/audit.log &&
+ chmod +r {{ stage_dir }}/audit.log;
+ fi
+
# gzip and save any coredumps in /var/core
if [ -d /var/core ]; then
sudo gzip -r /var/core
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
index a34e070..9e3c919 100644
--- a/roles/export-devstack-journal/README.rst
+++ b/roles/export-devstack-journal/README.rst
@@ -1,11 +1,15 @@
Export journal files from devstack services
-Export the systemd journal for every devstack service in native
-journal format as well as text. Also, export a syslog-style file with
-kernal and sudo messages.
+This role performs a number of log collection tasks:
-Writes the output to the ``logs/`` subdirectory of
-``stage_dir``.
+* Export the systemd journal in native format
+* For every devstack service, export logs to text in a file named
+ ``screen-*`` to maintain legacy compatibility from when devstack
+ services ran in a screen session and were logged separately.
+* Export a syslog-style file with kernel and sudo messages for legacy
+ compatibility.
+
+Writes the output to the ``logs/`` subdirectory of ``stage_dir``.
**Role Variables**
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index 6e760c1..cbec444 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -6,32 +6,49 @@
state: directory
owner: "{{ ansible_user }}"
-# TODO: convert this to ansible
-- name: Export journal files
+- name: Export legacy stack screen log files
become: true
shell:
cmd: |
u=""
name=""
- for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do
+ for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do
name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz
done
- # Export the journal in export format to make it downloadable
- # for later searching. It can then be rewritten to a journal native
- # format locally using systemd-journal-remote. This makes a class of
- # debugging much easier. We don't do the native conversion here as
- # some distros do not package that tooling.
- journalctl -u 'devstack@*' -o export | \
- xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
-
- # The journal contains everything running under systemd, we'll
- # build an old school version of the syslog with just the
- # kernel and sudo messages.
+- name: Export legacy syslog.txt
+ become: true
+ shell:
+ # The journal contains everything running under systemd, we'll
+ # build an old school version of the syslog with just the
+ # kernel and sudo messages.
+ cmd: |
journalctl \
-t kernel \
-t sudo \
--no-pager \
--since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
| gzip - > {{ stage_dir }}/logs/syslog.txt.gz
+
+# TODO: convert this to ansible
+# - make a list of the above units
+# - iterate the list here
+- name: Export journal
+ become: true
+ shell:
+ # Export the journal in export format to make it downloadable
+ # for later searching. It can then be rewritten to a journal native
+ # format locally using systemd-journal-remote. This makes a class of
+ # debugging much easier. We don't do the native conversion here as
+ # some distros do not package that tooling.
+ cmd: |
+ journalctl -o export \
+ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
+ | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
+
+- name: Save journal README
+ become: true
+ template:
+ src: devstack.journal.README.txt.j2
+ dest: '{{ stage_dir }}/logs/devstack.journal.README.txt'
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
new file mode 100644
index 0000000..598eb7f
--- /dev/null
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -0,0 +1,33 @@
+Devstack systemd journal
+========================
+
+The devstack.journal file is a copy of the systemd journal during the
+devstack run.
+
+To use it, you will need to convert it so journalctl can read it
+locally. After downloading the file:
+
+ $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
+
+Note this binary is not in the regular path. On Debian/Ubuntu
+platforms, you will need to have the "systemd-journal-remote" package
+installed.
+
+It should result in something like:
+
+ Finishing after writing <large number> entries
+
+You can then use journalctl to examine this file. For example, to see
+all devstack services try:
+
+ $ journalctl --file ./output.journal -u 'devstack@*'
+
+To see just the cinder API server logs, restrict the match with
+
+ $ journalctl --file ./output.journal -u 'devstack@c-api'
+
+There may be many types of logs available in the journal; a command like
+
+ $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u
+
+can help you find interesting things to filter on.
\ No newline at end of file
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
index 5a198b2..276c4e0 100644
--- a/roles/fetch-devstack-log-dir/tasks/main.yaml
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -1,5 +1,10 @@
+# As the user in the guest may not exist on the executor,
+# we do not preserve the group or owner of the copied logs.
+
- name: Collect devstack logs
synchronize:
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
mode: pull
src: "{{ devstack_base_dir }}/logs"
+ group: no
+ owner: no
diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst
index 4ebf839..49d22c3 100644
--- a/roles/setup-devstack-source-dirs/README.rst
+++ b/roles/setup-devstack-source-dirs/README.rst
@@ -9,3 +9,8 @@
:default: /opt/stack
The devstack base directory.
+
+ .. zuul:rolevar:: devstack_sources_branch
+ :default: None
+
+ The target branch to be set up (where available).
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index e6bbae2..160757e 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -1,9 +1,13 @@
-- name: Find all source repos used by this job
+- name: Find all OpenStack source repos used by this job
find:
paths:
- - src/git.openstack.org/openstack
- - src/git.openstack.org/openstack-dev
- - src/git.openstack.org/openstack-infra
+ - src/opendev.org/opendev
+ - src/opendev.org/openstack
+ - src/opendev.org/openstack-dev
+ - src/opendev.org/openstack-infra
+ - src/opendev.org/starlingx
+ - src/opendev.org/x
+ - src/opendev.org/zuul
file_type: directory
register: found_repos
@@ -12,6 +16,59 @@
with_items: '{{ found_repos.files }}'
become: yes
+# Github projects are github.com/username/repo (username might be a
+# top-level project too), so we have to do a two-step swizzle to just
+# get the full repo path (ansible's find module doesn't help with this
+# :/)
+- name: Find top level github projects
+ find:
+ paths:
+ - src/github.com
+ file_type: directory
+ register: found_github_projects
+
+- name: Find actual github repos
+ find:
+ paths: '{{ found_github_projects.files | map(attribute="path") | list }}'
+ file_type: directory
+ register: found_github_repos
+ when: found_github_projects.files
+
+- name: Copy github repos into devstack working directory
+ command: rsync -a {{ item.path }} {{ devstack_base_dir }}
+ with_items: '{{ found_github_repos.files }}'
+ become: yes
+ when: found_github_projects.files
+
+- name: Setup refspec for repos into devstack working directory
+ shell:
+ # Copied almost "as-is" from devstack-gate setup-workspace function
+ # but removing the dependency on functions.sh
+ # TODO this should be rewritten as a python module.
+ cmd: |
+ cd {{ devstack_base_dir }}/{{ item.path | basename }}
+ base_branch={{ devstack_sources_branch }}
+ if git branch -a | grep "$base_branch" > /dev/null ; then
+ git checkout $base_branch
+ elif [[ "$base_branch" == stable/* ]]; then
+ # Look for an eol tag for the stable branch.
+ eol_tag=${base_branch#stable/}-eol
+ if git tag -l |grep $eol_tag >/dev/null; then
+ git checkout $eol_tag
+ git reset --hard $eol_tag
+ if ! git clean -x -f -d -q ; then
+ sleep 1
+ git clean -x -f -d -q
+ fi
+ fi
+ else
+ git checkout master
+ fi
+ args:
+ executable: /bin/bash
+ with_items: '{{ found_repos.files }}'
+ when: devstack_sources_branch is defined
+
- name: Set ownership of repos
file:
path: '{{ devstack_base_dir }}'
diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst
index e9739cd..d0a51e7 100644
--- a/roles/write-devstack-local-conf/README.rst
+++ b/roles/write-devstack-local-conf/README.rst
@@ -88,3 +88,12 @@
If a plugin declares a dependency on another plugin (via
``plugin_requires`` in the plugin's settings file), this role will
automatically emit ``enable_plugin`` lines in the correct order.
+
+.. zuul:rolevar:: tempest_plugins
+ :type: list
+
+ A list of tempest plugins which are installed alongside tempest.
+
+ The list of values will be combined with the base devstack directory
+ and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable
+ already exists, its value is *not* changed.
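+
+ For example, a single list entry ``neutron-tempest-plugin`` (an
+ illustrative plugin name) would result in
+ ``TEMPEST_PLUGINS="/opt/stack/neutron-tempest-plugin"`` being written
+ to the generated localrc, assuming the default devstack base directory.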
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py
index bba7e31..2f97d0e 100644
--- a/roles/write-devstack-local-conf/library/devstack_local_conf.py
+++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py
@@ -155,8 +155,8 @@
continue
self.loadDevstackPluginInfo(settings)
- define_re = re.compile(r'^define_plugin\s+(\w+).*')
- require_re = re.compile(r'^plugin_requires\s+(\w+)\s+(\w+).*')
+ define_re = re.compile(r'^define_plugin\s+(\S+).*')
+ require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*')
def loadDevstackPluginInfo(self, fn):
name = None
reqs = set()
@@ -207,18 +207,23 @@
class LocalConf(object):
def __init__(self, localrc, localconf, base_services, services, plugins,
- base_dir, projects, project):
+ base_dir, projects, project, tempest_plugins):
self.localrc = []
+ self.warnings = []
self.meta_sections = {}
self.plugin_deps = {}
self.base_dir = base_dir
self.projects = projects
self.project = project
- if plugins:
- self.handle_plugins(plugins)
+ self.tempest_plugins = tempest_plugins
if services or base_services:
self.handle_services(base_services, services or {})
self.handle_localrc(localrc)
+ # Plugins must be the last items in localrc, otherwise
+ # the configuration lines which follow them in the file are
+ # not applied to the plugins (for example, the value of DEST).
+ if plugins:
+ self.handle_plugins(plugins)
if localconf:
self.handle_localconf(localconf)
@@ -243,12 +248,19 @@
def handle_localrc(self, localrc):
lfg = False
+ tp = False
if localrc:
vg = VarGraph(localrc)
for k, v in vg.getVars():
- self.localrc.append('{}={}'.format(k, v))
+ # Avoid double quoting
+ if len(v) and v[0]=='"':
+ self.localrc.append('{}={}'.format(k, v))
+ else:
+ self.localrc.append('{}="{}"'.format(k, v))
if k == 'LIBS_FROM_GIT':
lfg = True
+ elif k == 'TEMPEST_PLUGINS':
+ tp = True
if not lfg and (self.projects or self.project):
required_projects = []
@@ -263,6 +275,19 @@
self.localrc.append('LIBS_FROM_GIT={}'.format(
','.join(required_projects)))
+ if self.tempest_plugins:
+ if not tp:
+ tp_dirs = []
+ for tempest_plugin in self.tempest_plugins:
+ tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
+ self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
+ ' '.join(tp_dirs)))
+ else:
+ self.warnings.append('TEMPEST_PLUGINS already defined ({}),'
+ ' requested value {} ignored'.format(
+ tp, self.tempest_plugins))
+
+
def handle_localconf(self, localconf):
for phase, phase_data in localconf.items():
for fn, fn_data in phase_data.items():
@@ -297,6 +322,7 @@
path=dict(type='str'),
projects=dict(type='dict'),
project=dict(type='dict'),
+ tempest_plugins=dict(type='list'),
)
)
@@ -308,10 +334,11 @@
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
- p.get('project'))
+ p.get('project'),
+ p.get('tempest_plugins'))
lc.write(p['path'])
- module.exit_json()
+ module.exit_json(warnings=lc.warnings)
try:
diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py
index 791552d..7c526b3 100644
--- a/roles/write-devstack-local-conf/library/test.py
+++ b/roles/write-devstack-local-conf/library/test.py
@@ -23,6 +23,20 @@
from collections import OrderedDict
class TestDevstackLocalConf(unittest.TestCase):
+
+ @staticmethod
+ def _init_localconf(p):
+ lc = LocalConf(p.get('localrc'),
+ p.get('local_conf'),
+ p.get('base_services'),
+ p.get('services'),
+ p.get('plugins'),
+ p.get('base_dir'),
+ p.get('projects'),
+ p.get('project'),
+ p.get('tempest_plugins'))
+ return lc
+
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
@@ -40,9 +54,9 @@
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
- ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
- ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
- ('baz', 'git://git.openstack.org/openstack/baz-plugin'),
+ ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+ ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
+ ('baz', 'https://git.openstack.org/openstack/baz-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
@@ -51,14 +65,7 @@
plugins=plugins,
base_dir='./test',
path=os.path.join(self.tmpdir, 'test.local.conf'))
- lc = LocalConf(p.get('localrc'),
- p.get('local_conf'),
- p.get('base_services'),
- p.get('services'),
- p.get('plugins'),
- p.get('base_dir'),
- p.get('projects'),
- p.get('project'))
+ lc = self._init_localconf(p)
lc.write(p['path'])
plugins = []
@@ -78,12 +85,12 @@
with open(os.path.join(
self.tmpdir,
'foo-plugin', 'devstack', 'settings'), 'w') as f:
- f.write('define_plugin foo\n')
+ f.write('define_plugin foo-plugin\n')
with open(os.path.join(
self.tmpdir,
'bar-plugin', 'devstack', 'settings'), 'w') as f:
- f.write('define_plugin bar\n')
- f.write('plugin_requires bar foo\n')
+ f.write('define_plugin bar-plugin\n')
+ f.write('plugin_requires bar-plugin foo-plugin\n')
localrc = {'test_localrc': '1'}
local_conf = {'install':
@@ -94,8 +101,8 @@
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
- ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
- ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+ ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'),
+ ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
@@ -104,6 +111,15 @@
plugins=plugins,
base_dir=self.tmpdir,
path=os.path.join(self.tmpdir, 'test.local.conf'))
+ lc = self._init_localconf(p)
+ lc.write(p['path'])
+
+ plugins = []
+ with open(p['path']) as f:
+ for line in f:
+ if line.startswith('enable_plugin'):
+ plugins.append(line.split()[1])
+ self.assertEqual(['foo-plugin', 'bar-plugin'], plugins)
def test_libs_from_git(self):
"Test that LIBS_FROM_GIT is auto-generated"
@@ -129,14 +145,7 @@
path=os.path.join(self.tmpdir, 'test.local.conf'),
projects=projects,
project=project)
- lc = LocalConf(p.get('localrc'),
- p.get('local_conf'),
- p.get('base_services'),
- p.get('services'),
- p.get('plugins'),
- p.get('base_dir'),
- p.get('projects'),
- p.get('project'))
+ lc = self._init_localconf(p)
lc.write(p['path'])
lfg = None
@@ -168,14 +177,7 @@
base_dir='./test',
path=os.path.join(self.tmpdir, 'test.local.conf'),
projects=projects)
- lc = LocalConf(p.get('localrc'),
- p.get('local_conf'),
- p.get('base_services'),
- p.get('services'),
- p.get('plugins'),
- p.get('base_dir'),
- p.get('projects'),
- p.get('project'))
+ lc = self._init_localconf(p)
lc.write(p['path'])
lfg = None
@@ -183,7 +185,25 @@
for line in f:
if line.startswith('LIBS_FROM_GIT'):
lfg = line.strip().split('=')[1]
- self.assertEqual('oslo.db', lfg)
+ self.assertEqual('"oslo.db"', lfg)
+
+ def test_avoid_double_quote(self):
+ "Test that there a no duplicated quotes"
+ localrc = {'TESTVAR': '"quoted value"'}
+ p = dict(localrc=localrc,
+ base_services=[],
+ base_dir='./test',
+ path=os.path.join(self.tmpdir, 'test.local.conf'),
+ projects={})
+ lc = self._init_localconf(p)
+ lc.write(p['path'])
+
+ testvar = None
+ with open(p['path']) as f:
+ for line in f:
+ if line.startswith('TESTVAR'):
+ testvar = line.strip().split('=')[1]
+ self.assertEqual('"quoted value"', testvar)
def test_plugin_circular_deps(self):
"Test that plugins with circular dependencies fail"
@@ -211,8 +231,8 @@
# We use ordereddict here to make sure the plugins are in the
# *wrong* order for testing.
plugins = OrderedDict([
- ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
- ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+ ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+ ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
])
p = dict(localrc=localrc,
local_conf=local_conf,
@@ -222,14 +242,50 @@
base_dir=self.tmpdir,
path=os.path.join(self.tmpdir, 'test.local.conf'))
with self.assertRaises(Exception):
- lc = LocalConf(p.get('localrc'),
- p.get('local_conf'),
- p.get('base_services'),
- p.get('services'),
- p.get('plugins'),
- p.get('base_dir'))
+ lc = self._init_localconf(p)
lc.write(p['path'])
+ def _find_tempest_plugins_value(self, file_path):
+ tp = None
+ with open(file_path) as f:
+ for line in f:
+ if line.startswith('TEMPEST_PLUGINS'):
+ found = line.strip().split('=')[1]
+ self.assertIsNone(tp,
+ "TEMPEST_PLUGIN ({}) found again ({})".format(
+ tp, found))
+ tp = found
+ return tp
+
+ def test_tempest_plugins(self):
+ "Test that TEMPEST_PLUGINS is correctly populated."
+ p = dict(base_services=[],
+ base_dir='./test',
+ path=os.path.join(self.tmpdir, 'test.local.conf'),
+ tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+ lc = self._init_localconf(p)
+ lc.write(p['path'])
+
+ tp = self._find_tempest_plugins_value(p['path'])
+ self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp)
+ self.assertEqual(len(lc.warnings), 0)
+
+ def test_tempest_plugins_not_overridden(self):
+ """Test that the existing value of TEMPEST_PLUGINS is not overridden
+ by the user-provided value, but a warning is emitted."""
+ localrc = {'TEMPEST_PLUGINS': 'someplugin'}
+ p = dict(localrc=localrc,
+ base_services=[],
+ base_dir='./test',
+ path=os.path.join(self.tmpdir, 'test.local.conf'),
+ tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+ lc = self._init_localconf(p)
+ lc.write(p['path'])
+
+ tp = self._find_tempest_plugins_value(p['path'])
+ self.assertEqual('"someplugin"', tp)
+ self.assertEqual(len(lc.warnings), 1)
+
if __name__ == '__main__':
unittest.main()
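
The new _init_localconf helper keeps the nine-argument constructor call in one place. As a quick check, the two TEMPEST_PLUGINS tests added above can be run on their own; this sketch assumes the working directory is roles/write-devstack-local-conf/library/ and that ansible is importable:

    # Minimal runner for the new tests; module and test names are the ones
    # introduced in this change.
    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromNames([
        'test.TestDevstackLocalConf.test_tempest_plugins',
        'test.TestDevstackLocalConf.test_tempest_plugins_not_overridden',
    ])
    unittest.TextTestRunner(verbosity=2).run(suite)
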
diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml
index 9a6b083..bfd0860 100644
--- a/roles/write-devstack-local-conf/tasks/main.yaml
+++ b/roles/write-devstack-local-conf/tasks/main.yaml
@@ -10,4 +10,5 @@
local_conf: "{{ devstack_local_conf|default(omit) }}"
base_dir: "{{ devstack_base_dir|default(omit) }}"
projects: "{{ zuul.projects }}"
- project: "{{ zuul.project }}"
\ No newline at end of file
+ project: "{{ zuul.project }}"
+ tempest_plugins: "{{ tempest_plugins|default(omit) }}"
diff --git a/setup.cfg b/setup.cfg
index fcd2b13..825d386 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,7 +4,7 @@
description-file =
README.rst
author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/devstack/latest
classifier =
Intended Audience :: Developers
diff --git a/stack.sh b/stack.sh
index 56e00bf..4f6e5b6 100755
--- a/stack.sh
+++ b/stack.sh
@@ -60,6 +60,9 @@
LC_ALL=en_US.utf8
export LC_ALL
+# Clear all OpenStack related envvars
+unset `env | grep -E '^OS_' | cut -d = -f 1`
+
# Make sure umask is sane
umask 022
@@ -221,7 +224,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f28|f29|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -335,6 +338,13 @@
# no-op on other platforms.
sudo yum-config-manager --enable rhel-7-server-optional-rpms
+ # Enable the Software Collections (SCL) repository for CentOS.
+ # This repository includes useful software (e.g. the Go Toolset)
+ # which is not present in the main repository.
+ if [[ "$os_VENDOR" =~ (CentOS) ]]; then
+ yum_install centos-release-scl
+ fi
+
if is_oraclelinux; then
sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
fi
@@ -607,6 +617,7 @@
source $TOP_DIR/lib/neutron
source $TOP_DIR/lib/ldap
source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/tcpdump
source $TOP_DIR/lib/etcd3
# Extras Source
@@ -794,6 +805,11 @@
# Install required infra support libraries
install_infra
+# Install bindep
+$VIRTUALENV_CMD $DEST/bindep-venv
+# TODO(ianw) : optionally install from zuul checkout?
+$DEST/bindep-venv/bin/pip install bindep
+
# Extras Pre-install
# ------------------
# Phase: pre-install
@@ -820,6 +836,18 @@
install_etcd3
fi
+# Setup TLS certs
+# ---------------
+
+# Do this early, before any webservers are set up to ensure
+# we don't run into problems with missing certs when apache
+# is restarted.
+if is_service_enabled tls-proxy; then
+ configure_CA
+ init_CA
+ init_cert
+fi
+
# Check Out and Install Source
# ----------------------------
@@ -844,13 +872,6 @@
install_neutronclient
fi
-# Setup TLS certs
-if is_service_enabled tls-proxy; then
- configure_CA
- init_CA
- init_cert
-fi
-
# Install middleware
install_keystonemiddleware
@@ -894,8 +915,6 @@
stack_install_service neutron
fi
-# Nova configuration is used by placement so we need to create nova.conf
-# first.
if is_service_enabled nova; then
# Compute service
stack_install_service nova
@@ -1043,6 +1062,12 @@
# A better kind of sysstat, with the top process per time slice
start_dstat
+# Run a background tcpdump for debugging
+# Note: TCPDUMP_ARGS must be set when the tcpdump service is enabled
+if is_service_enabled tcpdump; then
+ start_tcpdump
+fi
+
# Etcd
# -----
@@ -1137,6 +1162,7 @@
echo_summary "Configuring Neutron"
configure_neutron
+
# Run init_neutron only on the node hosting the Neutron API server
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled neutron; then
init_neutron
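
The new unset line near the top of stack.sh drops any OS_* credentials inherited from the calling shell so they cannot leak into the deployment. A rough Python rendering of that one filter, purely for illustration (stack.sh does this in shell, not Python):

    # Illustration only: equivalent of `unset $(env | grep -E '^OS_' | cut -d = -f 1)`.
    import os

    for name in [n for n in os.environ if n.startswith('OS_')]:
        del os.environ[name]
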
diff --git a/stackrc b/stackrc
index 2088bf4..2924d39 100644
--- a/stackrc
+++ b/stackrc
@@ -129,15 +129,9 @@
# Control whether Python 3 should be used at all.
export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
-# Control whether Python 3 is enabled for specific services by the
-# base name of the directory from which they are installed. See
-# enable_python3_package to edit this variable and use_python3_for to
-# test membership.
-export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,openstacksdk"
-
# Explicitly list services not to run under Python 3. See
# disable_python3_package to edit this variable.
-export DISABLED_PYTHON3_PACKAGES=""
+export DISABLED_PYTHON3_PACKAGES="swift"
# When Python 3 is supported by an application, adding the specific
# version of Python 3 to this variable will install the app using that
@@ -149,6 +143,13 @@
_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
+# Create a virtualenv with this
+if [[ ${USE_PYTHON3} == True ]]; then
+ export VIRTUALENV_CMD="python3 -m venv"
+else
+ export VIRTUALENV_CMD="virtualenv "
+fi
+
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
# Old-style user-supplied config
@@ -236,8 +237,7 @@
# ------------
# Base GIT Repo URL
-# Another option is https://git.openstack.org
-GIT_BASE=${GIT_BASE:-git://git.openstack.org}
+GIT_BASE=${GIT_BASE:-https://git.openstack.org}
# The location of REQUIREMENTS once cloned
REQUIREMENTS_DIR=$DEST/requirements
@@ -258,7 +258,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
-DEVSTACK_SERIES="rocky"
+DEVSTACK_SERIES="train"
##############
#
@@ -298,6 +298,10 @@
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH}
+# placement service
+PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git}
+PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH}
+
##############
#
# Testing Components
@@ -603,7 +607,7 @@
# a websockets/html5 or flash powered VNC console for vm instances
NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -681,7 +685,7 @@
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
-CIRROS_VERSION=${CIRROS_VERSION:-"0.3.5"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -732,10 +736,10 @@
EXTRA_CACHE_URLS=""
# etcd3 defaults
-ETCD_VERSION=${ETCD_VERSION:-v3.2.17}
-ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"}
-ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"}
-ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"}
+ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
# etcd v3.2.x doesn't have anything for s390x
ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
# Make sure etcd3 downloads the correct architecture
@@ -763,7 +767,7 @@
fi
ETCD_PORT=${ETCD_PORT:-2379}
ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380}
-ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
+ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download}
ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index adf20cd..08143d2 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -272,7 +272,7 @@
export_proxy_variables
expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
- results=$(env | egrep '(http(s)?|no)_proxy=')
+ results=$(env | egrep '(http(s)?|no)_proxy=' | sort)
if [[ $expected = $results ]]; then
passed "OK: Proxy variables are exported when proxy variables are set"
else
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index f7dc89a..6ed1647 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -125,14 +125,14 @@
assert_equal "$VAL" "33,44" "inset at EOF"
# test empty option
-if ini_has_option ${TEST_INI} ddd empty; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
passed "ini_has_option: ddd.empty present"
else
failed "ini_has_option failed: ddd.empty not found"
fi
# test non-empty option
-if ini_has_option ${TEST_INI} bbb handlers; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} bbb handlers; then
passed "ini_has_option: bbb.handlers present"
else
failed "ini_has_option failed: bbb.handlers not found"
diff --git a/tests/test_python.sh b/tests/test_python.sh
index 8652798..1f5453c 100755
--- a/tests/test_python.sh
+++ b/tests/test_python.sh
@@ -12,14 +12,9 @@
echo "Testing Python 3 functions"
# Initialize variables manipulated by functions under test.
-export ENABLED_PYTHON3_PACKAGES=""
export DISABLED_PYTHON3_PACKAGES=""
-assert_false "should not be enabled yet" python3_enabled_for testpackage1
-
-enable_python3_package testpackage1
-assert_equal "$ENABLED_PYTHON3_PACKAGES" "testpackage1" "unexpected result"
-assert_true "should be enabled" python3_enabled_for testpackage1
+assert_true "should be enabled by default" python3_enabled_for testpackage1
assert_false "should not be disabled yet" python3_disabled_for testpackage2
diff --git a/tools/dstat.sh b/tools/dstat.sh
index 01c6d9b..e6cbb0f 100755
--- a/tools/dstat.sh
+++ b/tools/dstat.sh
@@ -12,8 +12,17 @@
# Retrieve log directory as argument from calling script.
LOGDIR=$1
+DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem"
+if dstat --version | grep -q 'pcp-dstat' ; then
+ # dstat is unmaintained and is being replaced by a plugin for
+ # Performance Co-Pilot (pcp-dstat). Fedora 29, for example, has
+ # already rolled this out. It is mostly compatible, except for a
+ # few options which are not implemented (yet?)
+ DSTAT_TOP_OPTS=""
+fi
+
# Command line arguments for primary DStat process.
-DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap --tcp"
+DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp"
# Command-line arguments for secondary background DStat process.
DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log"
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 9147932..7482239 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -202,7 +202,40 @@
# install requests with the bundled urllib3 to avoid conflicts
pip_install --upgrade --force-reinstall requests
fi
+
fi
+
+ # Since pip10, pip will refuse to uninstall files from packages
+ # that were created with distutils (rather than more modern
+ # setuptools). This is because it technically doesn't have a
+ # manifest of what to remove. However, in most cases, simply
+ # overwriting works. So this hacks around those packages that
+ # have been dragged in by some other system dependency
+ sudo rm -rf /usr/lib/python2.7/site-packages/enum34*.egg-info
+ sudo rm -rf /usr/lib/python2.7/site-packages/ipaddress*.egg-info
+ sudo rm -rf /usr/lib/python2.7/site-packages/ply-*.egg-info
+ sudo rm -rf /usr/lib/python2.7/site-packages/typing-*.egg-info
+}
+
+function fixup_suse {
+ if ! is_suse; then
+ return
+ fi
+
+ # Disable apparmor profiles in openSUSE distros
+ # to avoid issues with haproxy and dnsmasq
+ if [ -x /usr/sbin/aa-enabled ] && sudo /usr/sbin/aa-enabled -q; then
+ sudo systemctl disable apparmor
+ sudo /usr/sbin/aa-teardown
+ fi
+
+ # Since pip10, pip will refuse to uninstall files from packages
+ # that were created with distutils (rather than more modern
+ # setuptools). This is because it technically doesn't have a
+ # manifest of what to remove. However, in most cases, simply
+ # overwriting works. So this hacks around those packages that
+ # have been dragged in by some other system dependency
+ sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
}
# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
@@ -239,5 +272,6 @@
fixup_uca
fixup_python_packages
fixup_fedora
+ fixup_suse
fixup_virtualenv
}
diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh
index 95f1331..27c9c41 100755
--- a/tools/generate-devstack-plugins-list.sh
+++ b/tools/generate-devstack-plugins-list.sh
@@ -65,7 +65,7 @@
# ====================== ===
# Plugin Name URL
# ====================== ===
-# foobar `git://... <http://...>`__
+# foobar `https://... <https://...>`__
# ...
printf "\n\n"
@@ -74,7 +74,7 @@
title_underline ${name_col_len}
for plugin in ${sorted_plugins}; do
- giturl="git://git.openstack.org/openstack/${plugin}"
+ giturl="https://git.openstack.org/openstack/${plugin}"
gitlink="https://git.openstack.org/cgit/openstack/${plugin}"
printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__"
done
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index eb7265f..9187c66 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -41,12 +41,19 @@
'auth_url': args.os_auth_url,
'username': args.os_username,
'password': args.os_password,
- 'project_name': args.os_project_name,
},
}
- if args.os_identity_api_version == '3':
+ if args.os_project_name and args.os_system_scope:
+ print(
+ "WARNING: os_project_name and os_system_scope were both"
+ " given. os_system_scope will take priority.")
+ if args.os_project_name and not args.os_system_scope:
+ self._cloud_data['auth']['project_name'] = args.os_project_name
+ if args.os_identity_api_version == '3' and not args.os_system_scope:
self._cloud_data['auth']['user_domain_id'] = 'default'
self._cloud_data['auth']['project_domain_id'] = 'default'
+ if args.os_system_scope:
+ self._cloud_data['auth']['system_scope'] = args.os_system_scope
if args.os_cacert:
self._cloud_data['cacert'] = args.os_cacert
@@ -83,12 +90,13 @@
parser.add_argument('--os-cloud', required=True)
parser.add_argument('--os-region-name', default='RegionOne')
parser.add_argument('--os-identity-api-version', default='3')
- parser.add_argument('--os-volume-api-version', default='2')
+ parser.add_argument('--os-volume-api-version', default='3')
parser.add_argument('--os-cacert')
parser.add_argument('--os-auth-url', required=True)
parser.add_argument('--os-username', required=True)
parser.add_argument('--os-password', required=True)
- parser.add_argument('--os-project-name', required=True)
+ parser.add_argument('--os-project-name')
+ parser.add_argument('--os-system-scope')
args = parser.parse_args()
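
With the new --os-system-scope option, update_clouds_yaml.py can emit a system-scoped cloud entry: project_name and the default domain ids are only written for project-scoped clouds, and system_scope wins when both are supplied. A sketch of the two resulting auth sections, with illustrative values only:

    # Illustrative values; these mirror the auth dicts the script builds
    # for identity API v3.
    project_scoped_auth = {
        'auth_url': 'https://example.net/identity',
        'username': 'demo',
        'password': 'secret',
        'project_name': 'demo',
        'user_domain_id': 'default',
        'project_domain_id': 'default',
    }

    system_scoped_auth = {
        'auth_url': 'https://example.net/identity',
        'username': 'admin',
        'password': 'secret',
        # no project_name and no domain defaults; scope comes from system_scope
        'system_scope': 'all',
    }
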
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 7506082..88af19d 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -163,7 +163,9 @@
def network_dump():
_header("Network Dump")
+ _dump_cmd("bridge link")
_dump_cmd("brctl show")
+ _dump_cmd("ip link show type bridge")
ip_cmds = ["neigh", "addr", "link", "route"]
for cmd in ip_cmds + ['netns']:
_dump_cmd("ip %s" % cmd)
diff --git a/tox.ini b/tox.ini
index 74436b0..f643fdb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,7 @@
install_command = pip install {opts} {packages}
[testenv:bashate]
+basepython = python3
# if you want to test out some changes you have made to bashate
# against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
# modified bashate tree
@@ -34,6 +35,7 @@
-print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
+basepython = python3
deps = -r{toxinidir}/doc/requirements.txt
whitelist_externals = bash
setenv =
@@ -42,5 +44,6 @@
python setup.py build_sphinx
[testenv:venv]
+basepython = python3
deps = -r{toxinidir}/doc/requirements.txt
commands = {posargs}