Merge "remove unnecessary cleanup in nova startup"
diff --git a/.gitignore b/.gitignore
index d2c127d..8553b3f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
*.log
*.log.[1-9]
*.pem
+*.pyc
.localrc.auto
.localrc.password
.prereqs
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..ad5eb0a
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,480 @@
+- nodeset:
+ name: openstack-single-node
+ nodes:
+ - name: controller
+ label: ubuntu-xenial
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-centos-7
+ nodes:
+ - name: controller
+ label: centos-7
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-opensuse-423
+ nodes:
+ - name: controller
+ label: opensuse-423
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-opensuse-tumbleweed
+ nodes:
+ - name: controller
+ label: opensuse-tumbleweed
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: devstack-single-node-fedora-27
+ nodes:
+ - name: controller
+ label: fedora-27
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+- nodeset:
+ name: openstack-two-node
+ nodes:
+ - name: controller
+ label: ubuntu-xenial
+ - name: compute1
+ label: ubuntu-xenial
+ groups:
+ # Node where tests are executed and test results collected
+ - name: tempest
+ nodes:
+ - controller
+ # Nodes running the compute service
+ - name: compute
+ nodes:
+ - controller
+ - compute1
+ # Nodes that are not the controller
+ - name: subnode
+ nodes:
+ - compute1
+ # Switch node for multinode networking setup
+ - name: switch
+ nodes:
+ - controller
+ # Peer nodes for multinode networking setup
+ - name: peers
+ nodes:
+ - compute1
+
+- job:
+ name: devstack-base
+ parent: multinode
+ abstract: true
+ description: |
+ Base abstract Devstack job.
+
+ Defines plays and base variables, but it does not include any project
+ and it does not run any service by default. This is a common base for
+ all Devstack jobs, single or multinode.
+ Variables are defined in job.vars, which is what is then used by single
+ node jobs and by multi node jobs for the controller, as well as in
+ job.group-vars.subnode, which is what is used by multi node jobs for subnode
+ nodes (everything but the controller).
+ required-projects:
+ - openstack-dev/devstack
+ roles:
+ - zuul: openstack-infra/devstack-gate
+ - zuul: openstack-infra/openstack-zuul-jobs
+ vars:
+ devstack_localrc:
+ DATABASE_PASSWORD: secretdatabase
+ RABBIT_PASSWORD: secretrabbit
+ ADMIN_PASSWORD: secretadmin
+ SERVICE_PASSWORD: secretservice
+ NETWORK_GATEWAY: 10.1.0.1
+ FIXED_RANGE: 10.1.0.0/20
+ IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+ FLOATING_RANGE: 172.24.5.0/24
+ PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+ LOGFILE: /opt/stack/logs/devstacklog.txt
+ LOG_COLOR: false
+ VERBOSE: true
+ VERBOSE_NO_TIMESTAMP: true
+ NOVNC_FROM_PACKAGE: true
+ ERROR_ON_CLONE: true
+ # Gate jobs can't deal with nested virt. Disable it.
+ LIBVIRT_TYPE: qemu
+ devstack_services:
+ # Ignore any default set by devstack. Emit a "disable_all_services".
+ base: false
+ zuul_copy_output:
+ '{{ devstack_conf_dir }}/local.conf': 'logs'
+ '{{ devstack_conf_dir }}/localrc': 'logs'
+ '{{ devstack_conf_dir }}/.localrc.auto': 'logs'
+ '{{ devstack_conf_dir }}/.stackenv': 'logs'
+ '{{ devstack_log_dir }}/dstat-csv.log': 'logs'
+ '{{ devstack_log_dir }}/devstacklog.txt': 'logs'
+ '{{ devstack_log_dir }}/devstacklog.txt.summary': 'logs'
+ '{{ devstack_full_log }}': 'logs'
+ '{{ stage_dir }}/verify_tempest_conf.log': 'logs'
+ '{{ stage_dir }}/apache': 'logs'
+ '{{ stage_dir }}/apache_config': 'logs'
+ '{{ stage_dir }}/etc': 'logs'
+ '/var/log/rabbitmq': 'logs'
+ '/var/log/postgresql': 'logs'
+ '/var/log/mysql.err': 'logs'
+ '/var/log/mysql.log': 'logs'
+ '/var/log/libvirt': 'logs'
+ '/etc/sudoers': 'logs'
+ '/etc/sudoers.d': 'logs'
+ '{{ stage_dir }}/iptables.txt': 'logs'
+ '{{ stage_dir }}/df.txt': 'logs'
+ '{{ stage_dir }}/pip2-freeze.txt': 'logs'
+ '{{ stage_dir }}/pip3-freeze.txt': 'logs'
+ '{{ stage_dir }}/dpkg-l.txt': 'logs'
+ '{{ stage_dir }}/rpm-qa.txt': 'logs'
+ '{{ stage_dir }}/core': 'logs'
+ '{{ stage_dir }}/listen53.txt': 'logs'
+ '{{ stage_dir }}/deprecations.log': 'logs'
+ '/var/log/ceph': 'logs'
+ '/var/log/openvswitch': 'logs'
+ '/var/log/glusterfs': 'logs'
+ '/etc/glusterfs/glusterd.vol': 'logs'
+ '/etc/resolv.conf': 'logs'
+ '/var/log/unbound.log': 'logs'
+ extensions_to_txt:
+ conf: True
+ log: True
+ localrc: True
+ stackenv: True
+ auto: True
+ group-vars:
+ subnode:
+ devstack_localrc:
+ DATABASE_PASSWORD: secretdatabase
+ RABBIT_PASSWORD: secretrabbit
+ ADMIN_PASSWORD: secretadmin
+ SERVICE_PASSWORD: secretservice
+ NETWORK_GATEWAY: 10.1.0.1
+ FIXED_RANGE: 10.1.0.0/20
+ IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
+ FLOATING_RANGE: 172.24.5.0/24
+ PUBLIC_NETWORK_GATEWAY: 172.24.5.1
+ LOGFILE: /opt/stack/logs/devstacklog.txt
+ LOG_COLOR: false
+ VERBOSE: true
+ VERBOSE_NO_TIMESTAMP: true
+ NOVNC_FROM_PACKAGE: true
+ ERROR_ON_CLONE: true
+ LIBVIRT_TYPE: qemu
+ devstack_services:
+ base: false
+ pre-run: playbooks/pre.yaml
+ run: playbooks/devstack.yaml
+ post-run: playbooks/post.yaml
+ irrelevant-files:
+ # Documentation related
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^releasenotes/.*$
+ # Translations
+ - ^.*/locale/.*po$
+
+- job:
+ name: devstack
+ parent: devstack-base
+ description: |
+ Base devstack job for integration gate.
+
+ This base job can be used for single node and multinode devstack jobs.
+
+ With a single node nodeset, this job sets up an "all-in-one" (aio)
+ devstack with the six OpenStack services included in the devstack tree:
+ keystone, glance, cinder, neutron, nova and swift.
+
+ With a two node nodeset, this job sets up an aio + compute node.
+ The controller can be customised using host-vars.controller; the
+ sub-nodes can be customised using group-vars.subnode.
+
+ Descendant jobs can enable or disable services, add devstack configuration
+ options, enable devstack plugins, configure log files or directories to be
+ transferred to the log server.
+
+ The job assumes that there is only one controller node. The number of
+ subnodes can be scaled up seamlessly by setting a custom nodeset in
+ job.nodeset.
+
+ The run playbook consists of a single role, so it can be easily rewritten
+ and extended.
+ nodeset: openstack-single-node
+ required-projects:
+ - openstack/cinder
+ - openstack/glance
+ - openstack/keystone
+ - openstack/neutron
+ - openstack/nova
+ - openstack/requirements
+ - openstack/swift
+ timeout: 7200
+ vars:
+ devstack_localrc:
+ # Common OpenStack services settings
+ SWIFT_REPLICAS: 1
+ SWIFT_START_ALL_SERVICES: false
+ SWIFT_HASH: 1234123412341234
+ CINDER_PERIODIC_INTERVAL: 10
+ DEBUG_LIBVIRT_COREDUMPS: True
+ NOVA_VNC_ENABLED: true
+ VNCSERVER_LISTEN: 0.0.0.0
+ VNCSERVER_PROXYCLIENT_ADDRESS: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
+ # Multinode specific settings
+ SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+ HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+ PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ global_physnet_mtu: "{{ external_bridge_mtu }}"
+ devstack_services:
+ # Core services enabled for this branch.
+ # This list replaces the test-matrix.
+ # Shared services
+ dstat: true
+ etcd3: true
+ mysql: true
+ peakmem_tracker: true
+ rabbit: true
+ tls-proxy: true
+ # Keystone services
+ key: true
+ # Glance services
+ g-api: true
+ g-reg: true
+ # Nova services
+ n-api: true
+ n-api-meta: true
+ n-cauth: true
+ n-cond: true
+ n-cpu: true
+ n-novnc: true
+ n-obj: true
+ n-sch: true
+ placement-api: true
+ # Neutron services
+ # We need to keep using the neutron-legacy based services for
+ # now until all issues with the new lib/neutron code are solved
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ q-svc: true
+ # neutron-api: true
+ # neutron-agent: true
+ # neutron-dhcp: true
+ # neutron-l3: true
+ # neutron-metadata-agent: true
+ # neutron-metering: true
+ # Swift services
+ s-account: true
+ s-container: true
+ s-object: true
+ s-proxy: true
+ # Cinder services
+ c-api: true
+ c-bak: true
+ c-sch: true
+ c-vol: true
+ cinder: true
+ # Services we don't need.
+ # This section is not strictly needed; it's here for readability.
+ horizon: false
+ tempest: false
+ # The test matrix emits ceilometer, but ceilometer is not installed in the
+ # integrated gate, so specifying the services has no effect.
+ # ceilometer-*: false
+ group-vars:
+ subnode:
+ devstack_services:
+ # Core services enabled for this branch.
+ # This list replaces the test-matrix.
+ # Shared services
+ dstat: true
+ peakmem_tracker: true
+ tls-proxy: true
+ # Nova services
+ n-cpu: true
+ placement-client: true
+ # Neutron services
+ neutron-agent: true
+ # Cinder services
+ c-bak: true
+ c-vol: true
+ # Services we don't run at all on subnode.
+ # This section is not strictly needed; it's here for readability.
+ # keystone: false
+ # s-*: false
+ horizon: false
+ tempest: false
+ # The test matrix emits ceilometer, but ceilometer is not installed in the
+ # integrated gate, so specifying the services has no effect.
+ # ceilometer-*: false
+ devstack_localrc:
+ # Multinode specific settings
+ HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
+ SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+ PUBLIC_BRIDGE_MTU: "{{ external_bridge_mtu }}"
+ # Subnode specific settings
+ DATABASE_TYPE: mysql
+ GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
+ Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+ RABBIT_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+ DATABASE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
+
+- job:
+ name: devstack-multinode
+ parent: devstack
+ nodeset: openstack-two-node
+ description: |
+ A simple multinode test to verify multinode functionality on the devstack side.
+ This is not meant to be used as a parent job.
+
+# NOTE(ianw) Platform tests have traditionally been non-voting because
+# we often have to rush things through devstack to stabilise the gate,
+# and these platforms don't have the round-the-clock support to avoid
+# becoming blockers in that situation.
+- job:
+ name: devstack-platform-centos-7
+ parent: tempest-full
+ description: Centos 7 platform test
+ nodeset: devstack-single-node-centos-7
+ voting: false
+
+- job:
+ name: devstack-platform-opensuse-423
+ parent: tempest-full
+ description: openSUSE 42.3 platform test
+ nodeset: devstack-single-node-opensuse-423
+ voting: false
+
+- job:
+ name: devstack-platform-opensuse-tumbleweed
+ parent: tempest-full
+ description: openSUSE Tumbleweed platform test
+ nodeset: devstack-single-node-opensuse-tumbleweed
+ voting: false
+
+- job:
+ name: devstack-platform-fedora-27
+ parent: tempest-full
+ description: Fedora 27 platform test
+ nodeset: devstack-single-node-fedora-27
+ voting: false
+
+- job:
+ name: devstack-tox-base
+ parent: devstack
+ description: |
+ Base job for devstack-based functional tests that use tox.
+
+ This job is not intended to be run directly. It's just here
+ for organizational purposes for devstack-tox-functional and
+ devstack-tox-functional-consumer.
+ post-run: playbooks/tox/post.yaml
+ vars:
+ tox_envlist: functional
+ tox_install_siblings: false
+
+- job:
+ name: devstack-tox-functional
+ parent: devstack-tox-base
+ description: |
+ Base job for devstack-based functional tests that use tox.
+
+ Runs devstack, then runs the tox ``functional`` environment,
+ then collects tox/testr build output like normal tox jobs.
+
+ Turns off tox sibling installation. Projects may be involved
+ in the devstack deployment and so may be in the required-projects
+ list, but may not want to test against master of the other
+ projects in their tox env. Child jobs can set tox_install_siblings
+ to True to re-enable sibling processing.
+ run: playbooks/tox/run-both.yaml
+
+- job:
+ name: devstack-tox-functional-consumer
+ parent: devstack
+ description: |
+ Base job for devstack-based functional tests for projects that
+ consume the devstack cloud.
+
+ This base job should only be used by projects that are not involved
+ in the devstack deployment step, but are instead projects that are using
+ devstack to get a cloud against which they can test things.
+
+ Runs devstack in pre-run, then runs the tox ``functional`` environment,
+ then collects tox/testr build output like normal tox jobs.
+
+ Turns off tox sibling installation. Projects may be involved
+ in the devstack deployment and so may be in the required-projects
+ list, but may not want to test against master of the other
+ projects in their tox env. Child jobs can set tox_install_siblings
+ to True to re-enable sibling processing.
+ pre-run:
+ - playbooks/devstack.yaml
+ - playbooks/tox/pre.yaml
+ run: playbooks/tox/run.yaml
+
+- job:
+ name: devstack-unit-tests
+ description: |
+ Runs unit tests on devstack project.
+
+ It runs ``run_tests.sh``.
+ pre-run: playbooks/unit-tests/pre.yaml
+ run: playbooks/unit-tests/run.yaml
+
+- project:
+ check:
+ jobs:
+ - devstack
+ - devstack-platform-centos-7
+ - devstack-platform-opensuse-423
+ - devstack-platform-opensuse-tumbleweed
+ - devstack-platform-fedora-27
+ - devstack-multinode
+ - devstack-unit-tests
+ gate:
+ jobs:
+ - devstack
+ - devstack-unit-tests
+ # Please add a note on each job and conditions for the job not
+ # being experimental any more, so we can keep this list somewhat
+ # pruned.
+ #
+ # * nova-cells-v1: maintained by nova for cells v1 (nova-cells service);
+ # nova gates on this job, it's in experimental for testing cells v1
+ # changes to devstack w/o gating on it for all devstack changes.
+ # * nova-next: maintained by nova for unreleased/undefaulted
+ # things like cellsv2 and placement-api
+ experimental:
+ jobs:
+ - nova-cells-v1:
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - nova-next
diff --git a/HACKING.rst b/HACKING.rst
index fc67f09..d5d6fbc 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -20,7 +20,7 @@
contains the usual links for blueprints, bugs, etc.
__ contribute_
-.. _contribute: http://docs.openstack.org/infra/manual/developers.html
+.. _contribute: https://docs.openstack.org/infra/manual/developers.html
__ lp_
.. _lp: https://launchpad.net/~devstack
@@ -255,7 +255,7 @@
* The ``OS_*`` environment variables should be the only ones used for all
authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
-.. _CLIAuth: http://wiki.openstack.org/CLIAuth
+.. _CLIAuth: https://wiki.openstack.org/CLIAuth
* The exercise MUST clean up after itself if successful. If it is not successful,
it is assumed that state will be left behind; this allows a chance for developers
diff --git a/README.rst b/README.rst
index b4240bd..6885546 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,5 @@
-DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
+DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud
+from git source trees.
Goals
=====
@@ -14,7 +15,7 @@
* To provide an environment for the OpenStack CI testing on every commit
to the projects
-Read more at http://docs.openstack.org/developer/devstack
+Read more at https://docs.openstack.org/devstack/latest
IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you
execute before you run them, as they install software and will alter your
@@ -27,9 +28,9 @@
The DevStack master branch generally points to trunk versions of OpenStack
components. For older, stable versions, look for branches named
stable/[release] in the DevStack repo. For example, you can do the
-following to create a Newton OpenStack cloud::
+following to create a Pike OpenStack cloud::
- git checkout stable/newton
+ git checkout stable/pike
./stack.sh
You can also pick specific OpenStack project releases by setting the appropriate
@@ -54,7 +55,7 @@
endpoints, like so:
* Horizon: http://myhost/
-* Keystone: http://myhost:5000/v2.0/
+* Keystone: http://myhost/identity/v2.0/
We also provide an environment file that you can use to interact with your
cloud via CLI::
@@ -92,5 +93,5 @@
`local.conf`. It is likely that you will need to provide and modify
this file if you want anything other than the most basic setup. Start
by reading the `configuration guide
-<https://docs.openstack.org/developer/devstack/configuration.html>`_
+<https://docs.openstack.org/devstack/latest/configuration.html>`_
for details of the configuration file and the many available options.
diff --git a/clean.sh b/clean.sh
index 9ffe3be..a29ebd9 100755
--- a/clean.sh
+++ b/clean.sh
@@ -88,6 +88,7 @@
cleanup_glance
cleanup_keystone
cleanup_nova
+cleanup_placement
cleanup_neutron
cleanup_swift
cleanup_horizon
@@ -121,9 +122,6 @@
if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then
sudo rm -rf $LOGDIR
fi
-if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then
- sudo rm -rf $SCREEN_LOGDIR
-fi
# Clean out the systemd user unit files if systemd was used.
if [[ "$USE_SYSTEMD" = "True" ]]; then
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..f65e9df
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,11 @@
+pbr>=2.0.0,!=2.1.0
+
+Pygments
+docutils
+sphinx>=1.6.2
+openstackdocstheme>=1.11.0
+nwdiag
+blockdiag
+sphinxcontrib-blockdiag
+sphinxcontrib-nwdiag
+zuul-sphinx>=0.2.0
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 6e3ec02..e9708fa 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -26,7 +26,13 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'oslosphinx', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ]
+extensions = [ 'sphinx.ext.autodoc', 'zuul_sphinx', 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ]
+
+# openstackdocstheme options
+repository_name = 'openstack-dev/devstack'
+bug_project = 'devstack'
+bug_tag = ''
+html_last_updated_fmt = '%Y-%m-%d %H:%M'
todo_include_todos = True
@@ -87,7 +93,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'nature'
+html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 064bf51..7efe4d6 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -41,6 +41,7 @@
- **extra** - runs after services are started and before any files in
``extra.d`` are executed
- **post-extra** - runs after files in ``extra.d`` are executed
+- **test-config** - runs after tempest (and plugins) are configured
The file is processed strictly in sequence; meta-sections may be
specified more than once but if any settings are duplicated the last to
@@ -136,7 +137,7 @@
::
- OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0
+ OS_AUTH_URL=http://$SERVICE_HOST:5000/v3
KEYSTONECLIENT\_DEBUG, NOVACLIENT\_DEBUG
Set command-line client log level to ``DEBUG``. These are commented
@@ -286,6 +287,18 @@
LOG_COLOR=False
+When using the logfile, by default logs are sent to the console and
+the file. You can set ``VERBOSE`` to ``false`` if you wish the logs
+to be sent only to the file (this can avoid double logging in cases
+where you are capturing both the script output and the log
+files). If ``VERBOSE`` is ``true`` you can additionally set
+``VERBOSE_NO_TIMESTAMP`` to avoid timestamps being added to each
+output line sent to the console. This can be useful in some
+situations where the console output is being captured by a runner or
+framework (e.g. Ansible) that adds its own timestamps. Note that the
+log lines sent to the ``LOGFILE`` will still be prefixed with a
+timestamp.
+
Logging the Service Output
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -294,7 +307,7 @@
To query the logs use the ``journalctl`` command, such as::
- journalctl --unit devstack@*
+ sudo journalctl --unit devstack@*
More examples can be found in :ref:`journalctl-examples`.
@@ -643,7 +656,7 @@
Cells
~~~~~
-`Cells <http://wiki.openstack.org/blueprint-nova-compute-cells>`__ is
+`Cells <https://wiki.openstack.org/wiki/Blueprint-nova-compute-cells>`__ is
an alternative scaling option. To setup a cells environment add the
following to your ``localrc`` section:
@@ -667,7 +680,7 @@
VOLUME_GROUP_NAME="stack-volumes"
VOLUME_NAME_PREFIX="volume-"
- VOLUME_BACKING_FILE_SIZE=10250M
+ VOLUME_BACKING_FILE_SIZE=24G
Keystone
@@ -779,9 +792,15 @@
DOWNLOAD_DEFAULT_IMAGES=False
IMAGE_URLS="https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-s390x-disk1.img"
+ # Provide a custom etcd3 binary download URL and its sha256.
+ # The binary must be located under '/<etcd version>/etcd-<etcd-version>-linux-s390x.tar.gz'
+ # on this URL.
+ # Build instructions for etcd3: https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd
+ ETCD_DOWNLOAD_URL=<your-etcd-download-url>
+ ETCD_SHA256=<your-etcd3-sha256>
+
enable_service n-sproxy
disable_service n-novnc
- disable_service etcd3 # https://bugs.launchpad.net/devstack/+bug/1693192
[[post-config|$NOVA_CONF]]
@@ -803,8 +822,11 @@
needed if you want to use the *serial console* outside of the all-in-one
setup.
-* The service ``etcd3`` needs to be disabled as long as bug report
- https://bugs.launchpad.net/devstack/+bug/1693192 is not resolved.
+* A link to an etcd3 binary and its sha256 needs to be provided as the
+ binary for s390x is not hosted on github like it is for other
+ architectures. For more details see
+ https://bugs.launchpad.net/devstack/+bug/1693192. Etcd3 can easily be
+ built by following https://github.com/linux-on-ibm-z/docs/wiki/Building-etcd.
.. note:: To run *Tempest* against this *Devstack* all-in-one, you'll need
to use a guest image which is smaller than 1GB when uncompressed.
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index a186336..efb315c 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -18,6 +18,57 @@
Your best choice is probably to choose a `distribution of OpenStack
<https://www.openstack.org/marketplace/distros/>`__.
+Can I use DevStack as a development environment?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sure, you can. That said, there are a couple of things you should note before
+doing so:
+
+- DevStack makes a lot of configuration changes to your system and should not
+ be run in your main development environment.
+
+- All the repositories that DevStack clones when deploying are considered
+ volatile by default and thus are subject to hard resets. This is necessary to
+ keep you in sync with the latest upstream, which is what you want in a CI
+ situation, but it can result in branches being overwritten and files being
+ removed.
+
+ The corollary of this is that if you are working on a specific project, using
+ the DevStack project repository (which defaults to ``/opt/stack/<project>``) as
+ the single master repository for storing all your work is not recommended.
+ This behavior can be overridden by setting the ``RECLONE`` config option to
+ ``no``. Alternatively, you can avoid running ``stack.sh`` to redeploy by
+ restarting services manually. In any case, you should generally ensure work
+ in progress is pushed to Gerrit or otherwise backed up before running
+ ``stack.sh``.
+
+- If you use DevStack within a VM, you may wish to mount a local OpenStack
+ directory, such as ``~/src/openstack``, inside the VM and configure DevStack
+ to use this as the clone location using the ``{PROJECT}_REPO`` config
+ variables. For example, assuming you're using Vagrant and sharing your home
+ directory, you should place the following in ``local.conf``:
+
+ .. code-block:: shell
+
+ NEUTRON_REPO=/home/vagrant/src/neutron
+ NOVA_REPO=/home/vagrant/src/nova
+ KEYSTONE_REPO=/home/vagrant/src/keystone
+ GLANCE_REPO=/home/vagrant/src/glance
+ SWIFT_REPO=/home/vagrant/src/swift
+ HORIZON_REPO=/home/vagrant/src/horizon
+ CINDER_REPO=/home/vagrant/src/cinder
+ HEAT_REPO=/home/vagrant/src/heat
+ TEMPEST_REPO=/home/vagrant/src/tempest
+ HEATCLIENT_REPO=/home/vagrant/src/python-heatclient
+ GLANCECLIENT_REPO=/home/vagrant/src/python-glanceclient
+ NOVACLIENT_REPO=/home/vagrant/src/python-novaclient
+ NEUTRONCLIENT_REPO=/home/vagrant/src/python-neutronclient
+ OPENSTACKCLIENT_REPO=/home/vagrant/src/python-openstackclient
+ HEAT_CFNTOOLS_REPO=/home/vagrant/src/heat-cfntools
+ HEAT_TEMPLATES_REPO=/home/vagrant/src/heat-templates
+ NEUTRON_FWAAS_REPO=/home/vagrant/src/neutron-fwaas
+ # ...
+
Why a shell script, why not chef/puppet/...
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -32,9 +83,9 @@
`git.openstack.org
<https://git.openstack.org/cgit/openstack-dev/devstack>`__ and bug
reports go to `LaunchPad
-<http://bugs.launchpad.net/devstack/>`__. Contributions follow the
+<https://bugs.launchpad.net/devstack/>`__. Contributions follow the
usual process as described in the `developer guide
-<http://docs.openstack.org/infra/manual/developers.html>`__. This
+<https://docs.openstack.org/infra/manual/developers.html>`__. This
Sphinx documentation is housed in the doc directory.
Why not use packages?
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 4ed64bf..7dee520 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -2,7 +2,7 @@
=================================
Starting in the OpenStack Liberty release, the
-`neutron LBaaS v2 API <http://developer.openstack.org/api-ref-networking-v2-ext.html>`_
+`neutron LBaaS v2 API <https://developer.openstack.org/api-ref/network/v2/index.html>`_
is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference
driver is based on Octavia.
@@ -39,7 +39,6 @@
LOGFILE=$DEST/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
- SCREEN_LOGDIR=$DEST/logs
# Pre-requisite
ENABLED_SERVICES=rabbit,mysql,key
# Horizon
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 1a8ddbc..b4e2891 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -197,6 +197,22 @@
to poke at your shiny new OpenStack. The most recent log file is
available in ``stack.sh.log``.
+Starting in the Ocata release, Nova requires a `Cells v2`_ deployment. Compute
+node services must be mapped to a cell before they can be used.
+
+After each compute node is stacked, verify it shows up in the
+``nova service-list --binary nova-compute`` output. The compute service is
+registered in the cell database asynchronously, so this may require polling.
+
+Once the compute node service shows up, run the ``./tools/discover_hosts.sh``
+script from the control node to map compute hosts to the single cell.
+
+The compute service running on the primary control node will be
+discovered automatically when the control node is stacked, so this
+only needs to be performed for subnodes.
+
+.. _Cells v2: https://docs.openstack.org/nova/latest/user/cells.html
+
Cleaning Up After DevStack
--------------------------
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 092809a..1b8dccd 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -396,7 +396,7 @@
In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
publicly routed IPv4 subnet. In this specific instance we are using
-the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
+the special TEST-NET-3 subnet defined in `RFC 5737 <https://tools.ietf.org/html/rfc5737>`_,
which is used for documentation. In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
would be a public IP address range that you or your organization has
allocated to you, so that you could access your instances from the
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index a91e0d1..0f105d7 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -13,7 +13,7 @@
<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
to allow read/write access to the serial console of an instance via
`nova-serialproxy
-<http://docs.openstack.org/developer/nova/man/nova-serialproxy.html>`_.
+<https://docs.openstack.org/nova/latest/cli/nova-serialproxy.html>`_.
The service can be enabled by adding ``n-sproxy`` to
``ENABLED_SERVICES``. Further options can be enabled via
@@ -62,11 +62,9 @@
Enabling the service is enough to be functional for a single machine DevStack.
-These config options are defined in `nova.console.serial
-<https://github.com/openstack/nova/blob/master/nova/console/serial.py#L33-L52>`_
-and `nova.cmd.serialproxy
-<https://github.com/openstack/nova/blob/master/nova/cmd/serialproxy.py#L26-L33>`_.
+These config options are defined in `nova.conf.serial_console
+<https://github.com/openstack/nova/blob/master/nova/conf/serial_console.py>`_.
For more information on OpenStack configuration see the `OpenStack
-Configuration Reference
-<http://docs.openstack.org/trunk/config-reference/content/list-of-compute-config-options.html>`_
+Compute Service Configuration Reference
+<https://docs.openstack.org/nova/latest/admin/configuration/index.html>`_
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 47087c5..2ff4ff0 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -139,6 +139,10 @@
Enable :doc:`devstack plugins <plugins>` to support additional
services, features, and configuration not present in base devstack.
+Use devstack in your CI with :doc:`Ansible roles <zuul_roles>` and
+:doc:`Jobs <zuul_jobs>` for Zuul V3. Migrate your devstack Zuul V2 jobs to Zuul
+V3 with this full migration :doc:`how-to <zuul_ci_jobs_migration>`.
+
Get :doc:`the big picture <overview>` of what we are trying to do
with devstack, and help us by :doc:`contributing to the project
<hacking>`.
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index bdbeaaa..74010cd 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -69,7 +69,7 @@
This is not a recommended configuration. Because of interactions
between ovs and bridging, if you reboot your box with active
- networking you may loose network connectivity to your system.
+ networking you may lose network connectivity to your system.
If you need your guests accessible on the network, but only have 1
interface (using something like a NUC), you can share your one
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index c07a8e6..814a2b1 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -24,7 +24,7 @@
- Ubuntu: current LTS release plus current development release
- Fedora: current release plus previous release
-- RHEL/Centos: current major release
+- RHEL/CentOS: current major release
- Other OS platforms may continue to be included but the maintenance of
those platforms shall not be assumed simply due to their presence.
Having a listed point-of-contact for each additional OS will greatly
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index c1c66b9..01ba9d1 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -26,19 +26,18 @@
====================================== ===
almanach `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
aodh `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
-app-catalog-ui `git://git.openstack.org/openstack/app-catalog-ui <https://git.openstack.org/cgit/openstack/app-catalog-ui>`__
astara `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
barbican `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
bilean `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
blazar `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
broadview-collector `git://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
+castellan-ui `git://git.openstack.org/openstack/castellan-ui <https://git.openstack.org/cgit/openstack/castellan-ui>`__
ceilometer `git://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
ceilometer-powervm `git://git.openstack.org/openstack/ceilometer-powervm <https://git.openstack.org/cgit/openstack/ceilometer-powervm>`__
-cerberus `git://git.openstack.org/openstack/cerberus <https://git.openstack.org/cgit/openstack/cerberus>`__
cloudkitty `git://git.openstack.org/openstack/cloudkitty <https://git.openstack.org/cgit/openstack/cloudkitty>`__
-collectd-ceilometer-plugin `git://git.openstack.org/openstack/collectd-ceilometer-plugin <https://git.openstack.org/cgit/openstack/collectd-ceilometer-plugin>`__
+collectd-openstack-plugins `git://git.openstack.org/openstack/collectd-openstack-plugins <https://git.openstack.org/cgit/openstack/collectd-openstack-plugins>`__
congress `git://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
-cue `git://git.openstack.org/openstack/cue <https://git.openstack.org/cgit/openstack/cue>`__
+cyborg `git://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
designate `git://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
devstack-plugin-additional-pkg-repos `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
devstack-plugin-amqp1 `git://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
@@ -53,6 +52,7 @@
devstack-plugin-nfs `git://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
devstack-plugin-pika `git://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
devstack-plugin-sheepdog `git://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
+devstack-plugin-vmax `git://git.openstack.org/openstack/devstack-plugin-vmax <https://git.openstack.org/cgit/openstack/devstack-plugin-vmax>`__
devstack-plugin-zmq `git://git.openstack.org/openstack/devstack-plugin-zmq <https://git.openstack.org/cgit/openstack/devstack-plugin-zmq>`__
dragonflow `git://git.openstack.org/openstack/dragonflow <https://git.openstack.org/cgit/openstack/dragonflow>`__
drbd-devstack `git://git.openstack.org/openstack/drbd-devstack <https://git.openstack.org/cgit/openstack/drbd-devstack>`__
@@ -65,18 +65,20 @@
glare `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
group-based-policy `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
heat `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
+heat-dashboard `git://git.openstack.org/openstack/heat-dashboard <https://git.openstack.org/cgit/openstack/heat-dashboard>`__
horizon-mellanox `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
ironic `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
ironic-inspector `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
ironic-staging-drivers `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
ironic-ui `git://git.openstack.org/openstack/ironic-ui <https://git.openstack.org/cgit/openstack/ironic-ui>`__
-k8s-cloud-provider `git://git.openstack.org/openstack/k8s-cloud-provider <https://git.openstack.org/cgit/openstack/k8s-cloud-provider>`__
karbor `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
karbor-dashboard `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
keystone `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
kingbird `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
+kolla-cli `git://git.openstack.org/openstack/kolla-cli <https://git.openstack.org/cgit/openstack/kolla-cli>`__
kuryr-kubernetes `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
kuryr-libnetwork `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
+kuryr-tempest-plugin `git://git.openstack.org/openstack/kuryr-tempest-plugin <https://git.openstack.org/cgit/openstack/kuryr-tempest-plugin>`__
magnum `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
magnum-ui `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
manila `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
@@ -91,12 +93,15 @@
monasca-analytics `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
monasca-api `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
monasca-ceilometer `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
+monasca-events-api `git://git.openstack.org/openstack/monasca-events-api <https://git.openstack.org/cgit/openstack/monasca-events-api>`__
monasca-log-api `git://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
+monasca-tempest-plugin `git://git.openstack.org/openstack/monasca-tempest-plugin <https://git.openstack.org/cgit/openstack/monasca-tempest-plugin>`__
monasca-transform `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
murano `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
networking-6wind `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
networking-arista `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
networking-bagpipe `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
+networking-baremetal `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
networking-bgpvpn `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
networking-brocade `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
networking-calico `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
@@ -105,27 +110,36 @@
networking-dpm `git://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
networking-fortinet `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
networking-generic-switch `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
+networking-hpe `git://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
networking-huawei `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
+networking-hyperv `git://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
networking-infoblox `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
networking-l2gw `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
+networking-lagopus `git://git.openstack.org/openstack/networking-lagopus <https://git.openstack.org/cgit/openstack/networking-lagopus>`__
networking-midonet `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
networking-mlnx `git://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
networking-nec `git://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
networking-odl `git://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
networking-onos `git://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
+networking-opencontrail `git://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
networking-ovn `git://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
networking-ovs-dpdk `git://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
networking-plumgrid `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
networking-powervm `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
networking-sfc `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
+networking-spp `git://git.openstack.org/openstack/networking-spp <https://git.openstack.org/cgit/openstack/networking-spp>`__
networking-vpp `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
networking-vsphere `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
neutron `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
+neutron-classifier `git://git.openstack.org/openstack/neutron-classifier <https://git.openstack.org/cgit/openstack/neutron-classifier>`__
neutron-dynamic-routing `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
neutron-fwaas `git://git.openstack.org/openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas>`__
+neutron-fwaas-dashboard `git://git.openstack.org/openstack/neutron-fwaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-fwaas-dashboard>`__
neutron-lbaas `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
neutron-lbaas-dashboard `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
+neutron-tempest-plugin `git://git.openstack.org/openstack/neutron-tempest-plugin <https://git.openstack.org/cgit/openstack/neutron-tempest-plugin>`__
neutron-vpnaas `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
+neutron-vpnaas-dashboard `git://git.openstack.org/openstack/neutron-vpnaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-vpnaas-dashboard>`__
nova-dpm `git://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
nova-lxd `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
nova-mksproxy `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
@@ -133,12 +147,17 @@
oaktree `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
octavia `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
octavia-dashboard `git://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
+omni `git://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
+openstacksdk `git://git.openstack.org/openstack/openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`__
os-xenapi `git://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
osprofiler `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
+oswin-tempest-plugin `git://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
panko `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
patrole `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
picasso `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
+qinling `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
rally `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
+rally-openstack `git://git.openstack.org/openstack/rally-openstack <https://git.openstack.org/cgit/openstack/rally-openstack>`__
sahara `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
sahara-dashboard `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
scalpels `git://git.openstack.org/openstack/scalpels <https://git.openstack.org/cgit/openstack/scalpels>`__
@@ -147,15 +166,20 @@
senlin `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
solum `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
stackube `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
+storlets `git://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
tacker `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
tap-as-a-service `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
tap-as-a-service-dashboard `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
+tatu `git://git.openstack.org/openstack/tatu <https://git.openstack.org/cgit/openstack/tatu>`__
+telemetry-tempest-plugin `git://git.openstack.org/openstack/telemetry-tempest-plugin <https://git.openstack.org/cgit/openstack/telemetry-tempest-plugin>`__
tricircle `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
trio2o `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
trove `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
trove-dashboard `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
+valet `git://git.openstack.org/openstack/valet <https://git.openstack.org/cgit/openstack/valet>`__
vitrage `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
vitrage-dashboard `git://git.openstack.org/openstack/vitrage-dashboard <https://git.openstack.org/cgit/openstack/vitrage-dashboard>`__
+vitrage-tempest-plugin `git://git.openstack.org/openstack/vitrage-tempest-plugin <https://git.openstack.org/cgit/openstack/vitrage-tempest-plugin>`__
vmware-nsx `git://git.openstack.org/openstack/vmware-nsx <https://git.openstack.org/cgit/openstack/vmware-nsx>`__
vmware-vspc `git://git.openstack.org/openstack/vmware-vspc <https://git.openstack.org/cgit/openstack/vmware-vspc>`__
watcher `git://git.openstack.org/openstack/watcher <https://git.openstack.org/cgit/openstack/watcher>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 5b3c6cf..89b9381 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -12,6 +12,15 @@
be sure that they will continue to work in the future as DevStack
evolves.
+Prerequisites
+=============
+
+If you are planning to create a plugin that is going to host a service in the
+service catalog (that is, your plugin will use the command
+``get_or_create_service``) please make sure that you apply to the `service
+types authority`_ to reserve a valid service-type. This will help to make sure
+that all deployments of your service use the same service-type.
+
Plugin Interface
================
@@ -45,6 +54,31 @@
default value only if the variable is unset or empty; e.g. in bash
syntax ``FOO=${FOO:-default}``.
+ The file should include a ``define_plugin`` line to indicate the
+ plugin's name, which is the name that should be used by users on
+ "enable_plugin" lines. It should generally be the last component of
+ the git repo path (e.g., if the plugin's repo is
+ openstack/devstack-foo, then the name here should be "foo") ::
+
+ define_plugin <YOUR PLUGIN>
+
+ If your plugin depends on another plugin, indicate it in this file
+ with one or more lines like the following::
+
+ plugin_requires <YOUR PLUGIN> <OTHER PLUGIN>
+
+ For a complete example, if the plugin "foo" depends on "bar", the
+ ``settings`` file should include::
+
+ define_plugin foo
+ plugin_requires foo bar
+
+ Devstack does not currently use this dependency information, so it's
+ important that users continue to add enable_plugin lines in the
+ correct order in ``local.conf``. However, adding this information
+ allows other tools to consider dependency information when
+ automatically generating ``local.conf`` files.
+
- ``plugin.sh`` - the actual plugin. It is executed by devstack at
well defined points during a ``stack.sh`` run. The plugin.sh
internal structure is discussed below.
@@ -250,3 +284,5 @@
For additional inspiration on devstack plugins you can check out the
`Plugin Registry <plugin-registry.html>`_.
+
+.. _service types authority: https://specs.openstack.org/openstack/service-types-authority/
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 60a7719..9cc4017 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -94,33 +94,95 @@
Follow logs for a specific service::
- journalctl -f --unit devstack@n-cpu.service
+ sudo journalctl -f --unit devstack@n-cpu.service
Following logs for multiple services simultaneously::
- journalctl -f --unit devstack@n-cpu.service --unit
- devstack@n-cond.service
+ sudo journalctl -f --unit devstack@n-cpu.service --unit devstack@n-cond.service
or you can even do wild cards to follow all the nova services::
- journalctl -f --unit devstack@n-*
+ sudo journalctl -f --unit devstack@n-*
Use higher precision time stamps::
- journalctl -f -o short-precise --unit devstack@n-cpu.service
+ sudo journalctl -f -o short-precise --unit devstack@n-cpu.service
By default, journalctl strips out "unprintable" characters, including
ASCII color codes. To keep the color codes (which can be interpreted by
an appropriate terminal/pager - e.g. ``less``, the default)::
- journalctl -a --unit devstack@n-cpu.service
+ sudo journalctl -a --unit devstack@n-cpu.service
When outputting to the terminal using the default pager, long lines
-appear to be truncated, but horizontal scrolling is supported via the
-left/right arrow keys.
+will be truncated, but horizontal scrolling is supported via the
+left/right arrow keys. You can override this by setting the
+``SYSTEMD_LESS`` environment variable to e.g. ``FRXM``.
+
+You can pipe the output to another tool, such as ``grep``. For
+example, to find a server instance UUID in the nova logs::
+
+ sudo journalctl -a --unit devstack@n-* | grep 58391b5c-036f-44d5-bd68-21d3c26349e6
See ``man 1 journalctl`` for more.
+Debugging
+=========
+
+Using pdb
+---------
+
+In order to break into a regular pdb session on a systemd-controlled
+service, you need to invoke the process manually - that is, take it out
+of systemd's control.
+
+Discover the command systemd is using to run the service::
+
+ systemctl show devstack@n-sch.service -p ExecStart --no-pager
+
+Stop the systemd service::
+
+ sudo systemctl stop devstack@n-sch.service
+
+Inject your breakpoint in the source, e.g.::
+
+ import pdb; pdb.set_trace()
+
+Invoke the command manually::
+
+ /usr/local/bin/nova-scheduler --config-file /etc/nova/nova.conf
+
+Using remote-pdb
+----------------
+
+`remote-pdb`_ works while the process is under systemd control.
+
+Make sure you have remote-pdb installed::
+
+ sudo pip install remote-pdb
+
+Inject your breakpoint in the source, e.g.::
+
+ import remote_pdb; remote_pdb.set_trace()
+
+Restart the relevant service::
+
+ sudo systemctl restart devstack@n-api.service
+
+The remote-pdb code configures the telnet port when ``set_trace()`` is
+invoked. Do whatever it takes to hit the instrumented code path, and
+inspect the logs for a message displaying the listening port::
+
+ Sep 07 16:36:12 p8-100-neo devstack@n-api.service[772]: RemotePdb session open at 127.0.0.1:46771, waiting for connection ...
+
+Telnet to that port to enter the pdb session::
+
+ telnet 127.0.0.1 46771
+
+See the `remote-pdb`_ home page for more options.
+
+.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb
+
Known Issues
============
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
new file mode 100644
index 0000000..c00f06e
--- /dev/null
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -0,0 +1,301 @@
+===============================
+Migrating Zuul V2 CI jobs to V3
+===============================
+
+The OpenStack CI system moved from Zuul v2 to Zuul v3, and all CI jobs moved to
+the new CI system. All jobs have been migrated automatically to a format
+compatible with Zuul v3; however, the jobs produced in this way are suboptimal
+and do not use the capabilities introduced by Zuul v3, which allow for re-use of
+job parts, in the form of Ansible roles, as well as inheritance between jobs.
+
+DevStack hosts a set of roles, plays and jobs that can be used by other
+repositories to define their DevStack based jobs. To benefit from them, legacy
+v2 jobs must be migrated to the native v3 format.
+
+This document provides guidance and examples to make the migration process as
+painless and smooth as possible.
+
+Where to host the job definitions
+==================================
+
+In Zuul V3 jobs can be defined in the repository that contains the code they
+exercise. If you are writing CI jobs for an OpenStack service you can define
+your DevStack based CI jobs in one of the repositories that host the code for
+your service. If you have a branchless repo, like a Tempest plugin, that is
+a convenient choice to host the job definitions since job changes do not have
+to be backported. For example, see the beginning of the ``.zuul.yaml`` from the
+sahara Tempest plugin repo:
+
+.. code:: yaml
+
+ # In https://git.openstack.org/cgit/openstack/sahara-tests/tree/.zuul.yaml:
+ - job:
+ name: sahara-tests-tempest
+ description: |
+ Run Tempest tests from the Sahara plugin.
+ parent: devstack-tempest
+
+Which base job to start from
+============================
+
+If your job needs an OpenStack cloud deployed via DevStack, but you don't plan
+on running Tempest tests, you can start from one of the base
+:doc:`jobs <zuul_jobs>` defined in the DevStack repo.
+
+The ``devstack`` job can be used for both single-node jobs and multi-node jobs,
+and it includes the list of services used in the integrated gate (keystone,
+glance, nova, cinder, neutron and swift). Different topologies can be achieved
+by switching the nodeset used in the child job.
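+
+For example, a two node topology only requires switching the nodeset, which is
+exactly what the ``devstack-multinode`` job in the DevStack repo does (the job
+name below is a hypothetical minimal sketch of the pattern):
+
+.. code:: yaml
+
+   - job:
+       name: my-devstack-two-node
+       parent: devstack
+       nodeset: openstack-two-node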
+
+The ``devstack-base`` job is similar to ``devstack`` but it does not specify any
+required repo or service to be run in DevStack. It can be useful for setting
+up child jobs that use a very narrow DevStack setup.
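+
+A minimal sketch of such a narrow job (the job name and service selection here
+are hypothetical; it assumes only keystone and its database are needed):
+
+.. code:: yaml
+
+   - job:
+       name: my-devstack-keystone-only
+       parent: devstack-base
+       required-projects:
+         - openstack/keystone
+       vars:
+         devstack_services:
+           mysql: true
+           key: true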
+
+If your job needs an OpenStack cloud deployed via DevStack, and you do plan
+on running Tempest tests, you can start from one of the base jobs defined in the
+Tempest repo.
+
+The ``devstack-tempest`` job can be used for both single-node jobs and
+multi-node jobs. Different topologies can be achieved by switching the nodeset
+used in the child job.
+
+Jobs can be customized as follows without writing any Ansible code:
+
+- add and/or remove DevStack services
+- add or modify DevStack and services configuration
+- install DevStack plugins
+- extend the number of sub-nodes (multinode only)
+- define extra log files and/or directories to be uploaded on logs.o.o
+- define extra log file extensions to be rewritten to .txt for ease of access
+
+Tempest jobs can be further customized as follows:
+
+- define the Tempest tox environment to be used
+- define the test concurrency
+- define the test regular expression
+
+By writing Ansible code, or by importing existing custom roles, jobs can be
+further extended:
+
+- adding pre and/or post playbooks
+- overriding the run playbook and adding custom roles
+
+The (partial) example below extends the Tempest single-node base job
+``devstack-tempest`` in the Kuryr repository. The parent job name is defined
+in job.parent.
+
+.. code:: yaml
+
+   # https://git.openstack.org/cgit/openstack/kuryr-kubernetes/tree/.zuul.yaml:
+   - job:
+       name: kuryr-kubernetes-tempest-base
+       parent: devstack-tempest
+       description: Base kuryr-kubernetes-job
+       required-projects:
+         - openstack/devstack-plugin-container
+         - openstack/kuryr
+         - openstack/kuryr-kubernetes
+         - openstack/kuryr-tempest-plugin
+         - openstack/neutron-lbaas
+       vars:
+         tempest_test_regex: '^(kuryr_tempest_plugin.tests.)'
+         tox_envlist: 'all'
+         devstack_localrc:
+           KURYR_K8S_API_PORT: 8080
+           TEMPEST_PLUGINS: '/opt/stack/kuryr-tempest-plugin'
+         devstack_services:
+           kubernetes-api: true
+           kubernetes-controller-manager: true
+           kubernetes-scheduler: true
+           kubelet: true
+           kuryr-kubernetes: true
+           (...)
+         devstack_plugins:
+           kuryr-kubernetes: https://git.openstack.org/openstack/kuryr
+           devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container
+           neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas
+         (...)
+
+Job variables
+=============
+
+Variables can be added to the job in three different places:
+
+- job.vars: these are global variables available to all nodes in the nodeset
+- job.host-vars.[HOST]: these are variables available only to the specified HOST
+- job.group-vars.[GROUP]: these are variables available only to the specified
+ GROUP
+
+Zuul merges dict variables through job inheritance. Host and group variables
+override variables with the same name defined as global variables.
+
+In the example below, for the sundaes job, hosts that are not part of the
+subnode group will run vanilla and chocolate. Hosts in the subnode group will
+run stracciatella and strawberry.
+
+.. code:: yaml
+
+   - job:
+       name: ice-creams
+       vars:
+         devstack_services:
+           vanilla: true
+           chocolate: false
+       group-vars:
+         subnode:
+           devstack_services:
+             pistacchio: true
+             stracciatella: true
+
+   - job:
+       name: sundaes
+       parent: ice-creams
+       vars:
+         devstack_services:
+           chocolate: true
+       group-vars:
+         subnode:
+           devstack_services:
+             strawberry: true
+             pistacchio: false
+
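+Assuming standard Ansible precedence (group variables replace same-named
+global variables rather than merging with them), the effective services for
+the sundaes job are, as a sketch:
+
+.. code:: yaml
+
+   # hosts not in the subnode group
+   devstack_services:
+     vanilla: true
+     chocolate: true
+
+   # hosts in the subnode group
+   devstack_services:
+     pistacchio: false
+     stracciatella: true
+     strawberry: true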
+
+DevStack Gate Flags
+===================
+
+The old CI system used a combination of DevStack, Tempest and devstack-gate to
+set up a test environment and run tests against it. With Zuul V3, the logic
+that used to live in devstack-gate has moved into different repos, including
+DevStack, Tempest and grenade.
+
+DevStack-gate exposes an interface for job definition based on a number of
+``DEVSTACK_GATE_*`` environment variables, or flags. This guide shows how to
+map DEVSTACK_GATE flags into the new system.
+
+The repo column indicates which repository hosts the code that replaces the
+devstack-gate flag. The new implementation column explains how to reproduce
+the same or a similar behaviour in Zuul v3 jobs. For localrc settings,
+devstack-gate defined a default value. In Ansible jobs the default is either
+the value defined in the parent job, or the default from DevStack, if any.
+
+============================================== ============= ==================
+DevStack gate flag                             Repo          New implementation
+============================================== ============= ==================
+OVERRIDE_ZUUL_BRANCH                           zuul          override-checkout:
+                                                             [branch]
+                                                             in the job definition.
+DEVSTACK_GATE_NET_OVERLAY                      zuul-jobs     A bridge called
+                                                             br-infra is set up for
+                                                             all jobs that inherit
+                                                             from multinode with
+                                                             a dedicated `bridge role <https://docs.openstack.org/infra/zuul-jobs/roles.html#role-multi-node-bridge>`_.
+DEVSTACK_GATE_FEATURE_MATRIX                   devstack-gate ``test_matrix_features``
+                                                             variable of the
+                                                             test-matrix role in
+                                                             devstack-gate. This
+                                                             is a temporary
+                                                             solution; the feature
+                                                             matrix will go away.
+                                                             In the future services
+                                                             will be defined in
+                                                             jobs only.
+DEVSTACK_CINDER_VOLUME_CLEAR                   devstack      *CINDER_VOLUME_CLEAR: true/false*
+                                                             in devstack_localrc
+                                                             in the job vars.
+DEVSTACK_GATE_NEUTRON                          devstack      True by default. To
+                                                             disable, disable all
+                                                             neutron services in
+                                                             devstack_services in
+                                                             the job definition.
+DEVSTACK_GATE_CONFIGDRIVE                      devstack      *FORCE_CONFIG_DRIVE: true/false*
+                                                             in devstack_localrc
+                                                             in the job vars.
+DEVSTACK_GATE_INSTALL_TESTONLY                 devstack      *INSTALL_TESTONLY_PACKAGES: true/false*
+                                                             in devstack_localrc
+                                                             in the job vars.
+DEVSTACK_GATE_VIRT_DRIVER                      devstack      *VIRT_DRIVER: [virt driver]*
+                                                             in devstack_localrc
+                                                             in the job vars.
+DEVSTACK_GATE_LIBVIRT_TYPE                     devstack      *LIBVIRT_TYPE: [libvirt type]*
+                                                             in devstack_localrc
+                                                             in the job vars.
+DEVSTACK_GATE_TEMPEST                          devstack      Defined by the job
+                                               tempest       that is used. The
+                                                             ``devstack`` job only
+                                                             runs devstack.
+                                                             The ``devstack-tempest``
+                                                             one triggers a Tempest
+                                                             run as well.
+DEVSTACK_GATE_TEMPEST_FULL                     tempest       *tox_envlist: full*
+                                                             in the job vars.
+DEVSTACK_GATE_TEMPEST_ALL                      tempest       *tox_envlist: all*
+                                                             in the job vars.
+DEVSTACK_GATE_TEMPEST_ALL_PLUGINS              tempest       *tox_envlist: all-plugin*
+                                                             in the job vars.
+DEVSTACK_GATE_TEMPEST_SCENARIOS                tempest       *tox_envlist: scenario*
+                                                             in the job vars.
+TEMPEST_CONCURRENCY                            tempest       *tempest_concurrency: [value]*
+                                                             in the job vars. This
+                                                             is available only on
+                                                             jobs that inherit from
+                                                             ``devstack-tempest``
+                                                             down.
+DEVSTACK_GATE_TEMPEST_NOTESTS                  tempest       *tox_envlist: venv-tempest*
+                                                             in the job vars. This
+                                                             will create a Tempest
+                                                             virtual environment
+                                                             but run no tests.
+DEVSTACK_GATE_SMOKE_SERIAL                     tempest       *tox_envlist: smoke-serial*
+                                                             in the job vars.
+DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION tempest       *tox_envlist: full-serial*
+                                                             in the job vars.
+                                                             *TEMPEST_ALLOW_TENANT_ISOLATION: false*
+                                                             in devstack_localrc in
+                                                             the job vars.
+============================================== ============= ==================
+
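+As a sketch, a legacy job that set ``DEVSTACK_GATE_TEMPEST=1``,
+``DEVSTACK_GATE_TEMPEST_FULL=1`` and ``DEVSTACK_GATE_LIBVIRT_TYPE=qemu`` maps
+to something like the following (the job name is hypothetical):
+
+.. code:: yaml
+
+   - job:
+       name: my-migrated-tempest-job
+       parent: devstack-tempest
+       vars:
+         tox_envlist: full
+         devstack_localrc:
+           LIBVIRT_TYPE: qemu
+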
+The following flags have not been migrated yet or are legacy and won't be
+migrated at all.
+
+===================================== ====== ==========================
+DevStack gate flag                    Status Details
+===================================== ====== ==========================
+DEVSTACK_GATE_TOPOLOGY                WIP    The topology depends on the base
+                                             job that is used and more
+                                             specifically on the nodeset
+                                             attached to it. The new job
+                                             format allows projects to define
+                                             the variables to be passed to
+                                             every node/node-group that exists
+                                             in the topology. Named topologies
+                                             that include the nodeset and the
+                                             matching variables can be defined
+                                             in the form of base jobs.
+DEVSTACK_GATE_GRENADE                 TBD    Grenade Zuul V3 jobs will be
+                                             hosted in the grenade repo.
+GRENADE_BASE_BRANCH                   TBD    Grenade Zuul V3 jobs will be
+                                             hosted in the grenade repo.
+DEVSTACK_GATE_NEUTRON_DVR             TBD    Depends on multinode support.
+DEVSTACK_GATE_EXERCISES               TBD    Can be done on request.
+DEVSTACK_GATE_IRONIC                  TBD    This will probably be implemented
+                                             on the ironic side.
+DEVSTACK_GATE_IRONIC_DRIVER           TBD    This will probably be implemented
+                                             on the ironic side.
+DEVSTACK_GATE_IRONIC_BUILD_RAMDISK    TBD    This will probably be implemented
+                                             on the ironic side.
+DEVSTACK_GATE_POSTGRES                Legacy This flag exists in d-g but the
+                                             only thing that it does is
+                                             capture postgres logs. This is
+                                             already supported by the roles in
+                                             post, so the flag is useless in
+                                             the new jobs. postgres itself can
+                                             be enabled via the
+                                             devstack_services job variable.
+DEVSTACK_GATE_ZEROMQ                  Legacy This has no effect in d-g.
+DEVSTACK_GATE_MQ_DRIVER               Legacy This has no effect in d-g.
+DEVSTACK_GATE_TEMPEST_STRESS_ARGS     Legacy Stress is not in Tempest anymore.
+DEVSTACK_GATE_TEMPEST_HEAT_SLOW       Legacy This is not used anywhere.
+DEVSTACK_GATE_CELLS                   Legacy This has no effect in d-g.
+DEVSTACK_GATE_NOVA_API_METADATA_SPLIT Legacy This has no effect in d-g.
+===================================== ====== ==========================
diff --git a/doc/source/zuul_jobs.rst b/doc/source/zuul_jobs.rst
new file mode 100644
index 0000000..cf203a8
--- /dev/null
+++ b/doc/source/zuul_jobs.rst
@@ -0,0 +1,4 @@
+Zuul CI Jobs
+============
+
+.. zuul:autojobs::
diff --git a/doc/source/zuul_roles.rst b/doc/source/zuul_roles.rst
new file mode 100644
index 0000000..4939281
--- /dev/null
+++ b/doc/source/zuul_roles.rst
@@ -0,0 +1,4 @@
+Zuul CI Roles
+=============
+
+.. zuul:autoroles::
diff --git a/files/debs/dstat b/files/debs/dstat
index 2b643b8..0d9da44 100644
--- a/files/debs/dstat
+++ b/files/debs/dstat
@@ -1 +1,2 @@
dstat
+python-psutil
diff --git a/files/debs/general b/files/debs/general
index 1dde03b..df872a0 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -15,6 +15,7 @@
libffi-dev # for pyOpenSSL
libjpeg-dev # Pillow 3.0.0
libmysqlclient-dev # MySQL-python
+libpcre3-dev # for python-pcre
libpq-dev # psycopg2
libssl-dev # for pyOpenSSL
libsystemd-dev # for systemd-python
@@ -29,7 +30,6 @@
python2.7
python-dev
python-gdbm # needed for testr
-screen
tar
tcpdump
unzip
diff --git a/files/debs/horizon b/files/debs/horizon
index 1f45b54..4833289 100644
--- a/files/debs/horizon
+++ b/files/debs/horizon
@@ -1,3 +1,2 @@
apache2 # NOPRIME
libapache2-mod-wsgi # NOPRIME
-libpcre3-dev # pyScss
diff --git a/files/debs/neutron b/files/debs/neutron-common
similarity index 100%
rename from files/debs/neutron
rename to files/debs/neutron-common
diff --git a/files/ldap/user.ldif.in b/files/ldap/user.ldif.in
new file mode 100644
index 0000000..16a9807
--- /dev/null
+++ b/files/ldap/user.ldif.in
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+# Demo LDAP user
+dn: cn=demo,ou=Users,${BASE_DN}
+cn: demo
+displayName: demo
+givenName: demo
+mail: demo@openstack.org
+objectClass: inetOrgPerson
+objectClass: top
+sn: demo
+uid: demo
+userPassword: demo
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
index 2b643b8..0d9da44 100644
--- a/files/rpms-suse/dstat
+++ b/files/rpms-suse/dstat
@@ -1 +1,2 @@
dstat
+python-psutil
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 370f240..0b69cb1 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -19,12 +19,12 @@
net-tools
openssh
openssl
+pcre-devel # python-pcre
postgresql-devel # psycopg2
psmisc
python-cmd2 # dist:opensuse-12.3
python-devel # pyOpenSSL
python-xml
-screen
systemd-devel # for systemd-python
tar
tcpdump
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron-common
similarity index 100%
rename from files/rpms-suse/neutron
rename to files/rpms-suse/neutron-common
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 0274642..e6addc6 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,4 +1,5 @@
iscsi-initiator-utils
lvm2
qemu-img
-scsi-target-utils # NOPRIME
+scsi-target-utils # not:rhel7,f25,f26,f27 NOPRIME
+targetcli # dist:rhel7,f25,f26,f27 NOPRIME
diff --git a/files/rpms/dstat b/files/rpms/dstat
index 2b643b8..0d9da44 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1 +1,2 @@
dstat
+python-psutil
diff --git a/files/rpms/general b/files/rpms/general
index 1393d18..5d9a4ad 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -9,9 +9,9 @@
graphviz # needed only for docs
httpd
httpd-devel
-iptables-services # NOPRIME f23,f24,f25
+iptables-services # NOPRIME f25,f26,f27
java-1.7.0-openjdk-headless # NOPRIME rhel7
-java-1.8.0-openjdk-headless # NOPRIME f23,f24,f25
+java-1.8.0-openjdk-headless # NOPRIME f25,f26,f27
libffi-devel
libjpeg-turbo-devel # Pillow 3.0.0
libxml2-devel # lxml
@@ -22,13 +22,13 @@
openssh-server
openssl
openssl-devel # to rebuild pyOpenSSL if needed
+pcre-devel # for python-pcre
pkgconfig
postgresql-devel # psycopg2
psmisc
pyOpenSSL # version in pip uses too much memory
python-devel
redhat-rpm-config # missing dep for gcc hardening flags, see rhbz#1217376
-screen
systemd-devel # for systemd-python
tar
tcpdump
diff --git a/files/rpms/horizon b/files/rpms/horizon
index aeb2cb5..fa5601a 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,5 +1,4 @@
Django
httpd # NOPRIME
mod_wsgi # NOPRIME
-pcre-devel # pyScss
pyxattr
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 1703083..5f19c6f 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,4 +1,3 @@
memcached
mod_ssl
-MySQL-python
sqlite
diff --git a/files/rpms/neutron b/files/rpms/neutron-common
similarity index 94%
rename from files/rpms/neutron
rename to files/rpms/neutron-common
index a4e029a..0cc8d11 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron-common
@@ -6,7 +6,6 @@
iptables
iputils
mysql-devel
-MySQL-python
mysql-server # NOPRIME
openvswitch # NOPRIME
rabbitmq-server # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index a368c55..9fb7282 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,12 +7,11 @@
genisoimage # required for config_drive
iptables
iputils
-kernel-modules # dist:f23,f24,f25
+kernel-modules # dist:f25,f26,f27
kpartx
libxml2-python
m2crypto
mysql-devel
-MySQL-python
mysql-server # NOPRIME
numpy # needed by websockify for spice console
parted
diff --git a/files/rpms/swift b/files/rpms/swift
index 2f12df0..be0db14 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
liberasurecode-devel
memcached
pyxattr
-rsync-daemon # dist:f23,f24,f25
+rsync-daemon # dist:f25,f26,f27
sqlite
xfsprogs
xinetd
diff --git a/functions b/functions
index e056c3f..24994c0 100644
--- a/functions
+++ b/functions
@@ -45,6 +45,37 @@
# export it so child shells have access to the 'short_source' function also.
export -f short_source
+# Download a file from a URL
+#
+# Will check the cache (in $FILES) or download the given URL.
+#
+# Argument is the URL to the remote file
+#
+# Will echo the local path to the file as the output. Will die on
+# failure to download.
+#
+# Files can be pre-cached for CI environments, see EXTRA_CACHE_URLS
+# and tools/image_list.sh
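+#
+# Usage (a sketch; the URL is just an example):
+#   image_path=$(get_extra_file http://example.com/image.qcow2)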
+function get_extra_file {
+ local file_url=$1
+
+ file_name=$(basename "$file_url")
+ if [[ $file_url != file* ]]; then
+ # If the file isn't cached, download it
+ if [[ ! -f $FILES/$file_name ]]; then
+ wget --progress=dot:giga -t 2 -c $file_url -O $FILES/$file_name
+ if [[ $? -ne 0 ]]; then
+ die "$file_url could not be downloaded"
+ fi
+ fi
+ echo "$FILES/$file_name"
+ return
+ else
+ # just strip the file:// bit and that's the path to the file
+ echo $file_url | sed 's|^file://||'
+ fi
+}
+
# Retrieve an image from a URL and upload into Glance.
# Uses the following variables:
@@ -333,7 +364,7 @@
esac
if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then
- img_property="--property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
+ img_property="--property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
fi
if is_arch "aarch64"; then
@@ -407,6 +438,31 @@
return $rval
}
+function wait_for_compute {
+ local timeout=$1
+ local rval=0
+ time_start "wait_for_service"
+ timeout $timeout bash -x <<EOF || rval=$?
+ ID=""
+ while [[ "\$ID" == "" ]]; do
+ sleep 1
+ if [[ "$VIRT_DRIVER" = 'fake' ]]; then
+ # When using the fake driver the compute hostnames have a suffix of 1 to NUMBER_FAKE_NOVA_COMPUTE
+ ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname`1 --service nova-compute -c ID -f value)
+ else
+ ID=\$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list --host `hostname` --service nova-compute -c ID -f value)
+ fi
+ done
+EOF
+ time_stop "wait_for_service"
+ # Figure out what's happening on platforms where this doesn't work
+ if [[ "$rval" != 0 ]]; then
+ echo "Didn't find service registered by hostname after $timeout seconds"
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" compute service list
+ fi
+ return $rval
+}
+
# ping check
# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK``
@@ -452,13 +508,13 @@
function get_instance_ip {
local vm_id=$1
local network_name=$2
- local nova_result
+ local addresses
local ip
- nova_result="$(nova show $vm_id)"
- ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+ addresses=$(openstack server show -c addresses -f value "$vm_id")
+ ip=$(echo $addresses | sed -n "s/^.*$network_name=\([0-9\.]*\).*$/\1/p")
if [[ $ip = "" ]];then
- echo "$nova_result"
+ echo "addresses of server $vm_id : $addresses"
die $LINENO "[Fail] Couldn't get ipaddress of VM"
fi
echo $ip
@@ -661,7 +717,7 @@
# Create a loopback disk and format it to XFS.
if [[ -e ${disk_image} ]]; then
if egrep -q ${storage_data_dir} /proc/mounts; then
- sudo umount ${storage_data_dir}/drives/sdb1
+ sudo umount ${storage_data_dir}
sudo rm -f ${disk_image}
fi
fi
@@ -750,6 +806,16 @@
echo $port
}
+# Save some state information
+#
+# Write out various useful state information to /etc/devstack-version
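+#
+# The result looks something like this (a sketch; values depend on the
+# tree and platform):
+#
+#   DevStack Version: queens
+#   Change: <sha> <commit subject> <commit date>
+#   OS Version: Ubuntu 16.04 xenial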
+function write_devstack_version {
+ cat - <<EOF | sudo tee /etc/devstack-version >/dev/null
+DevStack Version: ${DEVSTACK_SERIES}
+Change: $(git log --format="%H %s %ci" -1)
+OS Version: ${os_VENDOR} ${os_RELEASE} ${os_CODENAME}
+EOF
+}
# Restore xtrace
$_XTRACE_FUNCTIONS
diff --git a/functions-common b/functions-common
index 65db681..b1b0995 100644
--- a/functions-common
+++ b/functions-common
@@ -45,6 +45,7 @@
declare -A -g GITDIR
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
+KILL_PATH="$(which kill)"
# Save these variables to .stackenv
STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
@@ -319,10 +320,7 @@
if [[ -x $(command -v apt-get 2>/dev/null) ]]; then
sudo apt-get install -y lsb-release
elif [[ -x $(command -v zypper 2>/dev/null) ]]; then
- # XXX: old code paths seem to have assumed SUSE platforms also
- # had "yum". Keep this ordered above yum so we don't try to
- # install the rh package. suse calls it just "lsb"
- sudo zypper -n install lsb
+ sudo zypper -n install lsb-release
elif [[ -x $(command -v dnf 2>/dev/null) ]]; then
sudo dnf install -y redhat-lsb-core
elif [[ -x $(command -v yum 2>/dev/null) ]]; then
@@ -375,6 +373,9 @@
DISTRO="f$os_RELEASE"
elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
DISTRO="opensuse-$os_RELEASE"
+ # Tumbleweed uses "n/a" as a codename, and the release is a datestring
+ # like 20180218, so not very useful.
+ [ "$os_CODENAME" = "n/a" ] && DISTRO="opensuse-tumbleweed"
elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
# just use major release
DISTRO="sle${os_RELEASE%.*}"
@@ -388,8 +389,6 @@
DISTRO="rhel${os_RELEASE::1}"
elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
DISTRO="xs${os_RELEASE%.*}"
- elif [[ "$os_VENDOR" =~ (kvmibm) ]]; then
- DISTRO="${os_VENDOR}${os_RELEASE::1}"
else
# We can't make a good choice here. Setting a sensible DISTRO
# is part of the problem, but not the major issue -- we really
@@ -443,7 +442,7 @@
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] || \
- [ "$os_VENDOR" = "Virtuozzo" ] || [ "$os_VENDOR" = "kvmibm" ]
+ [ "$os_VENDOR" = "Virtuozzo" ]
}
@@ -519,7 +518,7 @@
if [[ ! -d $git_dest ]]; then
if [[ "$ERROR_ON_CLONE" = "True" ]]; then
echo "The $git_dest project was not found; if this is a gate job, add"
- echo "the project to the \$PROJECTS variable in the job definition."
+ echo "the project to 'required-projects' in the job definition."
die $LINENO "Cloning not allowed in this configuration"
fi
git_timed clone $git_clone_flags $git_remote $git_dest
@@ -864,10 +863,11 @@
# Gets user role id
user_role_id=$(openstack role assignment list \
+ --role $1 \
--user $2 \
--project $3 \
$domain_args \
- | grep " $1 " | get_field 1)
+ | grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
openstack role add $1 \
@@ -875,10 +875,11 @@
--project $3 \
$domain_args
user_role_id=$(openstack role assignment list \
+ --role $1 \
--user $2 \
--project $3 \
$domain_args \
- | grep " $1 " | get_field 1)
+ | grep '^|\s[a-f0-9]\+' | get_field 1)
fi
echo $user_role_id
}
@@ -889,18 +890,20 @@
local user_role_id
# Gets user role id
user_role_id=$(openstack role assignment list \
+ --role $1 \
--user $2 \
--domain $3 \
- | grep " $1 " | get_field 1)
+ | grep '^|\s[a-f0-9]\+' | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user and get it
openstack role add $1 \
--user $2 \
--domain $3
user_role_id=$(openstack role assignment list \
+ --role $1 \
--user $2 \
--domain $3 \
- | grep " $1 " | get_field 1)
+ | grep '^|\s[a-f0-9]\+' | get_field 1)
fi
echo $user_role_id
}
@@ -911,6 +914,7 @@
local group_role_id
# Gets group role id
group_role_id=$(openstack role assignment list \
+ --role $1 \
--group $2 \
--project $3 \
-f value)
@@ -920,6 +924,7 @@
--group $2 \
--project $3
group_role_id=$(openstack role assignment list \
+ --role $1 \
--group $2 \
--project $3 \
-f value)
@@ -1205,9 +1210,9 @@
if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then
file_to_parse="${file_to_parse} ${package_dir}/keystone"
fi
- elif [[ $service == q-* ]]; then
- if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then
- file_to_parse="${file_to_parse} ${package_dir}/neutron"
+ elif [[ $service == q-* || $service == neutron-* ]]; then
+ if [[ ! $file_to_parse =~ $package_dir/neutron-common ]]; then
+ file_to_parse="${file_to_parse} ${package_dir}/neutron-common"
fi
elif [[ $service == ir-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then
@@ -1374,62 +1379,6 @@
zypper --non-interactive install --auto-agree-with-licenses "$@"
}
-
-# Process Functions
-# =================
-
-# _run_process() is designed to be backgrounded by run_process() to simulate a
-# fork. It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it(). The log filename is derived
-# from the service name.
-# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
-# If an optional group is provided sg will be used to set the group of
-# the command.
-# _run_process service "command-line" [group]
-function _run_process {
- # disable tracing through the exec redirects, it's just confusing in the logs.
- xtrace=$(set +o | grep xtrace)
- set +o xtrace
-
- local service=$1
- local command="$2"
- local group=$3
-
- # Undo logging redirections and close the extra descriptors
- exec 1>&3
- exec 2>&3
- exec 3>&-
- exec 6>&-
-
- local logfile="${service}.log.${CURRENT_LOG_TIME}"
- local real_logfile="${LOGDIR}/${logfile}"
- if [[ -n ${LOGDIR} ]]; then
- exec 1>&"$real_logfile" 2>&1
- bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- # Drop the backward-compat symlink
- ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
- fi
-
- # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
- export PYTHONUNBUFFERED=1
- fi
-
- # reenable xtrace before we do *real* work
- $xtrace
-
- # Run under ``setsid`` to force the process to become a session and group leader.
- # The pid saved can be used with pkill -g to get the entire process group.
- if [[ -n "$group" ]]; then
- setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
- else
- setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
- fi
-
- # Just silently exit this process
- exit 0
-}
-
function write_user_unit_file {
local service=$1
local command="$2"
@@ -1445,6 +1394,9 @@
iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
iniset -sudo $unitfile "Service" "User" "$user"
iniset -sudo $unitfile "Service" "ExecStart" "$command"
+ iniset -sudo $unitfile "Service" "KillMode" "process"
+ iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
+ iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
if [[ -n "$group" ]]; then
iniset -sudo $unitfile "Service" "Group" "$group"
fi
@@ -1466,9 +1418,10 @@
iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service"
iniset -sudo $unitfile "Service" "User" "$user"
iniset -sudo $unitfile "Service" "ExecStart" "$command"
+ iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
iniset -sudo $unitfile "Service" "Type" "notify"
- iniset -sudo $unitfile "Service" "KillSignal" "SIGQUIT"
- iniset -sudo $unitfile "Service" "Restart" "Always"
+ iniset -sudo $unitfile "Service" "KillMode" "process"
+ iniset -sudo $unitfile "Service" "Restart" "always"
iniset -sudo $unitfile "Service" "NotifyAccess" "all"
iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
@@ -1508,13 +1461,8 @@
}
-# Defines a systemd service which can be enabled and started later on.
-# arg1: The openstack service name ('n-cpu', 'c-sch', ...).
-# arg2: The command to start (e.g. path to service binary + config files).
-# arg3: The group which owns the process.
-# arg4: The user which owns the process.
-# Returns: The systemd service name which got defined.
-function _define_systemd_service {
+# Helper function to build a basic unit file and run it under systemd.
+function _run_under_systemd {
local service=$1
local command="$2"
local cmd=$command
@@ -1529,22 +1477,9 @@
else
write_user_unit_file $systemd_service "$cmd" "$group" "$user"
fi
- echo $systemd_service
-}
-# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
-# This is used for ``service_check`` when all the ``screen_it`` are called finished
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# init_service_check
-function init_service_check {
- SCREEN_NAME=${SCREEN_NAME:-stack}
- SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
- if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
- mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
- fi
-
- rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
+ $SYSTEMCTL enable $systemd_service
+ $SYSTEMCTL start $systemd_service
}
# Find out if a process exists by partial name.
@@ -1561,7 +1496,6 @@
# If the command includes shell metacharacters (;<>*) it must be run using a shell
# If an optional group is provided sg will be used to run the
# command as that group.
-# Uses globals ``USE_SCREEN``
# run_process service "command-line" [group] [user]
function run_process {
local service=$1
@@ -1570,151 +1504,19 @@
local user=$4
local name=$service
- local systemd_service
time_start "run_process"
- # Note we deliberately make all service files, even if the service
- # isn't enabled, so it can be enabled by a dev manually on command
- # line.
- if [[ "$USE_SYSTEMD" = "True" ]]; then
- systemd_service=$(_define_systemd_service "$name" "$command" "$group" "$user")
- fi
if is_service_enabled $service; then
- if [[ "$USE_SYSTEMD" = "True" ]]; then
- $SYSTEMCTL enable $systemd_service
- $SYSTEMCTL start $systemd_service
- elif [[ "$USE_SCREEN" = "True" ]]; then
- if [[ "$user" == "root" ]]; then
- command="sudo $command"
- fi
- screen_process "$name" "$command" "$group"
- else
- # Spawn directly without screen
- if [[ "$user" == "root" ]]; then
- command="sudo $command"
- fi
- _run_process "$name" "$command" "$group" &
- fi
+ _run_under_systemd "$name" "$command" "$group" "$user"
fi
time_stop "run_process"
}
-# Helper to launch a process in a named screen
-# Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``,
-# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING``
-# screen_process name "command-line" [group]
-# Run a command in a shell in a screen window, if an optional group
-# is provided, use sg to set the group of the command.
-function screen_process {
- local name=$1
- local command="$2"
- local group=$3
-
- SCREEN_NAME=${SCREEN_NAME:-stack}
- SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
- screen -S $SCREEN_NAME -X screen -t $name
-
- local logfile="${name}.log.${CURRENT_LOG_TIME}"
- local real_logfile="${LOGDIR}/${logfile}"
- echo "LOGDIR: $LOGDIR"
- echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
- echo "log: $real_logfile"
- if [[ -n ${LOGDIR} ]]; then
- if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
- screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
- screen -S $SCREEN_NAME -p $name -X log on
- fi
- # If logging isn't active then avoid a broken symlink
- touch "$real_logfile"
- bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- # Drop the backward-compat symlink
- ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
- fi
- fi
-
- # sleep to allow bash to be ready to be send the command - we are
- # creating a new window in screen and then sends characters, so if
- # bash isn't running by the time we send the command, nothing
- # happens. This sleep was added originally to handle gate runs
- # where we needed this to be at least 3 seconds to pass
- # consistently on slow clouds. Now this is configurable so that we
- # can determine a reasonable value for the local case which should
- # be much smaller.
- sleep ${SCREEN_SLEEP:-3}
-
- NL=`echo -ne '\015'`
- # This fun command does the following:
- # - the passed server command is backgrounded
- # - the pid of the background process is saved in the usual place
- # - the server process is brought back to the foreground
- # - if the server process exits prematurely the fg command errors
- # and a message is written to stdout and the process failure file
- #
- # The pid saved can be used in stop_process() as a process group
- # id to kill off all child processes
- if [[ -n "$group" ]]; then
- command="sg $group '$command'"
- fi
-
- # Append the process to the screen rc file
- screen_rc "$name" "$command"
-
- screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start. Exit code: \$?\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL"
-}
-
-# Screen rc file builder
-# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING``
-# screen_rc service "command-line"
-function screen_rc {
- SCREEN_NAME=${SCREEN_NAME:-stack}
- SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
- if [[ ! -e $SCREENRC ]]; then
- # Name the screen session
- echo "sessionname $SCREEN_NAME" > $SCREENRC
- # Set a reasonable statusbar
- echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
- # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
- echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
- echo "screen -t shell bash" >> $SCREENRC
- fi
- # If this service doesn't already exist in the screenrc file
- if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
- NL=`echo -ne '\015'`
- echo "screen -t $1 bash" >> $SCREENRC
- echo "stuff \"$2$NL\"" >> $SCREENRC
-
- if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
- echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
- echo "log on" >>$SCREENRC
- fi
- fi
-}
-
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# screen_stop_service service
-function screen_stop_service {
- local service=$1
-
- SCREEN_NAME=${SCREEN_NAME:-stack}
- SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
- if is_service_enabled $service; then
- # Clean up the screen window
- screen -S $SCREEN_NAME -p $service -X kill || true
- fi
-}
-
# Stop a service process
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
-# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
+# Uses globals ``SERVICE_DIR``
# stop_process service
function stop_process {
local service=$1
@@ -1729,149 +1531,27 @@
$SYSTEMCTL stop devstack@$service.service
$SYSTEMCTL disable devstack@$service.service
fi
-
- if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
- pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
- # oslo.service tends to stop actually shutting down
- # reliably in between releases because someone believes it
- # is dying too early due to some inflight work they
- # have. This is a tension. It happens often enough we're
- # going to just account for it in devstack and assume it
- # doesn't work.
- #
- # Set OSLO_SERVICE_WORKS=True to skip this block
- if [[ -z "$OSLO_SERVICE_WORKS" ]]; then
- # TODO(danms): Remove this double-kill when we have
- # this fixed in all services:
- # https://bugs.launchpad.net/oslo-incubator/+bug/1446583
- sleep 1
- # /bin/true because pkill on a non existent process returns an error
- pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true
- fi
- rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
- fi
- if [[ "$USE_SCREEN" = "True" ]]; then
- # Clean up the screen window
- screen_stop_service $service
- fi
fi
}
-# Helper to get the status of each running service
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
-# service_check
+# use systemctl to check service status
function service_check {
local service
- local failures
- SCREEN_NAME=${SCREEN_NAME:-stack}
- SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-
- if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
- echo "No service status directory found"
- return
- fi
-
- # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
- # make this -o errexit safe
- failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
-
- for service in $failures; do
- service=`basename $service`
- service=${service%.failure}
- echo "Error: Service $service is not running"
- done
-
- if [ -n "$failures" ]; then
- die $LINENO "More details about the above errors can be found with screen"
- fi
-}
-
-# Tail a log file in a screen if USE_SCREEN is true.
-# Uses globals ``USE_SCREEN``
-function tail_log {
- local name=$1
- local logfile=$2
-
- if [[ "$USE_SCREEN" = "True" ]]; then
- screen_process "$name" "sudo tail -f $logfile | sed -u 's/\\\\\\\\x1b/\o033/g'"
- fi
-}
-
-
-# Deprecated Functions
-# --------------------
-
-# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
-# fork. It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it(). The log filename is derived
-# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
-# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
-# _old_run_process service "command-line"
-function _old_run_process {
- local service=$1
- local command="$2"
-
- # Undo logging redirections and close the extra descriptors
- exec 1>&3
- exec 2>&3
- exec 3>&-
- exec 6>&-
-
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log
-
- # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
- export PYTHONUNBUFFERED=1
- fi
-
- exec /bin/bash -c "$command"
- die "$service exec failure: $command"
-}
-
-# old_run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command. This is meant to duplicate the semantics
-# of screen_it() without screen. PIDs are written to
-# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
-# old_run_process service "command-line"
-function old_run_process {
- local service=$1
- local command="$2"
-
- # Spawn the child process
- _old_run_process "$service" "$command" &
- echo $!
-}
-
-# Compatibility for existing start_XXXX() functions
-# Uses global ``USE_SCREEN``
-# screen_it service "command-line"
-function screen_it {
- if is_service_enabled $1; then
- # Append the service to the screen rc file
- screen_rc "$1" "$2"
-
- if [[ "$USE_SCREEN" = "True" ]]; then
- screen_process "$1" "$2"
- else
- # Spawn directly without screen
- old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+ for service in ${ENABLED_SERVICES//,/ }; do
+ # because some things got renamed like key => keystone
+ if $SYSTEMCTL is-enabled devstack@$service.service; then
+ # no-pager is needed because otherwise status dumps to a
+ # pager when in interactive mode, which will stop a manual
+ # devstack run.
+ $SYSTEMCTL status devstack@$service.service --no-pager
fi
- fi
+ done
}
-# Compatibility for existing stop_XXXX() functions
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# screen_stop service
-function screen_stop {
- # Clean up the screen window
- stop_process $1
-}
+function tail_log {
+ deprecated "With the removal of screen support, tail_log is deprecated and will be removed after Queens"
+}
# Plugin Functions
# =================
@@ -1887,7 +1567,7 @@
local name=$1
local url=$2
local branch=${3:-master}
- if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then
+ if is_plugin_enabled $name; then
die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}"
fi
DEVSTACK_PLUGINS+=",$name"
@@ -1896,6 +1576,19 @@
GITBRANCH[$name]=$branch
}
+# is_plugin_enabled <name>
+#
+# Check if the plugin was enabled, e.g. using enable_plugin
+#
+# ``name`` The name with which the plugin was enabled
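+#
+# Usage (sketch):
+#   if is_plugin_enabled devstack-plugin-container; then echo "enabled"; fi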
+function is_plugin_enabled {
+ local name=$1
+ if [[ ",${DEVSTACK_PLUGINS}," =~ ",${name}," ]]; then
+ return 0
+ fi
+ return 1
+}
+
# fetch_plugins
#
# clones all plugins
@@ -2013,6 +1706,35 @@
fi
}
+# define_plugin <name>
+#
+# This function is a no-op. It allows a plugin to define its name so
+# that other plugins may reference it by name. It should generally be
+# the last component of the canonical git repo name. E.g.,
+# openstack/devstack-foo should use "devstack-foo" as the name here.
+#
+# Although devstack itself does not act on the value, it may still be
+# used by external tools (as in plugin_requires) and may be used by
+# devstack in the future.
+#
+# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
+function define_plugin {
+ :
+}
+
+# plugin_requires <name> <other>
+#
+# This function is a no-op. It is currently used by external tools
+# (such as the devstack module for Ansible) to automatically generate
+# local.conf files. It is not currently used by devstack itself to
+# resolve dependencies.
+#
+# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
+# ``other`` is the name of another plugin
+function plugin_requires {
+ :
+}
+
# Service Functions
# =================
@@ -2321,7 +2043,7 @@
# Check if this is a valid ipv4 address string
function is_ipv4_address {
local address=$1
- local regex='([0-9]{1,3}.){3}[0-9]{1,3}'
+ local regex='([0-9]{1,3}\.){3}[0-9]{1,3}'
# TODO(clarkb) make this more robust
if [[ "$address" =~ $regex ]] ; then
return 0
@@ -2385,13 +2107,31 @@
}
+# Return just the <major>.<minor> for the given python interpreter
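+# e.g. "_get_python_version python3" prints something like "3.5"; the
+# output is empty if the interpreter is not installed.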
+function _get_python_version {
+ local interp=$1
+ local version
+ # disable erroring out here, otherwise if python 3 doesn't exist we fail hard.
+ if [[ -x $(which $interp 2> /dev/null) ]]; then
+ version=$($interp -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+ fi
+ echo ${version}
+}
+
# Return the current python as "python<major>.<minor>"
function python_version {
local python_version
- python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+ python_version=$(_get_python_version python2)
echo "python${python_version}"
}
+function python3_version {
+ local python3_version
+ python3_version=$(_get_python_version python3)
+ echo "python${python_version}"
+}
+
+
# Service wrapper to restart services
# restart_service service-name
function restart_service {
@@ -2564,12 +2304,7 @@
function cleanup_oscwrap {
local total=0
- if python3_enabled ; then
- local python=python3
- else
- local python=python
- fi
- total=$(cat $OSCWRAP_TIMER_FILE | $python -c "import sys; print(sum(int(l) for l in sys.stdin))")
+ total=$(cat $OSCWRAP_TIMER_FILE | $PYTHON -c "import sys; print(sum(int(l) for l in sys.stdin))")
_TIME_TOTAL["osc"]=$total
rm $OSCWRAP_TIMER_FILE
}
@@ -2579,11 +2314,13 @@
function time_totals {
local elapsed_time
local end_time
- local len=15
+ local len=20
local xtrace
+ local unaccounted_time
end_time=$(date +%s)
elapsed_time=$(($end_time - $_TIME_BEGIN))
+ unaccounted_time=$elapsed_time
# pad 1st column this far
for t in ${!_TIME_TOTAL[*]}; do
@@ -2600,16 +2337,19 @@
echo
echo "========================="
echo "DevStack Component Timing"
+ echo " (times are in seconds) "
echo "========================="
- printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time"
- echo
for t in ${!_TIME_TOTAL[*]}; do
local v=${_TIME_TOTAL[$t]}
# because we're recording in milliseconds
v=$(($v / 1000))
printf "%-${len}s %3d\n" "$t" "$v"
+ unaccounted_time=$(($unaccounted_time - $v))
done
+ echo "-------------------------"
+ printf "%-${len}s %3d\n" "Unaccounted time" "$unaccounted_time"
echo "========================="
+ printf "%-${len}s %3d\n" "Total runtime" "$elapsed_time"
$xtrace
}
diff --git a/inc/python b/inc/python
index 718cbb2..ec4233b 100644
--- a/inc/python
+++ b/inc/python
@@ -49,7 +49,11 @@
fi
$xtrace
- if is_fedora || is_suse; then
+ if python3_enabled && [ "$os_VENDOR" = "Fedora" -a $os_RELEASE -gt 26 ]; then
+ # Default Python 3 install prefix changed to /usr/local in Fedora 27:
+ # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe
+ echo "/usr/local/bin"
+ elif is_fedora || is_suse; then
echo "/usr/bin"
else
echo "/usr/local/bin"
@@ -219,7 +223,8 @@
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
-# pip_install package [package ...]
+# Usage:
+# pip_install pip_arguments
function pip_install {
local xtrace result
xtrace=$(set +o | grep xtrace)
@@ -241,6 +246,26 @@
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
+
+ # Try to extract the path of the package we are installing into
+ # package_dir. We need this to check for test-requirements.txt,
+ # at least.
+ #
+ # ${!#} expands to the last positional argument to this function.
+ # With "extras" syntax included, our arguments might be something
+ # like:
+ # -e /path/to/fooproject[extra]
+ # Thus this magic line grabs just the path without extras
+ #
+ # Note that this makes no sense if this is a pypi (rather than
+ # local path) install; ergo you must check this path exists before
+ # use. Also, if we had multiple or mixed installs, we would also
+ # likely break. But for historical reasons, it's basically only
+ # the other wrapper functions in here calling this to install
+ # local packages, and they do so with single call per install. So
+ # this works (for now...)
+ local package_dir=${!#%\[*\]}
+
if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
# TRACK_DEPENDS=True installation creates a circular dependency when
# we attempt to install virtualenv into a virtualenv, so we must global
@@ -261,7 +286,6 @@
# versions supported, and if we find the version of
# python3 we've been told to use, use that instead of the
# default pip
- local package_dir=${!#}
local python_versions
# Special case some services that have experimental
@@ -313,7 +337,7 @@
# packages like setuptools?
local pip_version
pip_version=$(python -c "import pip; \
- print(pip.__version__.strip('.')[0])")
+ print(pip.__version__.split('.')[0])")
if (( pip_version<6 )); then
die $LINENO "Currently installed pip version ${pip_version} does not" \
"meet minimum requirements (>=6)."
@@ -323,7 +347,7 @@
# Also install test requirements
local install_test_reqs=""
- local test_req="${!#}/test-requirements.txt"
+ local test_req="${package_dir}/test-requirements.txt"
if [[ -e "$test_req" ]]; then
install_test_reqs="-r $test_req"
fi
@@ -346,6 +370,9 @@
}
function pip_uninstall {
+ # Skip uninstall if offline
+ [[ "${OFFLINE}" = "True" ]] && return
+
local name=$1
if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
@@ -383,7 +410,23 @@
# determine if a package was installed from git
function lib_installed_from_git {
local name=$1
- pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git'
+ local safe_name
+ safe_name=$(python -c "from pkg_resources import safe_name; \
+ print(safe_name('${name}'))")
+ # Note "pip freeze" doesn't always work here, because it tries to
+ # be smart about finding the remote of the git repo the package
+ # was installed from. This doesn't work with zuul which clones
+ # repos with no remote.
+ #
+ # The best option seems to be to use "pip list" which will tell
+ # you the path an editable install was installed from; for example
+ # in response to something like
+ # pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate'
+ # pip list --format columns shows
+ # bashate 0.5.2.dev19 /tmp/env/src/bashate
+ # Thus we check the third column to see if we're installed from
+ # some local place.
+ [[ -n $(pip list --format=columns 2>/dev/null | awk "/^$safe_name/ {print \$3}") ]]
}
# check that everything that's in LIBS_FROM_GIT was actually installed
@@ -441,7 +484,7 @@
# project_dir: directory of project repo (e.g., /opt/stack/keystone)
# extras: comma-separated list of optional dependencies to install
# (e.g., ldap,memcache).
-# See http://docs.openstack.org/developer/pbr/#extra-requirements
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install <project_dir>[<extras>]"
function setup_install {
local project_dir=$1
@@ -455,7 +498,7 @@
# project_dir: directory of project repo (e.g., /opt/stack/keystone)
# extras: comma-separated list of optional dependencies to install
# (e.g., ldap,memcache).
-# See http://docs.openstack.org/developer/pbr/#extra-requirements
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install -e <project_dir>[<extras>]"
function setup_develop {
local project_dir=$1
@@ -487,7 +530,7 @@
# flags: pip CLI options/flags
# extras: comma-separated list of optional dependencies to install
# (e.g., ldap,memcache).
-# See http://docs.openstack.org/developer/pbr/#extra-requirements
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install <flags> <project_dir>[<extras>]"
function _setup_package_with_constraints_edit {
local project_dir=$1
@@ -523,7 +566,7 @@
# flags: pip CLI options/flags
# extras: comma-separated list of optional dependencies to install
# (e.g., ldap,memcache).
-# See http://docs.openstack.org/developer/pbr/#extra-requirements
+# See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
# The command is like "pip install <flags> <project_dir>[<extras>]"
function setup_package {
local project_dir=$1
diff --git a/lib/apache b/lib/apache
index c1b6bf8..84cec73 100644
--- a/lib/apache
+++ b/lib/apache
@@ -132,6 +132,11 @@
elif is_fedora; then
sudo rm -f /etc/httpd/conf.d/000-*
install_package httpd mod_wsgi
+ # For consistency with Ubuntu, switch to the worker mpm, as
+ # the default is event
+ sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+ sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
+ sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf
elif is_suse; then
install_package apache2 apache2-mod_wsgi
else
@@ -250,24 +255,27 @@
# always cleanup given that we are using iniset here
rm -rf $file
iniset "$file" uwsgi wsgi-file "$wsgi"
- iniset "$file" uwsgi socket "$socket"
iniset "$file" uwsgi processes $API_WORKERS
# This is running standalone
iniset "$file" uwsgi master true
# Set die-on-term & exit-on-reload so that uwsgi shuts down
iniset "$file" uwsgi die-on-term true
- iniset "$file" uwsgi exit-on-reload true
+ iniset "$file" uwsgi exit-on-reload false
+ # Set worker-reload-mercy so that worker will not exit till the time
+ # configured after graceful shutdown
+ iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
iniset "$file" uwsgi enable-threads true
iniset "$file" uwsgi plugins python
# uwsgi recommends this to prevent thundering herd on accept.
iniset "$file" uwsgi thunder-lock true
+ # Set hook to trigger graceful shutdown on SIGTERM
+ iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
# Override the default size for headers from the 4k default.
iniset "$file" uwsgi buffer-size 65535
# Make sure the client doesn't try to re-use the connection.
iniset "$file" uwsgi add-header "Connection: close"
# This ensures that file descriptors aren't shared between processes.
iniset "$file" uwsgi lazy-apps true
- iniset "$file" uwsgi chmod-socket 666
# If we said bind directly to http, then do that and don't start the apache proxy
if [[ -n "$http" ]]; then
@@ -275,12 +283,74 @@
else
local apache_conf=""
apache_conf=$(apache_site_config_for $name)
- echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee $apache_conf
+ iniset "$file" uwsgi socket "$socket"
+ iniset "$file" uwsgi chmod-socket 666
+ echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}/\" retry=0 " | sudo tee -a $apache_conf
enable_apache_site $name
restart_apache_server
fi
}
+# For services using chunked encoding (the only services known to use this
+# currently are Glance and Swift), we need to use an http proxy instead of
+# mod_proxy_uwsgi because the chunked encoding gets dropped. See:
+# https://github.com/unbit/uwsgi/issues/1540 You can work around this on
+# python2, but that involves having apache buffer the request before sending
+# it to uwsgi.
+function write_local_uwsgi_http_config {
+ local file=$1
+ local wsgi=$2
+ local url=$3
+ name=$(basename $wsgi)
+
+ # create a home for the sockets; note don't use /tmp -- apache has
+ # a private view of it on some platforms.
+
+ # always cleanup given that we are using iniset here
+ rm -rf $file
+ iniset "$file" uwsgi wsgi-file "$wsgi"
+ port=$(get_random_port)
+ iniset "$file" uwsgi http-socket "127.0.0.1:$port"
+ iniset "$file" uwsgi processes $API_WORKERS
+ # This is running standalone
+ iniset "$file" uwsgi master true
+ # Set die-on-term so that uwsgi shuts down on SIGTERM; leave exit-on-reload off so reloads stay graceful
+ iniset "$file" uwsgi die-on-term true
+ iniset "$file" uwsgi exit-on-reload false
+ iniset "$file" uwsgi enable-threads true
+ iniset "$file" uwsgi plugins python
+ # uwsgi recommends this to prevent thundering herd on accept.
+ iniset "$file" uwsgi thunder-lock true
+ # Set hook to trigger graceful shutdown on SIGTERM
+ iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+ # Set worker-reload-mercy so that worker will not exit till the time
+ # configured after graceful shutdown
+ iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+ # Override the default size for headers from the 4k default.
+ iniset "$file" uwsgi buffer-size 65535
+ # Make sure the client doesn't try to re-use the connection.
+ iniset "$file" uwsgi add-header "Connection: close"
+ # This ensures that file descriptors aren't shared between processes.
+ iniset "$file" uwsgi lazy-apps true
+ iniset "$file" uwsgi chmod-socket 666
+ iniset "$file" uwsgi http-raw-body true
+ iniset "$file" uwsgi http-chunked-input true
+ iniset "$file" uwsgi http-auto-chunked true
+ iniset "$file" uwsgi http-keepalive false
+ # Increase socket timeout for slow chunked uploads
+ iniset "$file" uwsgi socket-timeout 30
+
+ enable_apache_mod proxy
+ enable_apache_mod proxy_http
+ local apache_conf=""
+ apache_conf=$(apache_site_config_for $name)
+ echo "KeepAlive Off" | sudo tee $apache_conf
+ echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
+ echo "ProxyPass \"${url}\" \"http://127.0.0.1:$port\" retry=0 " | sudo tee -a $apache_conf
+ enable_apache_site $name
+ restart_apache_server
+}
+
function remove_uwsgi_config {
local file=$1
local wsgi=$2
diff --git a/lib/cinder b/lib/cinder
index 2068812..3a8097f 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -55,6 +55,8 @@
CINDER_CONF_DIR=/etc/cinder
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
+CINDER_UWSGI=$CINDER_BIN_DIR/cinder-wsgi
+CINDER_UWSGI_CONF=$CINDER_CONF_DIR/cinder-api-uwsgi.ini
CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
# Public facing bits
@@ -68,12 +70,11 @@
CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
# What type of LVM device should Cinder use for LVM backend
-# Defaults to default, which is thick, the other valid choice
-# is thin, which as the name implies utilizes lvm thin provisioning.
-# Thinly provisioned LVM volumes may be more efficient when using the Cinder
-# image cache, but there are also known race failures with volume snapshots
-# and thinly provisioned LVM volumes, see bug 1642111 for details.
-CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
+# Defaults to auto, which will do thin provisioning if it's a fresh
+# volume group, otherwise it will do thick. The other valid choices are
+# default, which is thick, or thin, which as the name implies utilizes lvm
+# thin provisioning.
+CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto}
# Default backends
# The backend format is type:name where type is one of the supported backend
@@ -95,10 +96,20 @@
# https://bugs.launchpad.net/cinder/+bug/1180976
CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
-CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
+# CentOS 7 switched to using LIO and that's all that's supported;
+# although the tgt bits are in EPEL, we don't want that for CI
+if is_fedora; then
+ CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+ if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
+ die "lioadm is the only valid Cinder target_helper config on this platform"
+ fi
+else
+ CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
+fi
-# Toggle for deploying Cinder under HTTPD + mod_wsgi
-CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False}
+# Toggle for deploying Cinder under a wsgi server. Legacy mod_wsgi
+# reference should be cleaned up to more accurately refer to uwsgi.
+CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-True}
# Source the enabled backends
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -187,46 +198,14 @@
done
fi
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- _cinder_cleanup_apache_wsgi
- fi
-}
-
-# _cinder_config_apache_wsgi() - Set WSGI config files
-function _cinder_config_apache_wsgi {
- local cinder_apache_conf
- cinder_apache_conf=$(apache_site_config_for osapi-volume)
- local cinder_ssl=""
- local cinder_certfile=""
- local cinder_keyfile=""
- local cinder_api_port=$CINDER_SERVICE_PORT
- local venv_path=""
-
- if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages"
- fi
-
- # copy proxy vhost file
- sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf
- sudo sed -e "
- s|%PUBLICPORT%|$cinder_api_port|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%APIWORKERS%|$API_WORKERS|g
- s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g;
- s|%SSLENGINE%|$cinder_ssl|g;
- s|%SSLCERTFILE%|$cinder_certfile|g;
- s|%SSLKEYFILE%|$cinder_keyfile|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- " -i $cinder_apache_conf
+ stop_process "c-api"
+ remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
}
# configure_cinder() - Set config files, create data dirs, etc
function configure_cinder {
sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR
- cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
-
rm -f $CINDER_CONF
configure_rootwrap cinder
@@ -248,20 +227,9 @@
configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
- # Change the default nova_catalog_info and nova_catalog_admin_info values in
- # cinder so that the service name cinder is searching for matches that set for
- # nova in keystone.
- if [[ -n "$CINDER_NOVA_CATALOG_INFO" ]]; then
- iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO
- fi
- if [[ -n "$CINDER_NOVA_CATALOG_ADMIN_INFO" ]]; then
- iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO
- fi
-
- iniset $CINDER_CONF DEFAULT auth_strategy keystone
iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER"
iniset $CINDER_CONF database connection `database_connection_url cinder`
iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
@@ -272,9 +240,8 @@
iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
- iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
-
- iniset $CINDER_CONF key_manager api_class cinder.keymgr.conf_key_mgr.ConfKeyManager
+ iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
+ iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
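A minimal sketch of what the two iniset calls above leave in $CINDER_CONF; the hex key is freshly generated on every run, so this value is only an example:

    # [key_manager]
    # backend = cinder.keymgr.conf_key_mgr.ConfKeyManager
    # fixed_key = 3e1b5f3a9c0d4e7f8a2b6c1d0e9f4a5b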
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
local enabled_backends=""
@@ -310,10 +277,17 @@
fi
if is_service_enabled tls-proxy; then
- # Set the service port for a proxy to take the original
- iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
- iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
- iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
+ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+ # Set the service port for a proxy to take the original
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
+ iniset $CINDER_CONF oslo_middleware enable_proxy_headers_parsing True
+ else
+ iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
+ iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
+ iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
+ fi
+ fi
fi
if [ "$SYSLOG" != "False" ]; then
@@ -325,9 +299,7 @@
# Format logging
setup_logging $CINDER_CONF $CINDER_USE_MOD_WSGI
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- _cinder_config_apache_wsgi
- fi
+ write_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI" "/volume"
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
configure_cinder_driver
@@ -335,7 +307,7 @@
iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS"
- iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
+ iniset $CINDER_CONF DEFAULT glance_api_servers "$GLANCE_URL"
if is_service_enabled tls-proxy; then
iniset $CINDER_CONF DEFAULT glance_protocol https
iniset $CINDER_CONF DEFAULT glance_ca_certificates_file $SSL_BUNDLE_FILE
@@ -345,16 +317,15 @@
iniset $CINDER_CONF DEFAULT glance_api_version 2
fi
- # Set os_privileged_user credentials (used for os-assisted-snapshots)
- iniset $CINDER_CONF DEFAULT os_privileged_user_name nova
- iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
- iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME"
+ # Set nova credentials (used for os-assisted-snapshots)
+ configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova
+ iniset $CINDER_CONF nova region_name "$REGION_NAME"
iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL"
elif is_service_enabled etcd3; then
- iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:2379"
+ iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
fi
}
@@ -366,29 +337,59 @@
# Migrated from keystone_data.sh
function create_cinder_accounts {
-
# Cinder
if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
create_service_user "cinder"
+ # block-storage is the official service type
+ get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
get_or_create_service "cinder" "volume" "Cinder Volume Service"
- get_or_create_endpoint \
- "volume" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s"
+ if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
+ get_or_create_endpoint \
+ "block-storage" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/"
- get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
- get_or_create_endpoint \
- "volumev2" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s"
+ get_or_create_endpoint \
+ "volume" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s"
- get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
- get_or_create_endpoint \
- "volumev3" \
- "$REGION_NAME" \
- "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
+ get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
+ get_or_create_endpoint \
+ "volumev2" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(project_id)s"
+
+ get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
+ get_or_create_endpoint \
+ "volumev3" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
+ else
+ get_or_create_endpoint \
+ "block-storage" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/"
+
+ get_or_create_endpoint \
+ "volume" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s"
+
+ get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
+ get_or_create_endpoint \
+ "volumev2" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v2/\$(project_id)s"
+
+ get_or_create_service "cinderv3" "volumev3" "Cinder Volume Service V3"
+ get_or_create_endpoint \
+ "volumev3" \
+ "$REGION_NAME" \
+ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
+ fi
configure_cinder_internal_tenant
fi
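Either branch can be sanity-checked after stacking; a hedged example with the openstack CLI, using the service types registered above:

    # Under uwsgi expect http://$SERVICE_HOST/volume/..., otherwise
    # http://$SERVICE_HOST:8776/... with explicit version suffixes
    openstack endpoint list --service block-storage
    openstack endpoint list --service volumev3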
@@ -436,16 +437,10 @@
function install_cinder {
git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
setup_develop $CINDER_DIR
- if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
- if is_fedora; then
- install_package scsi-target-utils
- else
- install_package tgt
- fi
- fi
-
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- install_apache_wsgi
+ if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
+ install_package tgt
+ elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
+ install_package targetcli
fi
}
@@ -473,11 +468,12 @@
fi
}
-# start_cinder() - Start running processes, including screen
+# start_cinder() - Start running processes
function start_cinder {
local service_port=$CINDER_SERVICE_PORT
local service_protocol=$CINDER_SERVICE_PROTOCOL
- if is_service_enabled tls-proxy; then
+ local cinder_url
+ if is_service_enabled tls-proxy && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
service_port=$CINDER_SERVICE_PORT_INT
service_protocol="http"
fi
@@ -501,24 +497,23 @@
fi
fi
- if is_service_enabled c-api ; then
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- enable_apache_site osapi-volume
- restart_apache_server
- tail_log c-api /var/log/$APACHE_NAME/c-api.log
- else
+ if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+ if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
+ cinder_url=$service_protocol://$SERVICE_HOST:$service_port
+ # Start proxy if tls enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
+ fi
+ else
+ run_process "c-api" "$CINDER_BIN_DIR/uwsgi --procname-prefix cinder-api --ini $CINDER_UWSGI_CONF"
+ cinder_url=$service_protocol://$SERVICE_HOST/volume/v3
fi
+ fi
- echo "Waiting for Cinder API to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then
- die $LINENO "c-api did not start"
- fi
-
- # Start proxies if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
- fi
+ echo "Waiting for Cinder API to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $cinder_url; then
+ die $LINENO "c-api did not start"
fi
run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
@@ -532,18 +527,10 @@
# stop_cinder() - Stop running processes
function stop_cinder {
- if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
- disable_apache_site osapi-volume
- restart_apache_server
- else
- stop_process c-api
- fi
-
- # Kill the cinder screen windows
- local serv
- for serv in c-bak c-sch c-vol; do
- stop_process $serv
- done
+ stop_process c-api
+ stop_process c-bak
+ stop_process c-sch
+ stop_process c-vol
}
# create_volume_types() - Create Cinder's configured volume types
@@ -553,7 +540,17 @@
local be be_name
for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
be_name=${be##*:}
- openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name}
+ # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode
+ if is_service_enabled keystone; then
+ openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name}
+ else
+ # TODO (e0ne): use openstack client once it supports cinder in noauth mode:
+ # https://bugs.launchpad.net/python-cinderclient/+bug/1755279
+ local cinder_url
+ cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3
+ OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name}
+ OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name}
+ fi
done
fi
}
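A hedged follow-up check that the per-backend types exist; the type name mirrors the backend name, e.g. devstack's default lvmdriver-1:

    openstack volume type show lvmdriver-1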
diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate
index 6b1f848..3ffd9a6 100644
--- a/lib/cinder_backends/fake_gate
+++ b/lib/cinder_backends/fake_gate
@@ -50,7 +50,7 @@
iniset $CINDER_CONF $be_name volume_backend_name $be_name
iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver"
iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
- iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 03e1880..497081c 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -50,7 +50,7 @@
iniset $CINDER_CONF $be_name volume_backend_name $be_name
iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
- iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER"
+ iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
}
diff --git a/lib/databases/mysql b/lib/databases/mysql
index a0cf7a4..0089663 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -15,10 +15,9 @@
register_database mysql
-# Linux distros, thank you for being incredibly consistent
-MYSQL=mysql
+MYSQL_SERVICE_NAME=mysql
if is_fedora && ! is_oraclelinux; then
- MYSQL=mariadb
+ MYSQL_SERVICE_NAME=mariadb
fi
# Functions
@@ -34,17 +33,17 @@
# Get rid of everything enough to cleanly change database backends
function cleanup_database_mysql {
- stop_service $MYSQL
+ stop_service $MYSQL_SERVICE_NAME
if is_ubuntu; then
# Get ruthless with mysql
apt_get purge -y mysql* mariadb*
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
return
- elif is_suse || is_oraclelinux; then
+ elif is_oraclelinux; then
uninstall_package mysql-community-server
sudo rm -rf /var/lib/mysql
- elif is_fedora; then
+ elif is_suse || is_fedora; then
uninstall_package mariadb-server
sudo rm -rf /var/lib/mysql
else
@@ -64,12 +63,9 @@
if is_ubuntu; then
my_conf=/etc/mysql/my.cnf
- mysql=mysql
elif is_suse || is_oraclelinux; then
my_conf=/etc/my.cnf
- mysql=mysql
elif is_fedora; then
- mysql=mariadb
my_conf=/etc/my.cnf
local cracklib_conf=/etc/my.cnf.d/cracklib_password_check.cnf
if [ -f "$cracklib_conf" ]; then
@@ -82,7 +78,7 @@
# Start mysql-server
if is_fedora || is_suse; then
# service is not started by default
- start_service $mysql
+ start_service $MYSQL_SERVICE_NAME
fi
# Set the root password - only works the first time. For Ubuntu, we already
@@ -124,7 +120,7 @@
iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
fi
- restart_service $mysql
+ restart_service $MYSQL_SERVICE_NAME
}
function install_database_mysql {
@@ -151,13 +147,11 @@
chmod 0600 $HOME/.my.cnf
fi
# Install mysql-server
- if is_suse || is_oraclelinux; then
- if ! is_package_installed mariadb; then
- install_package mysql-community-server
- fi
- elif is_fedora; then
+ if is_oraclelinux; then
+ install_package mysql-community-server
+ elif is_fedora || is_suse; then
install_package mariadb-server
- sudo systemctl enable mariadb
+ sudo systemctl enable $MYSQL_SERVICE_NAME
elif is_ubuntu; then
install_package mysql-server
else
diff --git a/lib/dstat b/lib/dstat
index 982b703..fe38d75 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -16,7 +16,7 @@
_XTRACE_DSTAT=$(set +o | grep xtrace)
set +o xtrace
-# start_dstat() - Start running processes, including screen
+# start_dstat() - Start running processes
function start_dstat {
# A better kind of sysstat, with the top process per time slice
run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR"
diff --git a/lib/etcd3 b/lib/etcd3
index ea58403..26d07fd 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -24,16 +24,9 @@
# --------
# Set up default values for etcd
-ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
-ETCD_VERSION=${ETCD_VERSION:-v3.1.7}
-ETCD_DATA_DIR="$DEST/data/etcd"
+ETCD_DATA_DIR="$DATA_DIR/etcd"
ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
ETCD_BIN_DIR="$DEST/bin"
-ETCD_SHA256_AMD64="4fde194bbcd259401e2b5c462dfa579ee7f6af539f13f130b8f5b4f52e3b3c52"
-# NOTE(sdague): etcd v3.1.7 doesn't have anything for these architectures, though 3.2.0 does.
-ETCD_SHA256_ARM64=""
-ETCD_SHA256_PPC64=""
-ETCD_PORT=2379
if is_ubuntu ; then
UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1`
@@ -44,11 +37,15 @@
local cmd="$ETCD_BIN_DIR/etcd"
cmd+=" --name $HOSTNAME --data-dir $ETCD_DATA_DIR"
cmd+=" --initial-cluster-state new --initial-cluster-token etcd-cluster-01"
- cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:2380"
- cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:2380"
- cmd+=" --advertise-client-urls http://${HOST_IP}:$ETCD_PORT"
- cmd+=" --listen-peer-urls http://0.0.0.0:2380 "
- cmd+=" --listen-client-urls http://${HOST_IP}:$ETCD_PORT"
+ cmd+=" --initial-cluster $HOSTNAME=http://$SERVICE_HOST:$ETCD_PEER_PORT"
+ cmd+=" --initial-advertise-peer-urls http://$SERVICE_HOST:$ETCD_PEER_PORT"
+ cmd+=" --advertise-client-urls http://$SERVICE_HOST:$ETCD_PORT"
+ if [ "$SERVICE_LISTEN_ADDRESS" == "::" ]; then
+ cmd+=" --listen-peer-urls http://[::]:$ETCD_PEER_PORT "
+ else
+ cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT "
+ fi
+ cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root"
@@ -57,6 +54,9 @@
iniset -sudo $unitfile "Service" "Type" "notify"
iniset -sudo $unitfile "Service" "Restart" "on-failure"
iniset -sudo $unitfile "Service" "LimitNOFILE" "65536"
+ if is_arch "aarch64"; then
+ iniset -sudo $unitfile "Service" "Environment" "ETCD_UNSUPPORTED_ARCH=arm64"
+ fi
$SYSTEMCTL daemon-reload
$SYSTEMCTL enable $ETCD_SYSTEMD_SERVICE
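For orientation, a hedged sketch of the command line the fragment above assembles on a single-node IPv4 host; the hostname, addresses, and default ports 2379/2380 are illustrative:

    # etcd --name devstack-host --data-dir /opt/stack/data/etcd \
    #     --initial-cluster-state new --initial-cluster-token etcd-cluster-01 \
    #     --initial-cluster devstack-host=http://10.0.0.10:2380 \
    #     --initial-advertise-peer-urls http://10.0.0.10:2380 \
    #     --advertise-client-urls http://10.0.0.10:2379 \
    #     --listen-peer-urls http://0.0.0.0:2380 \
    #     --listen-client-urls http://10.0.0.10:2379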
@@ -92,39 +92,25 @@
function install_etcd3 {
echo "Installing etcd"
- # Make sure etcd3 downloads the correct architecture
- if is_arch "x86_64"; then
- ETCD_ARCH="amd64"
- ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64}
- elif is_arch "aarch64"; then
- ETCD_ARCH="arm64"
- ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64}
- elif is_arch "ppc64le"; then
- ETCD_ARCH="ppc64le"
- ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64}
- else
- exit_distro_not_supported "invalid hardware type - $ETCD_ARCH"
- fi
-
- ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
-
# Create the necessary directories
sudo mkdir -p $ETCD_BIN_DIR
sudo mkdir -p $ETCD_DATA_DIR
# Download and cache the etcd tgz for subsequent use
+ local etcd_file
+ etcd_file="$(get_extra_file $ETCD_DOWNLOAD_LOCATION)"
if [ ! -f "$FILES/etcd-$ETCD_VERSION-linux-$ETCD_ARCH/etcd" ]; then
- ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
- wget $ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE -O $FILES/$ETCD_DOWNLOAD_FILE
- echo "${ETCD_SHA256} $FILES/${ETCD_DOWNLOAD_FILE}" > $FILES/etcd.sha256sum
- # NOTE(sdague): this should go fatal if this fails
- sha256sum -c $FILES/etcd.sha256sum
+ echo "${ETCD_SHA256} $etcd_file" > $FILES/etcd.sha256sum
+ # NOTE(yuanke wei): remove the damaged file when the checksum fails
+ sha256sum -c $FILES/etcd.sha256sum || (sudo rm -f $etcd_file; exit 1)
- tar xzvf $FILES/$ETCD_DOWNLOAD_FILE -C $FILES
+ tar xzvf $etcd_file -C $FILES
sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd
+ sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl
fi
if [ ! -f "$ETCD_BIN_DIR/etcd" ]; then
sudo cp $FILES/$ETCD_NAME/etcd $ETCD_BIN_DIR/etcd
+ sudo cp $FILES/$ETCD_NAME/etcdctl $ETCD_BIN_DIR/etcdctl
fi
}
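Once installed, the binaries can be smoke-tested; a hedged example (ETCDCTL_API=3 selects the v3 subcommands on 3.x clients):

    $ETCD_BIN_DIR/etcd --version
    ETCDCTL_API=3 $ETCD_BIN_DIR/etcdctl version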
diff --git a/lib/glance b/lib/glance
index baf8c61..95d2450 100644
--- a/lib/glance
+++ b/lib/glance
@@ -56,6 +56,7 @@
GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
+GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf
GLANCE_V1_ENABLED=${GLANCE_V1_ENABLED:-False}
if is_service_enabled tls-proxy; then
@@ -71,6 +72,16 @@
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
+GLANCE_UWSGI=$GLANCE_BIN_DIR/glance-wsgi-api
+GLANCE_UWSGI_CONF=$GLANCE_CONF_DIR/glance-uwsgi.ini
+# If wsgi mode is uwsgi, run glance under uwsgi; else default to eventlet
+# TODO(mtreinish): Remove the eventlet path here and in all the similar
+# conditionals below after the Pike release
+if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+ GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_SERVICE_HOST/image"
+else
+ GLANCE_URL="$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
+fi
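Whichever branch is taken, the image API should answer at $GLANCE_URL with its version discovery document; a hedged smoke test:

    # Expect a JSON list of API versions at the service root
    curl -s $GLANCE_URL | head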
# Functions
# ---------
@@ -95,6 +106,11 @@
function configure_glance {
sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
+ # We run this here as this configures the cache dirs for the auth
+ # middleware, which is used in the api server and not in the registry.
+ # The api server is configured through this function, not init_glance.
+ create_glance_cache_dir
+
# Copy over our glance configurations and update them
cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -104,16 +120,13 @@
dburl=`database_connection_url glance`
iniset $GLANCE_REGISTRY_CONF database connection $dburl
iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
- iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2
iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
- cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
inicomment $GLANCE_API_CONF DEFAULT log_file
iniset $GLANCE_API_CONF database connection $dburl
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
@@ -141,8 +154,6 @@
iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST
- iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
-
# CORS feature support - to allow calls from Horizon by default
if [ -n "$GLANCE_CORS_ALLOWED_ORIGIN" ]; then
iniset $GLANCE_API_CONF cors allowed_origin "$GLANCE_CORS_ALLOWED_ORIGIN"
@@ -181,9 +192,12 @@
inicomment $GLANCE_API_CONF glance_store swift_store_auth_address
fi
+ # We need to tell glance what its public endpoint is so that the version
+ # discovery document will be correct
+ iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_URL
+
if is_service_enabled tls-proxy; then
iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
- iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT
iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
@@ -199,7 +213,6 @@
setup_logging $GLANCE_REGISTRY_CONF
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
-
cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF
@@ -220,6 +233,11 @@
# Store specific confs
iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+ # Set default configuration options for the glance-image-import
+ iniset $GLANCE_IMAGE_IMPORT_CONF image_import_opts image_import_plugins []
+ iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin
+ iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject
+
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
@@ -232,6 +250,13 @@
iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
fi
+
+ if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+ write_local_uwsgi_http_config "$GLANCE_UWSGI_CONF" "$GLANCE_UWSGI" "/image"
+ else
+ iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
+ iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+ fi
}
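For orientation, a hedged sketch of the sort of file write_local_uwsgi_http_config emits; the exact keys are devstack's choice and may differ:

    # [uwsgi]                        (illustrative, not verbatim)
    # wsgi-file = /usr/local/bin/glance-wsgi-api
    # http = 127.0.0.1:60999         # proxied by apache under /image
    # processes = $API_WORKERS
    # master = true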
# create_glance_accounts() - Set up common required glance accounts
@@ -256,7 +281,7 @@
get_or_create_endpoint \
"image" \
"$REGION_NAME" \
- "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
+ "$GLANCE_URL"
# Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999
service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
@@ -265,7 +290,7 @@
fi
}
-# create_glance_cache_dir() - Part of the init_glance() process
+# create_glance_cache_dir() - Part of the configure_glance() process
function create_glance_cache_dir {
# Create cache dir
sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact
@@ -292,8 +317,6 @@
# Load metadata definitions
$GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
time_stop "dbsync"
-
- create_glance_cache_dir
}
# install_glanceclient() - Collect source and prepare
@@ -319,26 +342,31 @@
setup_develop $GLANCE_DIR
}
-# start_glance() - Start running processes, including screen
+# start_glance() - Start running processes
function start_glance {
local service_protocol=$GLANCE_SERVICE_PROTOCOL
if is_service_enabled tls-proxy; then
- start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
+ if [[ "$WSGI_MODE" != "uwsgi" ]]; then
+ start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
+ fi
start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
fi
run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
- run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+ if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+ run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
+ else
+ run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+ fi
- echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then
+ echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_URL; then
die $LINENO "g-api did not start"
fi
}
# stop_glance() - Stop running processes
function stop_glance {
- # Kill the Glance screen windows
stop_process g-api
stop_process g-reg
}
diff --git a/lib/horizon b/lib/horizon
index 9c7ec00..fab41bb 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -26,9 +26,6 @@
# Defaults
# --------
-# Set up default directories
-GITDIR["django_openstack_auth"]=$DEST/django_openstack_auth
-
HORIZON_DIR=$DEST/horizon
# local_settings.py is used to customize Dashboard settings.
@@ -106,6 +103,10 @@
_horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
fi
+ if is_service_enabled ldap; then
+ _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True"
+ fi
+
# Create an empty directory that apache uses as docroot
sudo mkdir -p $HORIZON_DIR/.blackhole
@@ -155,20 +156,6 @@
}
-# install_django_openstack_auth() - Collect source and prepare
-function install_django_openstack_auth {
- if use_library_from_git "django_openstack_auth"; then
- local dir=${GITDIR["django_openstack_auth"]}
- git_clone_by_name "django_openstack_auth"
- # Compile message catalogs before installation
- _prepare_message_catalog_compilation
- (cd $dir; $PYTHON setup.py compile_catalog)
- setup_dev_lib "django_openstack_auth"
- fi
- # if we aren't using this library from git, then we just let it
- # get dragged in by the horizon setup.
-}
-
# install_horizon() - Collect source and prepare
function install_horizon {
# Apache installation, because we mark it NOPRIME
@@ -177,24 +164,16 @@
git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
}
-# start_horizon() - Start running processes, including screen
+# start_horizon() - Start running processes
function start_horizon {
restart_apache_server
- tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
}
-# stop_horizon() - Stop running processes (non-screen)
+# stop_horizon() - Stop running processes
function stop_horizon {
stop_apache_server
}
-# NOTE: It can be moved to common functions, but it is only used by compilation
-# of django_openstack_auth catalogs at the moment.
-function _prepare_message_catalog_compilation {
- pip_install_gr Babel
-}
-
-
# Restore xtrace
$_XTRACE_HORIZON
diff --git a/lib/keystone b/lib/keystone
index eb46526..714f089 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -148,16 +148,18 @@
# cleanup_keystone() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_keystone {
- # TODO: remove admin at pike-2
- # These files will be created if we are running WSGI_MODE="uwsgi"
- remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
- sudo rm -f $(apache_site_config_for keystone-wsgi-public)
- sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
-
- # These files will be created if we are running WSGI_MODE="mod_wsgi"
- disable_apache_site keystone
- sudo rm -f $(apache_site_config_for keystone)
+ if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
+ # These files will be created if we are running WSGI_MODE="mod_wsgi"
+ disable_apache_site keystone
+ sudo rm -f $(apache_site_config_for keystone)
+ else
+ stop_process "keystone"
+ # TODO: remove admin at pike-2
+ remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
+ remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
+ sudo rm -f $(apache_site_config_for keystone-wsgi-public)
+ sudo rm -f $(apache_site_config_for keystone-wsgi-admin)
+ fi
}
# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
@@ -219,17 +221,10 @@
fi
# Rewrite stock ``keystone.conf``
-
if is_service_enabled ldap; then
- #Set all needed ldap values
- iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
- iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
- iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
- iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
- iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
- iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
+ iniset $KEYSTONE_CONF identity domain_config_dir "$KEYSTONE_CONF_DIR/domains"
+ iniset $KEYSTONE_CONF identity domain_specific_drivers_enabled "True"
fi
-
iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND"
iniset $KEYSTONE_CONF identity password_hash_rounds $KEYSTONE_PASSWORD_HASH_ROUNDS
iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND"
@@ -357,7 +352,7 @@
# The Member role is used by Horizon and Swift so we need to keep it:
local member_role="member"
- # Captial Member role is legacy hard coded in Horizon / Swift
+ # Capital Member role is legacy hard coded in Horizon / Swift
# configs. Keep it around.
get_or_create_role "Member"
@@ -410,6 +405,10 @@
get_or_add_group_project_role $member_role $non_admin_group $alt_demo_project
get_or_add_group_project_role $another_role $non_admin_group $alt_demo_project
get_or_add_group_project_role $admin_role $admin_group $admin_project
+
+ if is_service_enabled ldap; then
+ create_ldap_domain
+ fi
}
# Create a user that is capable of verifying keystone tokens for use with auth_token middleware.
@@ -451,7 +450,7 @@
iniset $conf_file $section cafile $SSL_BUNDLE_FILE
iniset $conf_file $section signing_dir $signing_dir
- iniset $conf_file $section memcached_servers $SERVICE_HOST:11211
+ iniset $conf_file $section memcached_servers localhost:11211
}
# init_keystone() - Initialize databases, etc.
@@ -537,7 +536,7 @@
fi
}
-# start_keystone() - Start running processes, including screen
+# start_keystone() - Start running processes
function start_keystone {
# Get right service port for testing
local service_port=$KEYSTONE_SERVICE_PORT
@@ -550,10 +549,8 @@
if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
enable_apache_site keystone
restart_apache_server
- tail_log key /var/log/$APACHE_NAME/keystone.log
- tail_log key-access /var/log/$APACHE_NAME/keystone_access.log
else # uwsgi
- run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
+ run_process keystone "$KEYSTONE_BIN_DIR/uwsgi --procname-prefix keystone --ini $KEYSTONE_PUBLIC_UWSGI_CONF" ""
fi
echo "Waiting for keystone to start..."
@@ -585,12 +582,7 @@
restart_apache_server
else
stop_process keystone
- remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
- # TODO(remove in at pike-2)
- remove_uwsgi_config "$KEYSTONE_ADMIN_UWSGI_CONF" "$KEYSTONE_ADMIN_UWSGI"
fi
- # Kill the Keystone screen window
- stop_process key
}
# bootstrap_keystone() - Initialize user, role and project
@@ -615,6 +607,57 @@
--bootstrap-public-url "$KEYSTONE_SERVICE_URI"
}
+# create_ldap_domain() - Create domain file and initialize domain with a user
+function create_ldap_domain {
+ # Creates domain Users
+ openstack --os-identity-api-version=3 domain create --description "LDAP domain" Users
+
+ # Create domain file inside etc/keystone/domains
+ KEYSTONE_LDAP_DOMAIN_FILE=$KEYSTONE_CONF_DIR/domains/keystone.Users.conf
+ mkdir -p "$KEYSTONE_CONF_DIR/domains"
+ touch "$KEYSTONE_LDAP_DOMAIN_FILE"
+
+ # Set identity driver 'ldap'
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE identity driver "ldap"
+
+ # LDAP settings for Users domain
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_objectclass "inetOrgPerson"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_name_attribute "cn"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_mail_attribute "mail"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user_id_attribute "uid"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap user "cn=Manager,dc=openstack,dc=org"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap url "ldap://localhost"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap suffix $LDAP_BASE_DN
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap password $LDAP_PASSWORD
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_tree_dn "ou=Groups,$LDAP_BASE_DN"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_objectclass "groupOfNames"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_name_attribute "cn"
+ iniset $KEYSTONE_LDAP_DOMAIN_FILE ldap group_id_attribute "cn"
+
+ # Restart apache and identity services to associate domain and conf file
+ sudo service apache2 reload
+ sudo systemctl restart devstack@keystone
+
+ # Create LDAP user.ldif and add user to LDAP backend
+ local tmp_ldap_dir
+ tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ _ldap_varsubst $FILES/ldap/user.ldif.in $slappass >$tmp_ldap_dir/user.ldif
+ sudo ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/user.ldif
+ rm -rf $tmp_ldap_dir
+
+ local admin_project
+ admin_project=$(get_or_create_project "admin" default)
+ local ldap_user
+ ldap_user=$(openstack user show --domain=Users demo -f value -c id)
+ local admin_role="admin"
+ get_or_create_role $admin_role
+
+ # Grant demo LDAP user access to project and role
+ get_or_add_user_project_role $admin_role $ldap_user $admin_project
+}
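A hedged way to confirm the domain wiring once keystone has restarted:

    # demo should now resolve through the LDAP-backed Users domain
    openstack user list --domain Users
    openstack user show --domain Users demo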
+
# Restore xtrace
$_XTRACE_KEYSTONE
diff --git a/lib/ldap b/lib/ldap
index 4cea812..5a53d0e 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -119,8 +119,7 @@
printf "installing OpenLDAP"
if is_ubuntu; then
- # Ubuntu automatically starts LDAP so no need to call start_ldap()
- :
+ configure_ldap
elif is_fedora; then
start_ldap
elif is_suse; then
@@ -148,6 +147,27 @@
rm -rf $tmp_ldap_dir
}
+# configure_ldap() - Configure LDAP - reconfigure slapd
+function configure_ldap {
+ sudo debconf-set-selections <<EOF
+ slapd slapd/internal/generated_adminpw password $LDAP_PASSWORD
+ slapd slapd/internal/adminpw password $LDAP_PASSWORD
+ slapd slapd/password2 password $LDAP_PASSWORD
+ slapd slapd/password1 password $LDAP_PASSWORD
+ slapd slapd/dump_database_destdir string /var/backups/slapd-VERSION
+ slapd slapd/domain string Users
+ slapd shared/organization string $LDAP_DOMAIN
+ slapd slapd/backend string HDB
+ slapd slapd/purge_database boolean true
+ slapd slapd/move_old_database boolean true
+ slapd slapd/allow_ldap_v2 boolean false
+ slapd slapd/no_configuration boolean false
+ slapd slapd/dump_database select when needed
+EOF
+ sudo apt-get install -y slapd ldap-utils
+ sudo dpkg-reconfigure -f noninteractive $LDAP_SERVICE_NAME
+}
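A hedged sanity check that slapd accepted the reconfigure and serves the suffix:

    # Anonymous base search against the freshly configured directory
    ldapsearch -x -H ldap://localhost -b "$LDAP_BASE_DN" -s base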
+
# start_ldap() - Start LDAP
function start_ldap {
sudo service $LDAP_SERVICE_NAME restart
diff --git a/lib/libraries b/lib/libraries
index 4ceb804..b4f3c31 100644
--- a/lib/libraries
+++ b/lib/libraries
@@ -28,8 +28,10 @@
GITDIR["cursive"]=$DEST/cursive
GITDIR["debtcollector"]=$DEST/debtcollector
GITDIR["futurist"]=$DEST/futurist
+GITDIR["openstacksdk"]=$DEST/openstacksdk
GITDIR["os-client-config"]=$DEST/os-client-config
GITDIR["osc-lib"]=$DEST/osc-lib
+GITDIR["osc-placement"]=$DEST/osc-placement
GITDIR["oslo.cache"]=$DEST/oslo.cache
GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
GITDIR["oslo.config"]=$DEST/oslo.config
@@ -50,7 +52,6 @@
GITDIR["oslo.vmware"]=$DEST/oslo.vmware
GITDIR["osprofiler"]=$DEST/osprofiler
GITDIR["pycadf"]=$DEST/pycadf
-GITDIR["python-openstacksdk"]=$DEST/python-openstacksdk
GITDIR["stevedore"]=$DEST/stevedore
GITDIR["taskflow"]=$DEST/taskflow
GITDIR["tooz"]=$DEST/tooz
@@ -90,7 +91,9 @@
_install_lib_from_source "cursive"
_install_lib_from_source "debtcollector"
_install_lib_from_source "futurist"
+ _install_lib_from_source "openstacksdk"
_install_lib_from_source "osc-lib"
+ _install_lib_from_source "osc-placement"
_install_lib_from_source "os-client-config"
_install_lib_from_source "oslo.cache"
_install_lib_from_source "oslo.concurrency"
@@ -112,7 +115,6 @@
_install_lib_from_source "oslo.vmware"
_install_lib_from_source "osprofiler"
_install_lib_from_source "pycadf"
- _install_lib_from_source "python-openstacksdk"
_install_lib_from_source "stevedore"
_install_lib_from_source "taskflow"
_install_lib_from_source "tooz"
diff --git a/lib/lvm b/lib/lvm
index 0cebd92..f047181 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -35,7 +35,7 @@
# _clean_lvm_volume_group removes all default LVM volumes
#
-# Usage: clean_lvm_volume_group $vg
+# Usage: _clean_lvm_volume_group $vg
function _clean_lvm_volume_group {
local vg=$1
@@ -43,6 +43,16 @@
sudo lvremove -f $vg
}
+# _remove_lvm_volume_group removes the volume group
+#
+# Usage: _remove_lvm_volume_group $vg
+function _remove_lvm_volume_group {
+ local vg=$1
+
+ # Remove the volume group
+ sudo vgremove -f $vg
+}
+
# _clean_lvm_backing_file() removes the backing file of the
# volume group
#
@@ -69,6 +79,7 @@
local vg=$1
_clean_lvm_volume_group $vg
+ _remove_lvm_volume_group $vg
# if there is no logical volume left, it's safe to attempt a cleanup
# of the backing file
if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
diff --git a/lib/neutron b/lib/neutron
index 2a660ec..cef8d1f 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -32,6 +32,17 @@
NEUTRON_DIR=$DEST/neutron
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
+NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
+# Distributed Virtual Router (DVR) configuration
+# Can be:
+# - ``legacy`` - No DVR functionality
+# - ``dvr_snat`` - Controller or single node DVR
+# - ``dvr`` - Compute node in multi-node DVR
+# - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
+#
+# Default is 'dvr_snat' since it can handle both DVR and legacy routers
+NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat}
+
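As a hedged local.conf sketch, a single-node DVR deployment needs only the toggles introduced above:

    # local.conf sketch: distributed routing on an all-in-one node
    [[local|localrc]]
    NEUTRON_DISTRIBUTED_ROUTING=True
    NEUTRON_DVR_MODE=dvr_snat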
NEUTRON_BIN_DIR=$(get_python_exec_prefix)
NEUTRON_DHCP_BINARY="neutron-dhcp-agent"
@@ -42,6 +53,7 @@
NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/
+NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
@@ -72,7 +84,8 @@
NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone}
NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-NEUTRON_ROOTWRAP_DAEMON_CMD="sudo $NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
+NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
+NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
# This is needed because _neutron_ovs_base_configure_l3_agent will set
# external_network_bridge
@@ -125,6 +138,13 @@
done
}
+# configure_root_helper_options() - Configure agent rootwrap helper options
+function configure_root_helper_options {
+ local conffile=$1
+ iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD"
+ iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD"
+}
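The two iniset calls render into an agent config roughly as follows (binary paths are illustrative):

    # [agent]
    # root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    # root_helper_daemon = sudo /usr/local/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf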
+
# configure_neutron() - Set config files, create data dirs, etc
function configure_neutron_new {
sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
@@ -165,6 +185,7 @@
iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
+ iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
@@ -173,7 +194,15 @@
# Configure VXLAN
# TODO(sc68cal) not hardcode?
iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
- iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
+
+ local mech_drivers="openvswitch"
+ if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+ mech_drivers+=",l2population"
+ else
+ mech_drivers+=",linuxbridge"
+ fi
+ iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
+
iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public
if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
@@ -185,6 +214,7 @@
if is_service_enabled neutron-agent; then
iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF
# Configure the neutron agent
if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
@@ -193,6 +223,11 @@
else
iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables_hybrid
iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+
+ if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+ iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
+ iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
+ fi
fi
if ! running_in_container; then
@@ -208,7 +243,7 @@
# make it so we have working DNS from guests
iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
- iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+ configure_root_helper_options $NEUTRON_DHCP_CONF
iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
fi
@@ -217,9 +252,20 @@
cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
neutron_service_plugin_class_add router
- iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+ configure_root_helper_options $NEUTRON_L3_CONF
iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
+
+ # Configure the neutron agent to serve external network ports
+ if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
+ iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
+ else
+ iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
+ fi
+
+ if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
+ iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE
+ fi
fi
# Metadata
@@ -227,9 +273,10 @@
cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
+ iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $SERVICE_HOST
iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
- iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
+ # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
+ configure_root_helper_options $NEUTRON_META_CONF
# TODO(dtroyer): remove the v2.0 hard code below
iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
@@ -242,6 +289,7 @@
if is_service_enabled tls-proxy; then
# Set the service port for a proxy to take the original
iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
+ iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
fi
# Metering
@@ -253,12 +301,6 @@
# configure_neutron_rootwrap() - configure Neutron's rootwrap
function configure_neutron_rootwrap {
- # Set the paths of certain binaries
- neutron_rootwrap=$(get_rootwrap_location neutron)
-
- # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
- local rootwrap_sudoer_cmd="${neutron_rootwrap} $NEUTRON_CONF_DIR/rootwrap.conf"
-
# Deploy new rootwrap filters files (owned by root).
# Wipe any existing rootwrap.d files first
if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then
@@ -275,7 +317,8 @@
# Set up the rootwrap sudoers for Neutron
tempfile=`mktemp`
- echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd *" >$tempfile
+ echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile
+ echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile
chmod 0440 $tempfile
sudo chown root:root $tempfile
sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap
@@ -293,7 +336,6 @@
iniset $NOVA_CONF neutron project_domain_name "Default"
iniset $NOVA_CONF neutron auth_strategy $NEUTRON_AUTH_STRATEGY
iniset $NOVA_CONF neutron region_name "$REGION_NAME"
- iniset $NOVA_CONF neutron url $NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT
iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
@@ -409,7 +451,7 @@
fi
}
-# start_neutron() - Start running processes, including screen
+# start_neutron() - Start running processes
function start_neutron_new {
# Start up the neutron agents if enabled
# TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
@@ -425,7 +467,7 @@
if is_service_enabled neutron-l3; then
run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF"
fi
- if is_service_enabled neutron-api; then
+ if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
# XXX(sc68cal) - Here's where plugins can wire up their own networks instead
# of the code in lib/neutron_plugins/services/l3
if type -p neutron_plugin_create_initial_networks > /dev/null; then
@@ -442,11 +484,11 @@
fi
if is_service_enabled neutron-metering; then
- run_process neutron-metering "$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
+ run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
fi
}
-# stop_neutron() - Stop running processes (non-screen)
+# stop_neutron() - Stop running processes
function stop_neutron_new {
for serv in neutron-api neutron-agent neutron-l3; do
stop_process $serv
@@ -493,6 +535,13 @@
_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
}
+# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
+function neutron_deploy_rootwrap_filters_new {
+ local srcdir=$1
+ sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
+ sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
+}
+
# Dispatch functions
# These are needed for compatibility between the old and new implementations
# where there are function name overlaps. These will be removed when
@@ -607,5 +656,14 @@
fi
}
+function neutron_deploy_rootwrap_filters {
+ if is_neutron_legacy_enabled; then
+ # Call back to old function
+ _neutron_deploy_rootwrap_filters "$@"
+ else
+ neutron_deploy_rootwrap_filters_new "$@"
+ fi
+}
+
# Restore xtrace
$XTRACE
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 784f3a8..0cd7e31 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -168,7 +168,7 @@
#
Q_DVR_MODE=${Q_DVR_MODE:-legacy}
if [[ "$Q_DVR_MODE" != "legacy" ]]; then
- Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
fi
# Provider Network Configurations
@@ -376,7 +376,6 @@
iniset $NOVA_CONF neutron project_domain_name "$SERVICE_DOMAIN_NAME"
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $NOVA_CONF neutron region_name "$REGION_NAME"
- iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
if [[ "$Q_USE_SECGROUP" == "True" ]]; then
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
@@ -455,7 +454,7 @@
fi
}
-# Start running processes, including screen
+# Start running processes
function start_neutron_service_and_check {
local service_port=$Q_PORT
local service_protocol=$Q_PROTOCOL
@@ -524,7 +523,7 @@
stop_process q-agt
}
-# stop_mutnauq_other() - Stop running processes (non-screen)
+# stop_mutnauq_other() - Stop running processes
function stop_mutnauq_other {
if is_service_enabled q-dhcp; then
stop_process q-dhcp
@@ -718,6 +717,7 @@
if is_service_enabled tls-proxy; then
# Set the service port for a proxy to take the original
iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+ iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
fi
_neutron_setup_rootwrap
@@ -756,7 +756,7 @@
cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
+ iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
@@ -928,7 +928,7 @@
}
function _get_net_id {
- neutron --os-cloud devstack-admin --os-region "$REGION_NAME" --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}'
+ openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
}
function _get_probe_cmd_prefix {
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 07974fe..9be32b7 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -39,9 +39,9 @@
Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
-# Use flat providernet for public network
+# Use providernet for public network
#
-# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a provider network
# for external interface of neutron l3-agent. In that case,
# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value
# used for the network. In case of ofagent, you should add the
@@ -59,6 +59,10 @@
# Q_USE_PROVIDERNET_FOR_PUBLIC=True
# PUBLIC_PHYSICAL_NETWORK=public
# OVS_BRIDGE_MAPPINGS=public:br-ex
+#
+# The provider-network-type defaults to flat, however, the values
+# PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
+# be set to specify the parameters for an alternate network type.
Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
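A hedged local.conf sketch of the alternate network type mentioned above, using VLAN as the example (the segment ID is illustrative):

    Q_USE_PROVIDERNET_FOR_PUBLIC=True
    PUBLIC_PROVIDERNET_TYPE=vlan
    PUBLIC_PROVIDERNET_SEGMENTATION_ID=100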
@@ -87,7 +91,8 @@
# Subnetpool defaults
USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
-SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
+SUBNETPOOL_NAME_V4=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v4"}
+SUBNETPOOL_NAME_V6=${SUBNETPOOL_NAME:-"shared-default-subnetpool-v6"}
SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
@@ -169,10 +174,10 @@
if is_networking_extension_supported "auto-allocated-topology"; then
if [[ "$USE_SUBNETPOOL" == "True" ]]; then
if [[ "$IP_VERSION" =~ 4.* ]]; then
- SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2)
+ SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default -f value -c id)
fi
if [[ "$IP_VERSION" =~ .*6 ]]; then
- SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2)
+ SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default -f value -c id)
fi
fi
fi
@@ -187,7 +192,7 @@
if [ -z $SUBNETPOOL_V4_ID ]; then
fixed_range_v4=$FIXED_RANGE
fi
- SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2)
+ SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
fi
@@ -197,7 +202,7 @@
if [ -z $SUBNETPOOL_V6_ID ]; then
fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
fi
- IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID --subnet-range $fixed_range_v6 | grep ' id ' | get_field 2)
+ IPV6_SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
fi
@@ -239,7 +244,7 @@
fi
# Create an external network, and a subnet. Configure the external network as router gw
if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
- EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
else
EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
fi
diff --git a/lib/nova b/lib/nova
index 788588f..56e3093 100644
--- a/lib/nova
+++ b/lib/nova
@@ -17,7 +17,6 @@
#
# - install_nova
# - configure_nova
-# - _config_nova_apache_wsgi
# - create_nova_conf
# - init_nova
# - start_nova
@@ -28,7 +27,6 @@
_XTRACE_LIB_NOVA=$(set +o | grep xtrace)
set +o xtrace
-
# Defaults
# --------
@@ -53,10 +51,15 @@
NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
+NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf
NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
NOVA_API_DB=${NOVA_API_DB:-nova_api}
+NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
+NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
+NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini
+NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini
# The total number of cells we expect. Must be greater than one and doesn't
# count cell0.
@@ -67,19 +70,22 @@
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
-if is_suse; then
- NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova}
-else
- NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova}
-fi
-
-# Toggle for deploying Nova-API under HTTPD + mod_wsgi
-NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False}
+# Toggle for deploying Nova-API under a wsgi server. We default to
+# true to use UWSGI, but allow False so that falling back to the
+# eventlet server is possible for grenade runs.
+# NOTE(cdent): We can adjust to remove the eventlet-based api service
+# after pike, at which time we can stop using NOVA_USE_MOD_WSGI to
+# mean "use uwsgi" because we'll always be using uwsgi.
+NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True}
if is_service_enabled tls-proxy; then
NOVA_SERVICE_PROTOCOL="https"
fi
+# Whether to use TLS for comms between the VNC/SPICE/serial proxy
+# services and the compute node
+NOVA_CONSOLE_PROXY_COMPUTE_TLS=${NOVA_CONSOLE_PROXY_COMPUTE_TLS:-False}
+
# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
@@ -99,7 +105,7 @@
# The following FILTERS contains SameHostFilter and DifferentHostFilter with
# the default filters.
-FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
QEMU_CONF=/etc/libvirt/qemu.conf
@@ -195,6 +201,13 @@
return 1
}
+# is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy
+# service has TLS enabled
+function is_nova_console_proxy_compute_tls_enabled {
+ [[ ${NOVA_CONSOLE_PROXY_COMPUTE_TLS} = "True" ]] && return 0
+ return 1
+}
+
# Helper to clean iptables rules
function clean_iptables {
# Delete rules
@@ -219,7 +232,10 @@
instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 sudo virsh destroy || true
- echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
+ if ! xargs -n1 sudo virsh undefine --managed-save --nvram <<< $instances; then
+ # If we can't delete with the nvram flag, try again without it
+ xargs -n1 sudo virsh undefine --managed-save <<< $instances
+ fi
fi
# Logout and delete iscsi sessions
@@ -244,66 +260,10 @@
# cleanup_nova_hypervisor
#fi
- if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
- _cleanup_nova_apache_wsgi
- fi
-}
-
-# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_nova_apache_wsgi {
- sudo rm -f $NOVA_WSGI_DIR/*
- sudo rm -f $(apache_site_config_for nova-api)
- sudo rm -f $(apache_site_config_for nova-metadata)
-}
-
-# _config_nova_apache_wsgi() - Set WSGI config files of Nova API
-function _config_nova_apache_wsgi {
- sudo mkdir -p $NOVA_WSGI_DIR
-
- local nova_apache_conf
- nova_apache_conf=$(apache_site_config_for nova-api)
- local nova_metadata_apache_conf
- nova_metadata_apache_conf=$(apache_site_config_for nova-metadata)
- local nova_ssl=""
- local nova_certfile=""
- local nova_keyfile=""
- local nova_api_port=$NOVA_SERVICE_PORT
- local nova_metadata_port=$METADATA_SERVICE_PORT
- local venv_path=""
-
- if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
- fi
-
- # copy proxy vhost and wsgi helper files
- sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api
- sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata
-
- sudo cp $FILES/apache-nova-api.template $nova_apache_conf
- sudo sed -e "
- s|%PUBLICPORT%|$nova_api_port|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g;
- s|%SSLENGINE%|$nova_ssl|g;
- s|%SSLCERTFILE%|$nova_certfile|g;
- s|%SSLKEYFILE%|$nova_keyfile|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- s|%APIWORKERS%|$API_WORKERS|g
- " -i $nova_apache_conf
-
- sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf
- sudo sed -e "
- s|%PUBLICPORT%|$nova_metadata_port|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g;
- s|%SSLENGINE%|$nova_ssl|g;
- s|%SSLCERTFILE%|$nova_certfile|g;
- s|%SSLKEYFILE%|$nova_keyfile|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- s|%APIWORKERS%|$API_WORKERS|g
- " -i $nova_metadata_apache_conf
+ stop_process "n-api"
+ stop_process "n-api-meta"
+ remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
+ remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
}
# configure_nova() - Set config files, create data dirs, etc
@@ -464,6 +424,9 @@
iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
iniset $NOVA_CONF scheduler driver "$SCHEDULER"
iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS"
+ if [[ $SCHEDULER == "filter_scheduler" ]]; then
+ iniset $NOVA_CONF scheduler workers "$API_WORKERS"
+ fi
iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
if [[ $SERVICE_IP_VERSION == 6 ]]; then
iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
@@ -475,7 +438,7 @@
iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
- iniset $NOVA_CONF key_manager api_class nova.keymgr.conf_key_mgr.ConfKeyManager
+ iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
if is_fedora || is_suse; then
# nova defaults to /usr/local/bin, but fedora and suse pip like to
@@ -487,7 +450,19 @@
# require them running on the host. This ensures that n-cpu doesn't
# leak a need to use the db in a multinode scenario.
if is_service_enabled n-api n-cond n-sched; then
- iniset $NOVA_CONF database connection `database_connection_url nova_cell0`
+ # If we're in multi-tier cells mode, we want our control services pointing
+ # at cell0 instead of cell1 to ensure isolation. If not, we point everything
+ # at the main database like normal.
+ if [[ "$CELLSV2_SETUP" == "singleconductor" ]]; then
+ local db="nova_cell1"
+ else
+ local db="nova_cell0"
+ # When in superconductor mode, nova-compute can't send instance
+ # info updates to the scheduler, so just disable it.
+ iniset $NOVA_CONF filter_scheduler track_instance_changes False
+ fi
+
+ iniset $NOVA_CONF database connection `database_connection_url $db`
iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
fi
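For concreteness, a sketch of the connection settings the block above produces, assuming the default MySQL backend and placeholder credentials (values illustrative):

    # superconductor mode (default): control services point at cell0
    #   [database]     connection = mysql+pymysql://root:secret@127.0.0.1/nova_cell0?charset=utf8
    #   [api_database] connection = mysql+pymysql://root:secret@127.0.0.1/nova_api?charset=utf8
    # singleconductor mode: everything points at the main cell database
    #   [database]     connection = mysql+pymysql://root:secret@127.0.0.1/nova_cell1?charset=utf8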
@@ -497,7 +472,7 @@
NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
fi
iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
- if is_service_enabled tls-proxy; then
+ if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
# Set the service port for a proxy to take the original
iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
@@ -532,11 +507,10 @@
iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
fi
# Format logging
- setup_logging $NOVA_CONF $NOVA_USE_MOD_WSGI
+ setup_logging $NOVA_CONF
- if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
- _config_nova_apache_wsgi
- fi
+ write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+ write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" ":${METADATA_SERVICE_PORT}"
if is_service_enabled ceilometer; then
iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
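The two write_uwsgi_config calls above emit uWSGI ini files; the first is mounted behind apache at the /compute URL prefix, the second binds the metadata port directly. A rough sketch of the kind of file produced, with illustrative keys and values only (the real template lives in devstack's apache/uwsgi helpers):

    [uwsgi]
    wsgi-file = /usr/local/bin/nova-api-wsgi
    processes = 2
    enable-threads = true
    # the port form (":8775") would instead bind directly, e.g.
    # http-socket = :8775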
@@ -560,10 +534,21 @@
# For multi-host, this should be the management ip of the compute host.
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
- iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN"
- iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+ iniset $NOVA_CONF vnc server_listen "$VNCSERVER_LISTEN"
+ iniset $NOVA_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $NOVA_CONF vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+
+ if is_nova_console_proxy_compute_tls_enabled ; then
+ iniset $NOVA_CONF vnc auth_schemes "vencrypt"
+ iniset $NOVA_CONF vnc vencrypt_client_key "/etc/pki/nova-novnc/client-key.pem"
+ iniset $NOVA_CONF vnc vencrypt_client_cert "/etc/pki/nova-novnc/client-cert.pem"
+ iniset $NOVA_CONF vnc vencrypt_ca_certs "/etc/pki/nova-novnc/ca-cert.pem"
+
+ sudo mkdir -p /etc/pki/nova-novnc
+ deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem
+ deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem
+ fi
else
iniset $NOVA_CONF vnc enabled false
fi
@@ -582,8 +567,8 @@
# Set the oslo messaging driver to the typical default. This does not
# enable notifications, but it will allow them to function when enabled.
iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2"
+ iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url)
iniset_rpc_backend nova $NOVA_CONF
- iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
@@ -594,6 +579,7 @@
if is_service_enabled tls-proxy; then
iniset $NOVA_CONF DEFAULT glance_protocol https
+ iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
fi
if is_service_enabled n-sproxy; then
@@ -605,19 +591,17 @@
# Setup logging for nova-dhcpbridge command line
sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
- local service="n-dhcp"
- local logfile="${service}.log.${CURRENT_LOG_TIME}"
- local real_logfile="${LOGDIR}/${logfile}"
- if [[ -n ${LOGDIR} ]]; then
- bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
- iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- # Drop the backward-compat symlink
- ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
+ if is_service_enabled n-net; then
+ local service="n-dhcp"
+ local logfile="${service}.log.${CURRENT_LOG_TIME}"
+ local real_logfile="${LOGDIR}/${logfile}"
+ if [[ -n ${LOGDIR} ]]; then
+ bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
+ iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
fi
- fi
- iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
+ iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
+ fi
if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then
init_nova_service_user_conf
@@ -629,11 +613,26 @@
local vhost
conf=$(conductor_conf $i)
vhost="nova_cell${i}"
+ # clean old conductor conf
+ rm -f $conf
iniset $conf database connection `database_connection_url nova_cell${i}`
iniset $conf conductor workers "$API_WORKERS"
iniset $conf DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
- rpc_backend_add_vhost $vhost
- iniset_rpc_backend nova $conf DEFAULT $vhost
+ # If we are in singleconductor mode, we don't have per-host message queues.
+ if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+ iniset_rpc_backend nova $conf DEFAULT
+ else
+ rpc_backend_add_vhost $vhost
+ iniset_rpc_backend nova $conf DEFAULT $vhost
+ # When running in superconductor mode, the cell conductor
+ # must be configured to talk to the placement service for
+ # reschedules to work.
+ if is_service_enabled placement placement-client; then
+ configure_placement_nova_compute $conf
+ fi
+ fi
+ # Format logging
+ setup_logging $conf
done
fi
}
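Each cell conductor thus gets its own RabbitMQ vhost, and so its own message-queue namespace. A hedged sketch of the transport_url that iniset_rpc_backend ends up writing for cell 1, assuming stock rabbit credentials (values illustrative):

    # in the cell 1 conductor conf
    # [DEFAULT]
    # transport_url = rabbit://stackrabbit:secret@127.0.0.1:5672/nova_cell1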
@@ -676,6 +675,9 @@
iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
fi
+ # Cells v1 conductor should be the nova-cells.conf
+ NOVA_COND_CONF=$NOVA_CELLS_CONF
+
time_start "dbsync"
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
time_stop "dbsync"
@@ -729,15 +731,15 @@
# and nova_cell0 databases.
nova-manage cell_v2 map_cell0 --database_connection `database_connection_url nova_cell0`
- # Migrate nova and nova_cell0 databases.
- $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
-
# (Re)create nova databases
for i in $(seq 1 $NOVA_NUM_CELLS); do
recreate_database nova_cell${i}
$NOVA_BIN_DIR/nova-manage --config-file $(conductor_conf $i) db sync
done
+ # Migrate nova and nova_cell0 databases.
+ $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
+
if is_service_enabled n-cell; then
recreate_database $NOVA_CELLS_DB
fi
@@ -746,9 +748,6 @@
# Needed for flavor conversion
$NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations
- # FIXME(danms): Should this be configurable?
- iniset $NOVA_CONF workarounds disable_group_policy_check_upcall True
-
# create the cell1 cell for the main nova db where the hosts live
for i in $(seq 1 $NOVA_NUM_CELLS); do
nova-manage --config-file $NOVA_CONF --config-file $(conductor_conf $i) cell_v2 create_cell --name "cell$i"
@@ -812,10 +811,6 @@
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
setup_develop $NOVA_DIR
sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
-
- if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
- install_apache_wsgi
- fi
}
# start_nova_api() - Start the API process ahead of other things
@@ -823,6 +818,7 @@
# Get right service port for testing
local service_port=$NOVA_SERVICE_PORT
local service_protocol=$NOVA_SERVICE_PROTOCOL
+ local nova_url
if is_service_enabled tls-proxy; then
service_port=$NOVA_SERVICE_PORT_INT
service_protocol="http"
@@ -832,54 +828,61 @@
local old_path=$PATH
export PATH=$NOVA_BIN_DIR:$PATH
- # If the site is not enabled then we are in a grenade scenario
- local enabled_site_file
- enabled_site_file=$(apache_site_config_for nova-api)
- if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
- enable_apache_site nova-api
- enable_apache_site nova-metadata
- restart_apache_server
- tail_log nova-api /var/log/$APACHE_NAME/nova-api.log
- tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log
- else
+ if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
run_process n-api "$NOVA_BIN_DIR/nova-api"
+ nova_url=$service_protocol://$SERVICE_HOST:$service_port
+ # Start the tls proxy if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
+ fi
+ else
+ run_process "n-api" "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api --ini $NOVA_UWSGI_CONF"
+ nova_url=$service_protocol://$SERVICE_HOST/compute/v2.1/
fi
echo "Waiting for nova-api to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
+ if ! wait_for_service $SERVICE_TIMEOUT $nova_url; then
die $LINENO "nova-api did not start"
fi
- # Start proxies if enabled
- if is_service_enabled tls-proxy; then
- start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
- fi
-
export PATH=$old_path
}
+# Detect and set up conditions under which the singleconductor setup is
+# needed, notably cells v1.
+function _set_singleconductor {
+ # NOTE(danms): Don't setup conductor fleet for cellsv1
+ if is_service_enabled n-cell; then
+ CELLSV2_SETUP="singleconductor"
+ fi
+}
+
+
# start_nova_compute() - Start the compute process
function start_nova_compute {
- local nomulticellflag="$1"
# Hack to set the path for rootwrap
local old_path=$PATH
export PATH=$NOVA_BIN_DIR:$PATH
if is_service_enabled n-cell; then
local compute_cell_conf=$NOVA_CELLS_CONF
- # NOTE(danms): Don't setup conductor fleet for cellsv1
- nomulticellflag='nomulticell'
else
local compute_cell_conf=$NOVA_CONF
fi
- if [ "$nomulticellflag" = 'nomulticell' ]; then
+ if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
# NOTE(danms): Grenade doesn't setup multi-cell rabbit, so
# skip these bits and use the normal config.
NOVA_CPU_CONF=$compute_cell_conf
echo "Skipping multi-cell conductor fleet setup"
else
+ # "${CELLSV2_SETUP}" is "superconductor"
cp $compute_cell_conf $NOVA_CPU_CONF
+ # FIXME(danms): Should this be configurable?
+ iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
+ # Since the nova-compute service cannot reach nova-scheduler over
+ # RPC, we also disable track_instance_changes.
+ iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False
iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
fi
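Summarizing the superconductor branch above as the deltas applied on top of the copied config (a sketch):

    # /etc/nova/nova-cpu.conf = nova.conf, plus:
    #   [workarounds]      disable_group_policy_check_upcall = True
    #   [filter_scheduler] track_instance_changes = False
    #   [DEFAULT]          transport_url = rabbit://...:5672/nova_cell${NOVA_CPU_CELL}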
@@ -899,7 +902,7 @@
# creating or modifying real configurations. Each fake
# gets its own configuration and own log file.
local fake_conf="${NOVA_FAKE_CONF}-${i}"
- iniset $fake_conf DEFAULT nhost "${HOSTNAME}${i}"
+ iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CPU_CONF --config-file $fake_conf"
done
else
@@ -912,7 +915,7 @@
export PATH=$old_path
}
-# start_nova() - Start running processes, including screen
+# start_nova() - Start running processes
function start_nova_rest {
# Hack to set the path for rootwrap
local old_path=$PATH
@@ -937,7 +940,11 @@
run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
- run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
+ if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
+ run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
+ else
+ run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
+ fi
run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
@@ -958,15 +965,15 @@
}
function start_nova_conductor {
- if is_service_enabled n-cell; then
+ if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
echo "Starting nova-conductor in a cellsv1-compatible way"
- run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
+ run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF"
return
fi
enable_nova_fleet
if is_service_enabled n-super-cond; then
- run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CONF"
+ run_process n-super-cond "$NOVA_BIN_DIR/nova-conductor --config-file $NOVA_COND_CONF"
fi
for i in $(seq 1 $NOVA_NUM_CELLS); do
if is_service_enabled n-cond-cell${i}; then
@@ -977,10 +984,39 @@
done
}
+function is_nova_ready {
+ # NOTE(sdague): with cells v2 all the compute services must be up
+ # and checked into the database before discover_hosts is run. This
+ # happens in all-in-one installs by accident, because more than 30
+ # seconds elapse between here and the end of the script. However, in
+ # multinode tests this is very often not the case. So ensure that the
+ # compute is up before we move on.
+ if is_service_enabled n-cell; then
+ # cells v1 can't complete the check below because it munges
+ # hostnames with cell information (grumble grumble).
+ return
+ fi
+ # TODO(sdague): honestly, this probably should be a plug point for
+ # an external system.
+ if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
+ # xenserver encodes information in the hostname of the compute
+ # because of the dom0/domU split. Just ignore for now.
+ return
+ fi
+ wait_for_compute $NOVA_READY_TIMEOUT
+}
+
function start_nova {
+ # this catches the cells v1 case early
+ _set_singleconductor
start_nova_rest
start_nova_conductor
start_nova_compute
+ if is_service_enabled n-api; then
+ # dump the cell mapping to ensure life is good
+ echo "Dumping cells_v2 mapping"
+ nova-manage cell_v2 list_cells --verbose
+ fi
}
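The same dump is useful when debugging multinode jobs by hand; the verbose listing includes each cell's name, UUID, transport URL and database connection:

    nova-manage cell_v2 list_cells --verbose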
function stop_nova_compute {
@@ -998,22 +1034,18 @@
}
function stop_nova_rest {
- if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
- disable_apache_site nova-api
- disable_apache_site nova-metadata
- restart_apache_server
- else
- stop_process n-api
- fi
- # Kill the nova screen windows
- # Some services are listed here twice since more than one instance
- # of a service may be running in certain configs.
- for serv in n-api n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-api-meta n-sproxy; do
+ # Kill the non-compute nova processes
+ for serv in n-api n-api-meta n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cell n-cell n-sproxy; do
stop_process $serv
done
}
function stop_nova_conductor {
+ if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
+ stop_process n-cond
+ return
+ fi
+
enable_nova_fleet
for srv in n-super-cond $(seq -f n-cond-cell%0.f 1 $NOVA_NUM_CELLS); do
if is_service_enabled $srv; then
@@ -1022,7 +1054,7 @@
done
}
-# stop_nova() - Stop running processes (non-screen)
+# stop_nova() - Stop running processes
function stop_nova {
stop_nova_rest
stop_nova_conductor
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 3e38b89..fcb4777 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -25,7 +25,7 @@
DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS)
# Only Xenial is left with libvirt-bin. Everywhere else is libvirtd
-if is_ubuntu && [ ! -f /etc/init.d/libvirtd ]; then
+if is_ubuntu && [ ${DISTRO} == "xenial" ]; then
LIBVIRT_DAEMON=libvirt-bin
else
LIBVIRT_DAEMON=libvirtd
@@ -72,18 +72,12 @@
pip_install_gr libvirt-python
#pip_install_gr <there-is-no-guestfs-in-pypi>
elif is_fedora || is_suse; then
- # On "KVM for IBM z Systems", kvm does not have its own package
- if [[ ! ${DISTRO} =~ "kvmibm1" && ! ${DISTRO} =~ "rhel7" ]]; then
- install_package kvm
- fi
- if [[ ${DISTRO} =~ "rhel7" ]]; then
- # This should install the latest qemu-kvm build,
- # which is called qemu-kvm-ev in centos7
- # (as the default OS qemu-kvm package is usually rather old,
- # and should be updated by above)
- install_package qemu-kvm
- fi
+ # Note that in CentOS/RHEL this needs to come from the RDO
+ # repositories (qemu-kvm-ev ... which provides this package)
+ # as the base system version is too old. We should have
+ # pre-installed these.
+ install_package qemu-kvm
install_package libvirt libvirt-devel
pip_uninstall libvirt-python
@@ -155,6 +149,18 @@
fi
fi
+ if is_nova_console_proxy_compute_tls_enabled ; then
+ if is_service_enabled n-novnc ; then
+ echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
+ echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
+
+ sudo mkdir -p /etc/pki/libvirt-vnc
+ sudo chown libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
+ deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
+ deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
+ fi
+ fi
+
# Service needs to be started on redhat/fedora -- do a restart for
# sanity after fiddling the config.
restart_service $LIBVIRT_DAEMON
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index 7d47ef0..c91f70b 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -41,10 +41,15 @@
iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
- iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
- iniset $NOVA_CONF filter_scheduler use_baremetal_filters True
- iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
- iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
+
+ if [[ "$IRONIC_USE_RESOURCE_CLASSES" == "False" ]]; then
+ iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic_host_manager
+ iniset $NOVA_CONF filter_scheduler use_baremetal_filters True
+ iniset $NOVA_CONF filter_scheduler host_subset_size 999
+ iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+ iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
+ fi
+
# ironic section
iniset $NOVA_CONF ironic auth_type password
iniset $NOVA_CONF ironic username admin
@@ -53,6 +58,9 @@
iniset $NOVA_CONF ironic project_domain_id default
iniset $NOVA_CONF ironic user_domain_id default
iniset $NOVA_CONF ironic project_name demo
+
+ iniset $NOVA_CONF ironic api_max_retries 300
+ iniset $NOVA_CONF ironic api_retry_interval 5
}
# install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index f3c8add..3d676b9 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -71,8 +71,8 @@
iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
iniset $NOVA_CONF libvirt images_type "ploop"
iniset $NOVA_CONF DEFAULT force_raw_images "False"
- iniset $NOVA_CONF vnc vncserver_proxyclient_address $HOST_IP
- iniset $NOVA_CONF vnc vncserver_listen $HOST_IP
+ iniset $NOVA_CONF vnc server_proxyclient_address $HOST_IP
+ iniset $NOVA_CONF vnc server_listen $HOST_IP
iniset $NOVA_CONF vnc keymap
elif [[ "$NOVA_BACKEND" == "LVM" ]]; then
iniset $NOVA_CONF libvirt images_type "lvm"
@@ -115,7 +115,10 @@
sudo dpkg-statoverride --add --update $STAT_OVERRIDE
fi
done
- elif is_fedora || is_suse; then
+ elif is_suse; then
+ # Workaround for missing dependencies in python-libguestfs
+ install_package python-libguestfs guestfs-data augeas augeas-lenses
+ elif is_fedora; then
install_package python-libguestfs
fi
fi
diff --git a/lib/placement b/lib/placement
index 8adbbde..1d68f8a 100644
--- a/lib/placement
+++ b/lib/placement
@@ -71,6 +71,7 @@
function cleanup_placement {
sudo rm -f $(apache_site_config_for nova-placement-api)
sudo rm -f $(apache_site_config_for placement-api)
+ remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
}
# _config_placement_apache_wsgi() - Set WSGI config files
@@ -102,14 +103,16 @@
}
function configure_placement_nova_compute {
- iniset $NOVA_CONF placement auth_type "password"
- iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_URI"
- iniset $NOVA_CONF placement username placement
- iniset $NOVA_CONF placement password "$SERVICE_PASSWORD"
- iniset $NOVA_CONF placement user_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME"
- iniset $NOVA_CONF placement project_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $NOVA_CONF placement os_region_name "$REGION_NAME"
+ # Use the provided config file path or default to $NOVA_CONF.
+ local conf=${1:-$NOVA_CONF}
+ iniset $conf placement auth_type "password"
+ iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $conf placement username placement
+ iniset $conf placement password "$SERVICE_PASSWORD"
+ iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement project_name "$SERVICE_TENANT_NAME"
+ iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement os_region_name "$REGION_NAME"
# TODO(cdent): auth_strategy, which is common to see in these
+ # blocks, is not currently used here. For the time being the
# placement api uses the auth_strategy configuration setting
@@ -159,12 +162,15 @@
# install_placement() - Collect source and prepare
function install_placement {
install_apache_wsgi
+ # Install the openstackclient placement client plugin for CLI
+ # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r.
+ pip_install osc-placement
}
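Once installed, the plugin adds placement subcommands to the openstack CLI; a quick smoke test against a fresh deployment might be (cloud name as used elsewhere in devstack):

    openstack --os-cloud devstack-admin resource provider list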
# start_placement_api() - Start the API processes ahead of other things
function start_placement_api {
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
- run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --ini $PLACEMENT_UWSGI_CONF"
+ run_process "placement-api" "$PLACEMENT_BIN_DIR/uwsgi --procname-prefix placement --ini $PLACEMENT_UWSGI_CONF"
else
enable_apache_site placement-api
restart_apache_server
@@ -185,7 +191,6 @@
function stop_placement {
if [[ "$WSGI_MODE" == "uwsgi" ]]; then
stop_process "placement-api"
- remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
else
disable_apache_site placement-api
restart_apache_server
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3177e88..1c7c82f 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -52,7 +52,20 @@
if is_service_enabled rabbit; then
# Install rabbitmq-server
install_package rabbitmq-server
- if is_fedora; then
+ if is_suse; then
+ install_package rabbitmq-server-plugins
+ # The default systemd socket activation only listens on the loopback
+ # interface, which causes rabbitmq to try to start its own epmd.
+ sudo mkdir -p /etc/systemd/system/epmd.socket.d
+ cat <<EOF | sudo tee /etc/systemd/system/epmd.socket.d/ports.conf >/dev/null
+[Socket]
+ListenStream=
+ListenStream=[::]:4369
+EOF
+ sudo systemctl daemon-reload
+ sudo systemctl restart epmd.socket epmd.service
+ fi
+ if is_fedora || is_suse; then
sudo systemctl enable rabbitmq-server
fi
fi
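A quick check that the override took effect is to confirm epmd now listens on the wildcard address instead of loopback only:

    sudo ss -lnt | grep 4369
    # expect a LISTEN entry on [::]:4369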
@@ -97,6 +110,8 @@
break
done
+ # NOTE(frickler): Remove the default guest user
+ sudo rabbitmqctl delete_user guest || true
fi
}
@@ -114,7 +129,7 @@
fi
}
-# builds transport url string
+# Returns the address of the RPC backend in URL format.
function get_transport_url {
local virtual_host=$1
if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
@@ -122,8 +137,9 @@
fi
}
-# Repeat the definition, in case get_transport_url is overriden for RPC purpose.
-# get_notification_url can then be used to talk to rabbit for notifications.
+# Returns the address of the Notification backend in URL format. This
+# should be used to set the transport_url option in the
+# oslo_messaging_notifications group.
function get_notification_url {
local virtual_host=$1
if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
diff --git a/lib/stack b/lib/stack
index f09ddce..bada26f 100644
--- a/lib/stack
+++ b/lib/stack
@@ -33,5 +33,8 @@
if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then
unset PIP_VIRTUAL_ENV
fi
+ else
+ echo "No function declared with name 'install_${service}'."
+ exit 1
fi
}
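The dispatcher resolves the install function from the service name, so every service library must declare a matching function. A minimal sketch for a hypothetical service "foo" (names illustrative):

    function install_foo {
        git_clone $FOO_REPO $FOO_DIR $FOO_BRANCH
        setup_develop $FOO_DIR
    }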
diff --git a/lib/swift b/lib/swift
index e247f15..6cda9c8 100644
--- a/lib/swift
+++ b/lib/swift
@@ -7,7 +7,7 @@
#
# - ``functions`` file
# - ``apache`` file
-# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined
+# - ``DEST``, `SWIFT_HASH` must be defined
# - ``STACK_USER`` must be defined
# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
# - ``lib/keystone`` file
@@ -464,6 +464,9 @@
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH"
+ # Allow both reseller prefixes to be used with domain_remap
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH"
+
if is_service_enabled swift3; then
cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
[filter:s3token]
@@ -554,7 +557,11 @@
local swift_log_dir=${SWIFT_DATA_DIR}/logs
sudo rm -rf ${swift_log_dir}
- sudo install -d -o ${STACK_USER} -g adm ${swift_log_dir}/hourly
+ local swift_log_group=adm
+ if is_suse; then
+ swift_log_group=root
+ fi
+ sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly
if [[ $SYSLOG != "False" ]]; then
sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
@@ -608,15 +615,13 @@
# create all of the directories needed to emulate a few different servers
local node_number
for node_number in ${SWIFT_REPLICAS_SEQ}; do
- sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
- local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
- local node=${SWIFT_DATA_DIR}/${node_number}/node
- local node_device=${node}/sdb1
- [[ -d $node ]] && continue
- [[ -d $drive ]] && continue
- sudo install -o ${STACK_USER} -g $user_group -d $drive
- sudo install -o ${STACK_USER} -g $user_group -d $node_device
- sudo chown -R ${STACK_USER}: ${node}
+ # node_devices must match *.conf devices option
+ local node_devices=${SWIFT_DATA_DIR}/${node_number}
+ local real_devices=${SWIFT_DATA_DIR}/drives/sdb1/$node_number
+ sudo ln -sf $real_devices $node_devices;
+ local device=${real_devices}/sdb1
+ [[ -d $device ]] && continue
+ sudo install -o ${STACK_USER} -g $user_group -d $device
done
}
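Spelled out for node 1 with SWIFT_DATA_DIR=/opt/stack/data/swift (path illustrative), the loop produces:

    # real storage:  /opt/stack/data/swift/drives/sdb1/1/sdb1
    # symlink:       /opt/stack/data/swift/1 -> /opt/stack/data/swift/drives/sdb1/1
    # so the devices path from 1.conf, /opt/stack/data/swift/1/sdb1,
    # resolves through the symlink to the real storage directory.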
@@ -780,7 +785,7 @@
fi
}
-# start_swift() - Start running processes, including screen
+# start_swift() - Start running processes
function start_swift {
# (re)start memcached to make sure we have a clean memcache.
restart_service memcached
@@ -799,13 +804,6 @@
restart_apache_server
# The rest of the services should be started in backgroud
swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
- # Be we still want the logs of Swift Proxy in our screen session
- tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
- if [[ ${SWIFT_REPLICAS} == 1 ]]; then
- for type in object container account; do
- tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1
- done
- fi
return 0
fi
@@ -833,7 +831,8 @@
else
# The container-sync daemon is strictly needed to pass the container
# sync Tempest tests.
- swift-init --run-dir=${SWIFT_DATA_DIR}/run container-sync start
+ enable_service s-container-sync
+ run_process s-container-sync "$SWIFT_BIN_DIR/swift-container-sync ${SWIFT_CONF_DIR}/container-server/1.conf"
fi
else
swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
@@ -842,16 +841,24 @@
if is_service_enabled tls-proxy; then
local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
- start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT
+ start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT $SWIFT_MAX_HEADER_SIZE
fi
run_process s-proxy "$SWIFT_BIN_DIR/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+ # We also started the storage services, but proxy started last and
+ # will take the longest to start, so by the time it comes up, we're
+ # probably fine.
+ echo "Waiting for swift proxy to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/info; then
+ die $LINENO "swift proxy did not start"
+ fi
+
if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
swift_configure_tempurls
fi
}
-# stop_swift() - Stop running processes (non-screen)
+# stop_swift() - Stop running processes
function stop_swift {
local type
diff --git a/lib/tempest b/lib/tempest
index cc65ec7..0605ffb 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -293,9 +293,15 @@
iniset $TEMPEST_CONFIG identity-feature-enabled security_compliance True
fi
- # TODO(rodrigods): This is a feature flag for bug 1590578 which is fixed in
- # Newton and Ocata. This option can be removed after Mitaka is end of life.
- iniset $TEMPEST_CONFIG identity-feature-enabled forbid_global_implied_dsr True
+ # When LDAP is enabled domain specific drivers are also enabled and the users
+ # and groups identity tests must adapt to this scenario
+ if is_service_enabled ldap; then
+ iniset $TEMPEST_CONFIG identity-feature-enabled domain_specific_drivers True
+ fi
+
+ # TODO(felipemonteiro): Remove this once Tempest no longer supports Pike
+ # as this is supported in Queens and beyond.
+ iniset $TEMPEST_CONFIG identity-feature-enabled project_tags True
# Image
# We want to be able to override this variable in the gate to avoid
@@ -308,7 +314,6 @@
fi
# Image Features
- iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
if [ "$GLANCE_V1_ENABLED" != "True" ]; then
iniset $TEMPEST_CONFIG image-feature-enabled api_v1 False
fi
@@ -381,6 +386,10 @@
fi
fi
+ if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
+ iniset $TEMPEST_CONFIG compute-feature-enabled volume_multiattach True
+ fi
+
if is_service_enabled n-novnc; then
iniset $TEMPEST_CONFIG compute-feature-enabled vnc_console True
fi
@@ -430,6 +439,12 @@
TEMPEST_VOLUME_MANAGE_VOLUME=${TEMPEST_VOLUME_MANAGE_VOLUME:-True}
fi
iniset $TEMPEST_CONFIG volume-feature-enabled manage_volume $(trueorfalse False TEMPEST_VOLUME_MANAGE_VOLUME)
+ # Only turn on TEMPEST_EXTEND_ATTACHED_VOLUME by default for "lvm" backends
+ # in Cinder and the libvirt driver in Nova.
+ if [[ "$CINDER_ENABLED_BACKENDS" == *"lvm"* ]] && [ "$VIRT_DRIVER" = "libvirt" ]; then
+ TEMPEST_EXTEND_ATTACHED_VOLUME=${TEMPEST_EXTEND_ATTACHED_VOLUME:-True}
+ fi
+ iniset $TEMPEST_CONFIG volume-feature-enabled extend_attached_volume $(trueorfalse False TEMPEST_EXTEND_ATTACHED_VOLUME)
# TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
iniset $TEMPEST_CONFIG volume-feature-enabled api_v3 True
iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 $(trueorfalse False TEMPEST_VOLUME_API_V1)
@@ -543,7 +558,10 @@
if [[ "$OFFLINE" != "True" ]]; then
tox -revenv-tempest --notest
fi
- tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt -r requirements.txt
+
+ # The requirements might be on a different branch, while tempest needs master requirements.
+ (cd $REQUIREMENTS_DIR && git show master:upper-constraints.txt) > u-c-m.txt
+ tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt
# Auth:
iniset $TEMPEST_CONFIG auth tempest_roles "Member"
@@ -574,6 +592,11 @@
DISABLE_NETWORK_API_EXTENSIONS+=", metering"
fi
+ # disable l3_agent_scheduler if we didn't enable L3 agent
+ if ! is_service_enabled q-l3; then
+ DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
+ fi
+
local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"}
if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then
# Enabled extensions are either the ones explicitly specified or those available on the API endpoint
@@ -608,7 +631,7 @@
# install_tempest() - Collect source and prepare
function install_tempest {
git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
- pip_install tox
+ pip_install 'tox!=2.8.0'
pushd $TEMPEST_DIR
tox -r --notest -efull
# NOTE(mtreinish) Respect constraints in the tempest full venv, things that
diff --git a/lib/template b/lib/template
index 25d653c..e6d0032 100644
--- a/lib/template
+++ b/lib/template
@@ -81,7 +81,7 @@
:
}
-# start_XXXX() - Start running processes, including screen
+# start_XXXX() - Start running processes
function start_XXXX {
# The quoted command must be a single command and not include an
# shell metacharacters, redirections or shell builtins.
@@ -89,7 +89,7 @@
:
}
-# stop_XXXX() - Stop running processes (non-screen)
+# stop_XXXX() - Stop running processes
function stop_XXXX {
# for serv in serv-a serv-b; do
# stop_process $serv
diff --git a/lib/tls b/lib/tls
index 6d67c90..a72b708 100644
--- a/lib/tls
+++ b/lib/tls
@@ -340,6 +340,24 @@
fi
}
+# Deploy the service cert & key to a service specific
+# location
+function deploy_int_cert {
+ local cert_target_file=$1
+ local key_target_file=$2
+
+ sudo cp "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" "$cert_target_file"
+ sudo cp "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" "$key_target_file"
+}
+
+# Deploy the intermediate CA cert bundle file to a service
+# specific location
+function deploy_int_CA {
+ local ca_target_file=$1
+
+ sudo cp "$INT_CA_DIR/ca-chain.pem" "$ca_target_file"
+}
+
# If a non-system python-requests is installed then it will use the
# built-in CA certificate store rather than the distro-specific
# CA certificate store. Detect this and symlink to the correct
@@ -487,13 +505,15 @@
}
# Starts the TLS proxy for the given IP/ports
-# start_tls_proxy front-host front-port back-host back-port
+# start_tls_proxy service-name front-host front-port back-host back-port
function start_tls_proxy {
local b_service="$1-tls-proxy"
local f_host=$2
local f_port=$3
local b_host=$4
local b_port=$5
+ # 8190 is apache's default request header size limit.
+ local f_header_size=${6:-8190}
tune_apache_connections
@@ -521,21 +541,26 @@
# ('Connection aborted.', BadStatusLine("''",)) error
KeepAlive Off
+ # This increase in allowed request header sizes is required
+ # for swift functional testing to work with tls enabled. It is 2 bytes
+ # larger than the apache default of 8190.
+ LimitRequestFieldSize $f_header_size
+ RequestHeader set X-Forwarded-Proto "https"
+
<Location />
ProxyPass http://$b_host:$b_port/ retry=0 nocanon
ProxyPassReverse http://$b_host:$b_port/
</Location>
ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
- ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+ ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
LogLevel info
- CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common
- LogFormat "%v %h %l %u %t \"%r\" %>s %b"
+ CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
</VirtualHost>
EOF
if is_suse ; then
sudo a2enflag SSL
fi
- for mod in ssl proxy proxy_http; do
+ for mod in headers ssl proxy proxy_http; do
enable_apache_mod $mod
done
enable_apache_site $b_service
@@ -557,6 +582,20 @@
# using tls configuration are down.
function stop_tls_proxy {
stop_apache_server
+
+ # NOTE(jh): Removing all tls-proxy configs is a bit of a hack, but
+ # necessary so that we can restart after an unstack. A better
+ # solution would be to ensure that each service calling
+ # start_tls_proxy will call stop_tls_proxy with the same
+ # parameters on shutdown so we can use the disable_apache_site
+ # function and remove individual files there.
+ if is_ubuntu; then
+ sudo rm -f /etc/apache2/sites-enabled/*-tls-proxy.conf
+ else
+ for i in $APACHE_CONF_DIR/*-tls-proxy.conf; do
+ sudo mv $i $i.disabled
+ done
+ fi
}
# Clean up the CA files
diff --git a/openrc b/openrc
index 23c173c..37724c5 100644
--- a/openrc
+++ b/openrc
@@ -84,7 +84,7 @@
# We currently recommend using the version 3 *identity api*.
#
-# If you don't have a working .stackenv, this is the backup possition
+# If you don't have a working .stackenv, this is the backup position
KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_URI:-$KEYSTONE_BACKUP}
diff --git a/playbooks/devstack.yaml b/playbooks/devstack.yaml
new file mode 100644
index 0000000..d090638
--- /dev/null
+++ b/playbooks/devstack.yaml
@@ -0,0 +1,7 @@
+- hosts: all
+ # This is the default strategy; however, since orchestrate-devstack requires
+ # "linear", it is safer to enforce it in case this is running in an
+ # environment configured with a different default strategy.
+ strategy: linear
+ roles:
+ - orchestrate-devstack
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
new file mode 100644
index 0000000..9e66f20
--- /dev/null
+++ b/playbooks/post.yaml
@@ -0,0 +1,32 @@
+- hosts: all
+ become: True
+ vars:
+ devstack_log_dir: "{{ devstack_base_dir|default('/opt/stack') }}/logs/"
+ devstack_conf_dir: "{{ devstack_base_dir|default('/opt/stack') }}/devstack/"
+ devstack_full_log: "{{ devstack_early_log|default('/opt/stack/logs/devstack-early.txt') }}"
+ tasks:
+ # NOTE(andreaf) If the tempest service is enabled, a tempest.log is
+ # generated as part of lib/tempest, as a result of verify_tempest_config
+ - name: Check if a tempest log exists
+ stat:
+ path: "{{ devstack_conf_dir }}/tempest.log"
+ register: tempest_log
+ - name: Link post-devstack tempest.log
+ file:
+ src: "{{ devstack_conf_dir }}/tempest.log"
+ dest: "{{ stage_dir }}/verify_tempest_conf.log"
+ state: hard
+ when: tempest_log.stat.exists
+ roles:
+ - export-devstack-journal
+ - apache-logs-conf
+ - devstack-project-conf
+ # capture-system-logs should be the last role before stage-output
+ - capture-system-logs
+ - role: stage-output
+ # NOTE(andreaf) We need fetch-devstack-log-dir only until the base job
+ # starts pulling logs for us from {{ ansible_user_dir }}/logs.
+ # Meanwhile we already store things in ansible_user_dir and use
+ # fetch-devstack-log-dir setting devstack_base_dir
+ - role: fetch-devstack-log-dir
+ devstack_base_dir: "{{ ansible_user_dir }}"
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
new file mode 100644
index 0000000..4689a63
--- /dev/null
+++ b/playbooks/pre.yaml
@@ -0,0 +1,30 @@
+- hosts: all
+ pre_tasks:
+ - name: Gather minimum local MTU
+ set_fact:
+ local_mtu: >
+ {% set mtus = [] -%}
+ {% for interface in ansible_interfaces -%}
+ {% set interface_variable = 'ansible_' + interface -%}
+ {% if interface_variable in hostvars[inventory_hostname] -%}
+ {% set _ = mtus.append(hostvars[inventory_hostname][interface_variable]['mtu']|int) -%}
+ {% endif -%}
+ {% endfor -%}
+ {{- mtus|min -}}
+ - name: Calculate external_bridge_mtu
+ # 50 bytes is overhead for vxlan (which is greater than GRE),
+ # allowing us to use either overlay option with this MTU.
+ # TODO(andreaf) This should work, but it may have to be reconciled with
+ # the MTU setting used by the multinode setup roles in multinode pre.yaml
+ set_fact:
+ external_bridge_mtu: "{{ local_mtu | int - 50 }}"
+ roles:
+ - test-matrix
+ - configure-swap
+ - setup-stack-user
+ - setup-tempest-user
+ - setup-devstack-source-dirs
+ - setup-devstack-log-dir
+ - setup-devstack-cache
+ - start-fresh-logging
+ - write-devstack-local-conf
diff --git a/playbooks/tox/post.yaml b/playbooks/tox/post.yaml
new file mode 100644
index 0000000..7f0cb19
--- /dev/null
+++ b/playbooks/tox/post.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+ roles:
+ - fetch-tox-output
+ - fetch-subunit-output
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
new file mode 100644
index 0000000..d7e4670
--- /dev/null
+++ b/playbooks/tox/pre.yaml
@@ -0,0 +1,8 @@
+- hosts: all
+ roles:
+ # Run bindep and test-setup after devstack so that they won't interfere
+ - role: bindep
+ bindep_profile: test
+ bindep_dir: "{{ zuul_work_dir }}"
+ - test-setup
+ - ensure-tox
diff --git a/playbooks/tox/run-both.yaml b/playbooks/tox/run-both.yaml
new file mode 100644
index 0000000..e85c2ee
--- /dev/null
+++ b/playbooks/tox/run-both.yaml
@@ -0,0 +1,10 @@
+- hosts: all
+ roles:
+ - run-devstack
+ # Run bindep and test-setup after devstack so that they won't interfere
+ - role: bindep
+ bindep_profile: test
+ bindep_dir: "{{ zuul_work_dir }}"
+ - test-setup
+ - ensure-tox
+ - tox
diff --git a/playbooks/tox/run.yaml b/playbooks/tox/run.yaml
new file mode 100644
index 0000000..22f8209
--- /dev/null
+++ b/playbooks/tox/run.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+ roles:
+ - tox
diff --git a/playbooks/unit-tests/pre.yaml b/playbooks/unit-tests/pre.yaml
new file mode 100644
index 0000000..cfa1676
--- /dev/null
+++ b/playbooks/unit-tests/pre.yaml
@@ -0,0 +1,13 @@
+- hosts: all
+
+ tasks:
+
+ - name: Install prerequisites
+ shell:
+ chdir: '{{ zuul.project.src_dir }}'
+ executable: /bin/bash
+ cmd: |
+ set -e
+ set -x
+ echo "IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20" >> localrc
+ ./tools/install_prereqs.sh
diff --git a/playbooks/unit-tests/run.yaml b/playbooks/unit-tests/run.yaml
new file mode 100644
index 0000000..181521f
--- /dev/null
+++ b/playbooks/unit-tests/run.yaml
@@ -0,0 +1,12 @@
+- hosts: all
+
+ tasks:
+
+ - name: Run run_tests.sh
+ shell:
+ chdir: '{{ zuul.project.src_dir }}'
+ executable: /bin/bash
+ cmd: |
+ set -e
+ set -x
+ ./run_tests.sh
diff --git a/roles/apache-logs-conf/README.rst b/roles/apache-logs-conf/README.rst
new file mode 100644
index 0000000..eccee40
--- /dev/null
+++ b/roles/apache-logs-conf/README.rst
@@ -0,0 +1,12 @@
+Prepare apache configs and logs for staging
+
+Make sure apache config files and log files are available in a linux flavor
+independent location. Note that this relies on hard links, so the staging
+directory must be in the same partition as the logs and configs.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory.
diff --git a/roles/apache-logs-conf/defaults/main.yaml b/roles/apache-logs-conf/defaults/main.yaml
new file mode 100644
index 0000000..1fb04fe
--- /dev/null
+++ b/roles/apache-logs-conf/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/apache-logs-conf/tasks/main.yaml b/roles/apache-logs-conf/tasks/main.yaml
new file mode 100644
index 0000000..bd64574
--- /dev/null
+++ b/roles/apache-logs-conf/tasks/main.yaml
@@ -0,0 +1,89 @@
+- name: Ensure {{ stage_dir }}/apache exists
+ file:
+ path: "{{ stage_dir }}/apache"
+ state: directory
+
+- name: Link apache logs on Debian/SuSE
+ block:
+ - name: Find logs
+ find:
+ path: "/var/log/apache2"
+ file_type: any
+ register: debian_suse_apache_logs
+
+ - name: Dereference files
+ stat:
+ path: "{{ item.path }}"
+ with_items: "{{ debian_suse_apache_logs.files }}"
+ register: debian_suse_apache_deref_logs
+
+ - name: Create hard links
+ file:
+ src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+ dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}"
+ state: hard
+ with_items: "{{ debian_suse_apache_deref_logs.results }}"
+ when:
+ - item.stat.isreg or item.stat.islnk
+ when: ansible_os_family in ('Debian', 'Suse')
+ no_log: true
+
+- name: Link apache logs on RedHat
+ block:
+ - name: Find logs
+ find:
+ path: "/var/log/httpd"
+ file_type: any
+ register: redhat_apache_logs
+
+ - name: Dereference files
+ stat:
+ path: "{{ item.path }}"
+ with_items: "{{ redhat_apache_logs.files }}"
+ register: redhat_apache_deref_logs
+
+ - name: Create hard links
+ file:
+ src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+ dest: "{{ stage_dir }}/apache/{{ item.stat.path | basename }}"
+ state: hard
+ with_items: "{{ redhat_apache_deref_logs.results }}"
+ when:
+ - item.stat.isreg or item.stat.islnk
+ when: ansible_os_family == 'RedHat'
+ no_log: true
+
+- name: Ensure {{ stage_dir }}/apache_config exists
+ file:
+ path: "{{ stage_dir }}/apache_config"
+ state: directory
+
+- name: Define config paths
+ set_fact:
+ apache_config_paths:
+ 'Debian': '/etc/apache2/sites-enabled/'
+ 'Suse': '/etc/apache2/conf.d/'
+ 'RedHat': '/etc/httpd/conf.d/'
+
+- name: Discover configurations
+ find:
+ path: "{{ apache_config_paths[ansible_os_family] }}"
+ file_type: any
+ register: apache_configs
+ no_log: true
+
+- name: Dereference configurations
+ stat:
+ path: "{{ item.path }}"
+ with_items: "{{ apache_configs.files }}"
+ register: apache_configs_deref
+ no_log: true
+
+- name: Link configurations
+ file:
+ src: "{{ item.stat.lnk_source | default(item.stat.path) }}"
+ dest: "{{ stage_dir }}/apache_config/{{ item.stat.path | basename }}"
+ state: hard
+ with_items: "{{ apache_configs_deref.results }}"
+ when: item.stat.isreg or item.stat.islnk
+ no_log: true
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
new file mode 100644
index 0000000..c284124
--- /dev/null
+++ b/roles/capture-system-logs/README.rst
@@ -0,0 +1,20 @@
+Stage a number of system type logs
+
+Stage a number of different logs / reports:
+- snapshot of iptables
+- disk space available
+- pip[2|3] freeze
+- installed packages (dpkg/rpm)
+- ceph, openswitch, gluster
+- coredumps
+- dns resolver
+- listen53
+- unbound.log
+- deprecation messages
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory.
diff --git a/roles/capture-system-logs/defaults/main.yaml b/roles/capture-system-logs/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/capture-system-logs/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
new file mode 100644
index 0000000..de4f8ed
--- /dev/null
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -0,0 +1,39 @@
+# TODO(andreaf) Make this into proper Ansible
+- name: Stage various logs and reports
+ shell:
+ executable: /bin/bash
+ cmd: |
+ sudo iptables-save > {{ stage_dir }}/iptables.txt
+ df -h > {{ stage_dir }}/df.txt
+
+ for py_ver in 2 3; do
+ if [[ `which python${py_ver}` ]]; then
+ python${py_ver} -m pip freeze > {{ stage_dir }}/pip${py_ver}-freeze.txt
+ fi
+ done
+
+ if [ `command -v dpkg` ]; then
+ dpkg -l> {{ stage_dir }}/dpkg-l.txt
+ fi
+ if [ `command -v rpm` ]; then
+ rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
+ fi
+
+ # gzip and save any coredumps in /var/core
+ if [ -d /var/core ]; then
+ sudo gzip -r /var/core
+ sudo cp -r /var/core {{ stage_dir }}/
+ fi
+
+ sudo ss -lntup | grep ':53' > {{ stage_dir }}/listen53.txt
+
+ # NOTE(andreaf) Service logs are already in logs/ thanks to the
+ # export-devstack-journal role. Apache logs are under apache/ thanks to the
+ # apache-logs-conf role.
+ grep -i deprecat {{ stage_dir }}/logs/*.txt {{ stage_dir }}/apache/*.log | \
+ sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}\.[0-9]{1,3}/ /g' | \
+ sed -r 's/[0-9]{1,2}\:[0-9]{1,2}\:[0-9]{1,2}/ /g' | \
+ sed -r 's/[0-9]{1,4}-[0-9]{1,2}-[0-9]{1,4}/ /g' |
+ sed -r 's/\[.*\]/ /g' | \
+ sed -r 's/\s[0-9]+\s/ /g' | \
+ awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }' > {{ stage_dir }}/deprecations.log
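The awk stage deduplicates while preserving first-seen order, printing each unique line prefixed with its occurrence count. The same idiom in isolation:

    printf 'a\nb\na\na\n' | awk '{if ($0 in seen) {seen[$0]++} else {out[++n]=$0;seen[$0]=1}} END { for (i=1; i<=n; i++) print seen[out[i]]" :: " out[i] }'
    # 3 :: a
    # 1 :: b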
diff --git a/roles/devstack-project-conf/README.rst b/roles/devstack-project-conf/README.rst
new file mode 100644
index 0000000..3f2d4c9
--- /dev/null
+++ b/roles/devstack-project-conf/README.rst
@@ -0,0 +1,11 @@
+Prepare OpenStack project configurations for staging
+
+Prepare all relevant config files for staging.
+This is helpful to avoid staging the entire /etc.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory.
diff --git a/roles/devstack-project-conf/defaults/main.yaml b/roles/devstack-project-conf/defaults/main.yaml
new file mode 100644
index 0000000..f8fb8de
--- /dev/null
+++ b/roles/devstack-project-conf/defaults/main.yaml
@@ -0,0 +1 @@
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/devstack-project-conf/tasks/main.yaml b/roles/devstack-project-conf/tasks/main.yaml
new file mode 100644
index 0000000..917cdbc
--- /dev/null
+++ b/roles/devstack-project-conf/tasks/main.yaml
@@ -0,0 +1,25 @@
+- name: Ensure {{ stage_dir }}/etc exists
+ file:
+ path: "{{ stage_dir }}/etc"
+ state: directory
+
+- name: Check which projects have a config folder
+ stat:
+ path: "/etc/{{ item.value.short_name }}"
+ with_dict: "{{ zuul.projects }}"
+ register: project_configs
+ no_log: true
+
+- name: Copy configuration files
+ command: cp -pRL {{ item.stat.path }} {{ stage_dir }}/etc/{{ item.item.value.short_name }}
+ when: item.stat.exists
+ with_items: "{{ project_configs.results }}"
+
+- name: Check if openstack has a config folder
+ stat:
+ path: "/etc/openstack"
+ register: openstack_configs
+
+- name: Copy configuration files
+ command: cp -pRL /etc/openstack {{ stage_dir }}/etc/
+ when: openstack_configs.stat.exists
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
new file mode 100644
index 0000000..a34e070
--- /dev/null
+++ b/roles/export-devstack-journal/README.rst
@@ -0,0 +1,21 @@
+Export journal files from devstack services
+
+Export the systemd journal for every devstack service in native
+journal format as well as text. Also, export a syslog-style file with
+kernel and sudo messages.
+
+Writes the output to the ``logs/`` subdirectory of
+``stage_dir``.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory. This is used to obtain the
+ ``log-start-timestamp.txt``, used to filter the systemd journal.
+
+.. zuul:rolevar:: stage_dir
+ :default: {{ ansible_user_dir }}
+
+ The base stage directory.
diff --git a/roles/export-devstack-journal/defaults/main.yaml b/roles/export-devstack-journal/defaults/main.yaml
new file mode 100644
index 0000000..1fb04fe
--- /dev/null
+++ b/roles/export-devstack-journal/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
new file mode 100644
index 0000000..6e760c1
--- /dev/null
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -0,0 +1,37 @@
+# NOTE(andreaf) This bypasses the stage-output role
+- name: Ensure {{ stage_dir }}/logs exists
+ become: true
+ file:
+ path: "{{ stage_dir }}/logs"
+ state: directory
+ owner: "{{ ansible_user }}"
+
+# TODO: convert this to ansible
+- name: Export journal files
+ become: true
+ shell:
+ cmd: |
+ u=""
+ name=""
+ for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do
+ name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
+ journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz
+ done
+
+ # Export the journal in export format to make it downloadable
+ # for later searching. It can then be rewritten to a journal native
+ # format locally using systemd-journal-remote. This makes a class of
+ # debugging much easier. We don't do the native conversion here as
+ # some distros do not package that tooling.
+ journalctl -u 'devstack@*' -o export | \
+ xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
+
+ # The journal contains everything running under systemd, we'll
+ # build an old school version of the syslog with just the
+ # kernel and sudo messages.
+ journalctl \
+ -t kernel \
+ -t sudo \
+ --no-pager \
+ --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
+ | gzip - > {{ stage_dir }}/logs/syslog.txt.gz
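To inspect the exported journal on another machine, it can be converted back to native format and read with journalctl; a sketch, assuming systemd-journal-remote is available (often shipped in a separate package and not on PATH):

    xz -d devstack.journal.xz      # the decompressed file holds export-format data
    /usr/lib/systemd/systemd-journal-remote -o local.journal devstack.journal
    journalctl --file=local.journal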
diff --git a/roles/fetch-devstack-log-dir/README.rst b/roles/fetch-devstack-log-dir/README.rst
new file mode 100644
index 0000000..360a2e3
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/README.rst
@@ -0,0 +1,10 @@
+Fetch content from the devstack log directory
+
+Copy logs from every host back to the zuul executor.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/fetch-devstack-log-dir/defaults/main.yaml b/roles/fetch-devstack-log-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
new file mode 100644
index 0000000..5a198b2
--- /dev/null
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Collect devstack logs
+ synchronize:
+ dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+ mode: pull
+ src: "{{ devstack_base_dir }}/logs"
diff --git a/roles/orchestrate-devstack/README.rst b/roles/orchestrate-devstack/README.rst
new file mode 100644
index 0000000..097dcea
--- /dev/null
+++ b/roles/orchestrate-devstack/README.rst
@@ -0,0 +1,25 @@
+Orchestrate a devstack
+
+Runs devstack in a multinode scenario, with one controller node
+and a group of subnodes.
+
+The reason for this role is so that jobs in other repositories may
+run devstack in their plays with no need to re-implement the
+orchestration logic.
+
+The "run-devstack" role is available to run devstack with no
+orchestration.
+
+This role sets up the controller and CA first; it then pushes CA
+data to the sub-nodes and runs devstack there. The only requirements for
+this role are that the controller's inventory_hostname is "controller"
+and that all sub-nodes are defined in a group called "subnode".
+
+This role needs to be invoked from a playbook that uses a "linear" strategy.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
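
A minimal sketch of a consuming playbook, assuming the standard
controller/subnode inventory described above (the play layout is an
illustration, not part of this change):

    - hosts: all
      # "linear" makes each task finish on every host before the next
      # starts, so the controller is stacked before CA data is pushed out.
      strategy: linear
      roles:
        - orchestrate-devstack
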
diff --git a/roles/orchestrate-devstack/defaults/main.yaml b/roles/orchestrate-devstack/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/orchestrate-devstack/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/orchestrate-devstack/tasks/main.yaml b/roles/orchestrate-devstack/tasks/main.yaml
new file mode 100644
index 0000000..12db58c
--- /dev/null
+++ b/roles/orchestrate-devstack/tasks/main.yaml
@@ -0,0 +1,38 @@
+- name: Run devstack on the controller
+ include_role:
+ name: run-devstack
+ when: inventory_hostname == 'controller'
+
+- name: Setup devstack on sub-nodes
+ block:
+
+ - name: Sync CA data to subnodes (if any)
+ # Only do this if the tls-proxy service is defined and enabled
+ include_role:
+ name: sync-devstack-data
+ when: devstack_services['tls-proxy']|default(false)
+
+ - name: Run devstack on the sub-nodes
+ include_role:
+ name: run-devstack
+ when: inventory_hostname in groups['subnode']
+
+ - name: Discover hosts
+ # Discovers compute nodes (subnodes) and maps them to cells. Only run
+ # on the controller node.
+ # NOTE(mriedem): We want to remove this if/when nova supports
+ # auto-registration of computes with cells, but that's not happening in
+ # Ocata.
+ # NOTE(andreaf) This is taken (NOTE included) from the discover_hosts
+ # function in devstack gate. Since this is now in devstack, which is
+ # branched, we know that the discover_hosts tool exists.
+ become: true
+ become_user: stack
+ shell: ./tools/discover_hosts.sh
+ args:
+ chdir: "{{ devstack_base_dir }}/devstack"
+ when: inventory_hostname == 'controller'
+
+ when:
+ - '"controller" in hostvars'
+ - '"subnode" in groups'
diff --git a/roles/run-devstack/README.rst b/roles/run-devstack/README.rst
new file mode 100644
index 0000000..d77eb15
--- /dev/null
+++ b/roles/run-devstack/README.rst
@@ -0,0 +1,8 @@
+Run devstack
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/run-devstack/defaults/main.yaml b/roles/run-devstack/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/run-devstack/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/run-devstack/tasks/main.yaml b/roles/run-devstack/tasks/main.yaml
new file mode 100644
index 0000000..f58b31d
--- /dev/null
+++ b/roles/run-devstack/tasks/main.yaml
@@ -0,0 +1,11 @@
+- name: Run devstack
+ shell:
+ cmd: |
+ ./stack.sh 2>&1
+ rc=$?
+ echo "*** FINISHED ***"
+ exit $rc
+ args:
+ chdir: "{{devstack_base_dir}}/devstack"
+ become: true
+ become_user: stack
diff --git a/roles/setup-devstack-cache/README.rst b/roles/setup-devstack-cache/README.rst
new file mode 100644
index 0000000..b8938c3
--- /dev/null
+++ b/roles/setup-devstack-cache/README.rst
@@ -0,0 +1,15 @@
+Set up the devstack cache directory
+
+If the node has a cache of devstack image files, copy it into place.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: devstack_cache_dir
+ :default: /opt/cache
+
+ The directory with the cached files.
diff --git a/roles/setup-devstack-cache/defaults/main.yaml b/roles/setup-devstack-cache/defaults/main.yaml
new file mode 100644
index 0000000..c56720b
--- /dev/null
+++ b/roles/setup-devstack-cache/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_cache_dir: /opt/cache
diff --git a/roles/setup-devstack-cache/tasks/main.yaml b/roles/setup-devstack-cache/tasks/main.yaml
new file mode 100644
index 0000000..84f33f0
--- /dev/null
+++ b/roles/setup-devstack-cache/tasks/main.yaml
@@ -0,0 +1,14 @@
+- name: Copy cached devstack files
+ # This uses hard links to avoid using extra space.
+ command: "find {{ devstack_cache_dir }}/files -mindepth 1 -maxdepth 1 -exec cp -l {} {{ devstack_base_dir }}/devstack/files/ ;"
+ become: true
+
+- name: Set ownership of cached files
+ file:
+ path: '{{ devstack_base_dir }}/devstack/files'
+ state: directory
+ recurse: true
+ owner: stack
+ group: stack
+ mode: a+r
+ become: yes
diff --git a/roles/setup-devstack-log-dir/README.rst b/roles/setup-devstack-log-dir/README.rst
new file mode 100644
index 0000000..9d8dba3
--- /dev/null
+++ b/roles/setup-devstack-log-dir/README.rst
@@ -0,0 +1,11 @@
+Set up the devstack log directory
+
+Create a log directory on the ephemeral disk partition to save space
+on the root device.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/setup-devstack-log-dir/defaults/main.yaml b/roles/setup-devstack-log-dir/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-devstack-log-dir/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml
new file mode 100644
index 0000000..b9f38df
--- /dev/null
+++ b/roles/setup-devstack-log-dir/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Create logs directory
+ file:
+ path: '{{ devstack_base_dir }}/logs'
+ state: directory
+ become: yes
diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst
new file mode 100644
index 0000000..4ebf839
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/README.rst
@@ -0,0 +1,11 @@
+Set up the devstack source directories
+
+Ensure that the base directory exists, and then copy the source repos
+into it.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/setup-devstack-source-dirs/defaults/main.yaml b/roles/setup-devstack-source-dirs/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
new file mode 100644
index 0000000..e6bbae2
--- /dev/null
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -0,0 +1,22 @@
+- name: Find all source repos used by this job
+ find:
+ paths:
+ - src/git.openstack.org/openstack
+ - src/git.openstack.org/openstack-dev
+ - src/git.openstack.org/openstack-infra
+ file_type: directory
+ register: found_repos
+
+- name: Copy Zuul repos into devstack working directory
+ command: rsync -a {{ item.path }} {{ devstack_base_dir }}
+ with_items: '{{ found_repos.files }}'
+ become: yes
+
+- name: Set ownership of repos
+ file:
+ path: '{{ devstack_base_dir }}'
+ state: directory
+ recurse: true
+ owner: stack
+ group: stack
+ become: yes
diff --git a/roles/setup-stack-user/README.rst b/roles/setup-stack-user/README.rst
new file mode 100644
index 0000000..80c4d39
--- /dev/null
+++ b/roles/setup-stack-user/README.rst
@@ -0,0 +1,16 @@
+Set up the ``stack`` user
+
+Create the stack user, set up its home directory, and allow it to
+sudo.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: devstack_stack_home_dir
+ :default: {{ devstack_base_dir }}
+
+ The home directory for the stack user.
diff --git a/roles/setup-stack-user/defaults/main.yaml b/roles/setup-stack-user/defaults/main.yaml
new file mode 100644
index 0000000..6d0be66
--- /dev/null
+++ b/roles/setup-stack-user/defaults/main.yaml
@@ -0,0 +1,2 @@
+devstack_base_dir: /opt/stack
+devstack_stack_home_dir: '{{ devstack_base_dir }}'
diff --git a/roles/setup-stack-user/files/50_stack_sh b/roles/setup-stack-user/files/50_stack_sh
new file mode 100644
index 0000000..4c6b46b
--- /dev/null
+++ b/roles/setup-stack-user/files/50_stack_sh
@@ -0,0 +1 @@
+stack ALL=(root) NOPASSWD:ALL
diff --git a/roles/setup-stack-user/tasks/main.yaml b/roles/setup-stack-user/tasks/main.yaml
new file mode 100644
index 0000000..0fc7c2d
--- /dev/null
+++ b/roles/setup-stack-user/tasks/main.yaml
@@ -0,0 +1,47 @@
+- name: Create stack group
+ group:
+ name: stack
+ become: yes
+
+# NOTE(andreaf) Creating a user home_dir via the user module
+# is not safe, since it will fail if the containing folder does
+# not exist. If the folder does exist and is empty, the skeleton
+# is set up and ownership is set.
+- name: Create the stack user home folder
+ file:
+ path: '{{ devstack_stack_home_dir }}'
+ state: directory
+ become: yes
+
+- name: Create stack user
+ user:
+ name: stack
+ shell: /bin/bash
+ home: '{{ devstack_stack_home_dir }}'
+ group: stack
+ become: yes
+
+- name: Set stack user home directory permissions and ownership
+ file:
+ path: '{{ devstack_stack_home_dir }}'
+ mode: 0755
+ owner: stack
+ group: stack
+ become: yes
+
+- name: Copy 50_stack_sh file to /etc/sudoers.d
+ copy:
+ src: 50_stack_sh
+ dest: /etc/sudoers.d
+ mode: 0440
+ owner: root
+ group: root
+ become: yes
+
+- name: Create .cache folder within BASE
+ file:
+ path: '{{ devstack_stack_home_dir }}/.cache'
+ state: directory
+ owner: stack
+ group: stack
+ become: yes
diff --git a/roles/setup-tempest-user/README.rst b/roles/setup-tempest-user/README.rst
new file mode 100644
index 0000000..bb29c50
--- /dev/null
+++ b/roles/setup-tempest-user/README.rst
@@ -0,0 +1,10 @@
+Set up the ``tempest`` user
+
+Create the tempest user and allow it to sudo.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/setup-tempest-user/files/51_tempest_sh b/roles/setup-tempest-user/files/51_tempest_sh
new file mode 100644
index 0000000..f88ff9f
--- /dev/null
+++ b/roles/setup-tempest-user/files/51_tempest_sh
@@ -0,0 +1,3 @@
+tempest ALL=(root) NOPASSWD:/sbin/ip
+tempest ALL=(root) NOPASSWD:/sbin/iptables
+tempest ALL=(root) NOPASSWD:/usr/bin/ovsdb-client
diff --git a/roles/setup-tempest-user/tasks/main.yaml b/roles/setup-tempest-user/tasks/main.yaml
new file mode 100644
index 0000000..892eaf6
--- /dev/null
+++ b/roles/setup-tempest-user/tasks/main.yaml
@@ -0,0 +1,20 @@
+- name: Create tempest group
+ group:
+ name: tempest
+ become: yes
+
+- name: Create tempest user
+ user:
+ name: tempest
+ shell: /bin/bash
+ group: tempest
+ become: yes
+
+- name: Copy 51_tempest_sh to /etc/sudoers.d
+ copy:
+ src: 51_tempest_sh
+ dest: /etc/sudoers.d
+ owner: root
+ group: root
+ mode: 0440
+ become: yes
diff --git a/roles/start-fresh-logging/README.rst b/roles/start-fresh-logging/README.rst
new file mode 100644
index 0000000..11b029e
--- /dev/null
+++ b/roles/start-fresh-logging/README.rst
@@ -0,0 +1,11 @@
+Restart logging on all hosts
+
+Restart syslog so that the system logs only include output from the
+job.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/start-fresh-logging/defaults/main.yaml b/roles/start-fresh-logging/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/start-fresh-logging/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/start-fresh-logging/tasks/main.yaml b/roles/start-fresh-logging/tasks/main.yaml
new file mode 100644
index 0000000..6c7ba66
--- /dev/null
+++ b/roles/start-fresh-logging/tasks/main.yaml
@@ -0,0 +1,56 @@
+- name: Check whether journalctl is available
+ command: which journalctl
+ changed_when: False
+ failed_when: False
+ register: which_out
+
+- block:
+ - name: Get current date
+ command: date +"%Y-%m-%d %H:%M:%S"
+ register: date_out
+
+ - name: Copy current date to log-start-timestamp.txt
+ copy:
+ dest: "{{ devstack_base_dir }}/log-start-timestamp.txt"
+ content: "{{ date_out.stdout }}"
+ when: which_out.rc == 0
+ become: yes
+
+- block:
+ - name: Stop rsyslog
+ service: name=rsyslog state=stopped
+
+ - name: Save syslog file prior to devstack run
+ command: mv /var/log/syslog /var/log/syslog-pre-devstack
+
+ - name: Save kern.log file prior to devstack run
+ command: mv /var/log/kern.log /var/log/kern_log-pre-devstack
+
+ - name: Recreate syslog file
+ file: name=/var/log/syslog state=touch
+
+ - name: Recreate syslog file owner and group
+ command: chown /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+ - name: Recreate syslog file permissions
+ command: chmod /var/log/syslog --ref /var/log/syslog-pre-devstack
+
+ - name: Add read permissions to all on syslog file
+ file: name=/var/log/syslog mode=a+r
+
+ - name: Recreate kern.log file
+ file: name=/var/log/kern.log state=touch
+
+ - name: Recreate kern.log file owner and group
+ command: chown /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+ - name: Recreate kern.log file permissions
+ command: chmod /var/log/kern.log --ref /var/log/kern_log-pre-devstack
+
+ - name: Add read permissions to all on kern.log file
+ file: name=/var/log/kern.log mode=a+r
+
+ - name: Start rsyslog
+ service: name=rsyslog state=started
+ when: which_out.rc == 1
+ become: yes
diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst
new file mode 100644
index 0000000..500e8cc
--- /dev/null
+++ b/roles/sync-devstack-data/README.rst
@@ -0,0 +1,12 @@
+Sync devstack data for multinode configurations
+
+Sync any data files, including certificates, that are needed when TLS
+is enabled. This role must be executed on the controller; it pushes
+data to all subnodes.
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/sync-devstack-data/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml
new file mode 100644
index 0000000..4600015
--- /dev/null
+++ b/roles/sync-devstack-data/tasks/main.yaml
@@ -0,0 +1,48 @@
+- name: Ensure the data folder exists
+ become: true
+ file:
+ path: "{{ devstack_base_dir }}/data"
+ state: directory
+ owner: stack
+ group: stack
+ mode: 0755
+ when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Ensure the CA folder exists
+ become: true
+ file:
+ path: "{{ devstack_base_dir }}/data/CA"
+ state: directory
+ owner: stack
+ group: stack
+ mode: 0755
+ when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Pull the CA certificate and folder
+ become: true
+ synchronize:
+ src: "{{ item }}"
+ dest: "{{ zuul.executor.work_root }}/{{ item | basename }}"
+ mode: pull
+ with_items:
+ - "{{ devstack_base_dir }}/data/ca-bundle.pem"
+ - "{{ devstack_base_dir }}/data/CA"
+ when: inventory_hostname == 'controller'
+
+- name: Push the CA certificate
+ become: true
+ become_user: stack
+ synchronize:
+ src: "{{ zuul.executor.work_root }}/ca-bundle.pem"
+ dest: "{{ devstack_base_dir }}/data/ca-bundle.pem"
+ mode: push
+ when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Push the CA folder
+ become: true
+ become_user: stack
+ synchronize:
+ src: "{{ zuul.executor.work_root }}/CA/"
+ dest: "{{ devstack_base_dir }}/data/"
+ mode: push
+ when: 'inventory_hostname in groups["subnode"]|default([])'
diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst
new file mode 100644
index 0000000..73f9f0d
--- /dev/null
+++ b/roles/write-devstack-local-conf/README.rst
@@ -0,0 +1,77 @@
+Write the local.conf file for use by devstack
+
+**Role Variables**
+
+.. zuul:rolevar:: devstack_base_dir
+ :default: /opt/stack
+
+ The devstack base directory.
+
+.. zuul:rolevar:: devstack_local_conf_path
+ :default: {{ devstack_base_dir }}/devstack/local.conf
+
+ The path of the local.conf file.
+
+.. zuul:rolevar:: devstack_localrc
+ :type: dict
+
+ A dictionary of variables that should be written to the localrc
+ section of local.conf. The values (which are strings) may contain
+ bash shell variables, and will be ordered so that any variable
+ referenced by another entry is written before the entry that uses it.
+
+.. zuul:rolevar:: devstack_local_conf
+ :type: dict
+
+ A complex argument consisting of nested dictionaries which combine
+ to form the meta-sections of the local.conf file. The top level is
+ a dictionary of phases, followed by dictionaries of filenames, then
+ sections, which finally contain key-value pairs for the INI file
+ entries in those sections.
+
+ The keys in this dictionary are the devstack phases.
+
+ .. zuul:rolevar:: [phase]
+ :type: dict
+
+ The keys in this dictionary are the filenames for this phase.
+
+ .. zuul:rolevar:: [filename]
+ :type: dict
+
+ The keys in this dictionary are the INI sections in this file.
+
+ .. zuul:rolevar:: [section]
+ :type: dict
+
+ This is a dictionary of key-value pairs which comprise
+ this section of the INI file.
+
+.. zuul:rolevar:: devstack_base_services
+ :type: list
+ :default: {{ base_services | default(omit) }}
+
+ A list of base services which are enabled. Services can be added or removed
+ from this list via the ``devstack_services`` variable. This is ignored if
+ ``base`` is set to ``False`` in ``devstack_services``.
+
+.. zuul:rolevar:: devstack_services
+ :type: dict
+
+ A dictionary mapping service names to boolean values. If the
+ boolean value is ``false``, a ``disable_service`` line will be
+ emitted for the service name. If it is ``true``, then
+ ``enable_service`` will be emitted. All other values are ignored.
+
+ The special key ``base`` can be used to enable or disable the base set of
+ services enabled by default. If ``base`` is found, it will be processed
+ before all other keys. If its value is ``False``, a ``disable_all_services``
+ will be emitted; if its value is ``True``, services from
+ ``devstack_base_services`` will be emitted via ``ENABLED_SERVICES``.
+
+.. zuul:rolevar:: devstack_plugins
+ :type: dict
+
+ A dictionary mapping a plugin name to a git repo location. If the
+ location is a non-empty string, then an ``enable_plugin`` line will
+ be emitted for the plugin name.
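
To illustrate how the variables above combine, a hypothetical set of job
variables (every name below is an example only), annotated with what they
produce:

    devstack_localrc:
      # Z_VAR is written to localrc before A_VAR, even though A_VAR
      # sorts first, because A_VAR references it.
      A_VAR: ${Z_VAR}/24
      Z_VAR: 10.0.0.5
    devstack_services:
      base: false     # emits disable_all_services
      cinder: true    # emits enable_service cinder
      horizon: false  # emits disable_service horizon
    devstack_plugins:
      # emits enable_plugin foo git://git.openstack.org/openstack/foo-plugin
      foo: git://git.openstack.org/openstack/foo-plugin
    devstack_local_conf:
      post-config:
        $NOVA_CONF:
          DEFAULT:
            # rendered as a [[post-config|$NOVA_CONF]] meta-section
            debug: true
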
diff --git a/roles/write-devstack-local-conf/defaults/main.yaml b/roles/write-devstack-local-conf/defaults/main.yaml
new file mode 100644
index 0000000..7bc1dec
--- /dev/null
+++ b/roles/write-devstack-local-conf/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_local_conf_path: "{{ devstack_base_dir }}/devstack/local.conf"
+devstack_base_services: "{{ enabled_services | default(omit) }}"
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py
new file mode 100644
index 0000000..746f54f
--- /dev/null
+++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+
+
+class DependencyGraph(object):
+ # This is based on the JobGraph from Zuul.
+
+ def __init__(self):
+ self._names = set()
+ self._dependencies = {} # dependent_name -> set(parent_names)
+
+ def add(self, name, dependencies):
+ # Append the dependency information
+ self._dependencies.setdefault(name, set())
+ try:
+ for dependency in dependencies:
+ # Make sure a circular dependency is never created
+ ancestors = self._getParentNamesRecursively(
+ dependency, soft=True)
+ ancestors.add(dependency)
+ if name in ancestors:
+ raise Exception("Dependency cycle detected in {}".
+ format(name))
+ self._dependencies[name].add(dependency)
+ except Exception:
+ del self._dependencies[name]
+ raise
+
+ def getDependenciesRecursively(self, parent):
+ dependencies = []
+
+ current_dependencies = self._dependencies[parent]
+ for current in current_dependencies:
+ if current not in dependencies:
+ dependencies.append(current)
+ for dep in self.getDependenciesRecursively(current):
+ if dep not in dependencies:
+ dependencies.append(dep)
+ return dependencies
+
+ def _getParentNamesRecursively(self, dependent, soft=False):
+ all_parent_items = set()
+ items_to_iterate = set([dependent])
+ while len(items_to_iterate) > 0:
+ current_item = items_to_iterate.pop()
+ current_parent_items = self._dependencies.get(current_item)
+ if current_parent_items is None:
+ if soft:
+ current_parent_items = set()
+ else:
+ raise Exception("Dependent item {} not found: ".format(
+ dependent))
+ new_parent_items = current_parent_items - all_parent_items
+ items_to_iterate |= new_parent_items
+ all_parent_items |= new_parent_items
+ return all_parent_items
+
+
+class VarGraph(DependencyGraph):
+ def __init__(self, vars):
+ super(VarGraph, self).__init__()
+ self.vars = {}
+ self._varnames = set()
+ for k, v in vars.items():
+ self._varnames.add(k)
+ for k, v in vars.items():
+ self._addVar(k, str(v))
+
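+ # Matches bash-style variable references such as $FOO or ${FOO}.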
+ bash_var_re = re.compile(r'\$\{?(\w+)')
+ def getDependencies(self, value):
+ return self.bash_var_re.findall(value)
+
+ def _addVar(self, key, value):
+ if key in self.vars:
+ raise Exception("Variable {} already added".format(key))
+ self.vars[key] = value
+ # Append the dependency information
+ dependencies = set()
+ for dependency in self.getDependencies(value):
+ if dependency == key:
+ # A variable is allowed to reference itself; no
+ # dependency link needed in that case.
+ continue
+ if dependency not in self._varnames:
+ # It's not necessary to create a link for an
+ # external variable.
+ continue
+ dependencies.add(dependency)
+ try:
+ self.add(key, dependencies)
+ except Exception:
+ del self.vars[key]
+ raise
+
+ def getVars(self):
+ ret = []
+ keys = sorted(self.vars.keys())
+ seen = set()
+ for key in keys:
+ dependencies = self.getDependenciesRecursively(key)
+ for var in dependencies + [key]:
+ if var not in seen:
+ ret.append((var, self.vars[var]))
+ seen.add(var)
+ return ret
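+ # Example (hypothetical): VarGraph({'A': '$B', 'B': '1'}).getVars()
+ # returns [('B', '1'), ('A', '$B')]: entries a variable depends on
+ # are emitted before the variable itself.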
+
+
+class PluginGraph(DependencyGraph):
+ def __init__(self, base_dir, plugins):
+ super(PluginGraph, self).__init__()
+ # The dependency trees expressed by all the plugins we found
+ # (which may be more than those the job is using).
+ self._plugin_dependencies = {}
+ self.loadPluginNames(base_dir)
+
+ self.plugins = {}
+ self._pluginnames = set()
+ for k, v in plugins.items():
+ self._pluginnames.add(k)
+ for k, v in plugins.items():
+ self._addPlugin(k, str(v))
+
+ def loadPluginNames(self, base_dir):
+ if base_dir is None:
+ return
+ git_roots = []
+ for root, dirs, files in os.walk(base_dir):
+ if '.git' not in dirs:
+ continue
+ # Don't go deeper than git roots
+ dirs[:] = []
+ git_roots.append(root)
+ for root in git_roots:
+ devstack = os.path.join(root, 'devstack')
+ if not (os.path.exists(devstack) and os.path.isdir(devstack)):
+ continue
+ settings = os.path.join(devstack, 'settings')
+ if not (os.path.exists(settings) and os.path.isfile(settings)):
+ continue
+ self.loadDevstackPluginInfo(settings)
+
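+ # These patterns match lines in a plugin's devstack/settings file:
+ # define_plugin <name>
+ # plugin_requires <name> <required-plugin>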
+ define_re = re.compile(r'^define_plugin\s+(\w+).*')
+ require_re = re.compile(r'^plugin_requires\s+(\w+)\s+(\w+).*')
+ def loadDevstackPluginInfo(self, fn):
+ name = None
+ reqs = set()
+ with open(fn) as f:
+ for line in f:
+ m = self.define_re.match(line)
+ if m:
+ name = m.group(1)
+ m = self.require_re.match(line)
+ if m:
+ if name == m.group(1):
+ reqs.add(m.group(2))
+ if name and reqs:
+ self._plugin_dependencies[name] = reqs
+
+ def getDependencies(self, value):
+ return self._plugin_dependencies.get(value, [])
+
+ def _addPlugin(self, key, value):
+ if key in self.plugins:
+ raise Exception("Plugin {} already added".format(key))
+ self.plugins[key] = value
+ # Append the dependency information
+ dependencies = set()
+ for dependency in self.getDependencies(key):
+ if dependency == key:
+ continue
+ dependencies.add(dependency)
+ try:
+ self.add(key, dependencies)
+ except Exception:
+ del self.plugins[key]
+ raise
+
+ def getPlugins(self):
+ ret = []
+ keys = sorted(self.plugins.keys())
+ seen = set()
+ for key in keys:
+ dependencies = self.getDependenciesRecursively(key)
+ for plugin in dependencies + [key]:
+ if plugin not in seen:
+ ret.append((plugin, self.plugins[plugin]))
+ seen.add(plugin)
+ return ret
+
+
+class LocalConf(object):
+
+ def __init__(self, localrc, localconf, base_services, services, plugins,
+ base_dir):
+ self.localrc = []
+ self.meta_sections = {}
+ self.plugin_deps = {}
+ self.base_dir = base_dir
+ if plugins:
+ self.handle_plugins(plugins)
+ if services or base_services:
+ self.handle_services(base_services, services or {})
+ if localrc:
+ self.handle_localrc(localrc)
+ if localconf:
+ self.handle_localconf(localconf)
+
+ def handle_plugins(self, plugins):
+ pg = PluginGraph(self.base_dir, plugins)
+ for k, v in pg.getPlugins():
+ if v:
+ self.localrc.append('enable_plugin {} {}'.format(k, v))
+
+ def handle_services(self, base_services, services):
+ enable_base_services = services.pop('base', True)
+ if enable_base_services and base_services:
+ self.localrc.append('ENABLED_SERVICES={}'.format(
+ ",".join(base_services)))
+ else:
+ self.localrc.append('disable_all_services')
+ for k, v in services.items():
+ if v is False:
+ self.localrc.append('disable_service {}'.format(k))
+ elif v is True:
+ self.localrc.append('enable_service {}'.format(k))
+
+ def handle_localrc(self, localrc):
+ vg = VarGraph(localrc)
+ for k, v in vg.getVars():
+ self.localrc.append('{}={}'.format(k, v))
+
+ def handle_localconf(self, localconf):
+ for phase, phase_data in localconf.items():
+ for fn, fn_data in phase_data.items():
+ ms_name = '[[{}|{}]]'.format(phase, fn)
+ ms_data = []
+ for section, section_data in fn_data.items():
+ ms_data.append('[{}]'.format(section))
+ for k, v in section_data.items():
+ ms_data.append('{} = {}'.format(k, v))
+ ms_data.append('')
+ self.meta_sections[ms_name] = ms_data
+
+ def write(self, path):
+ with open(path, 'w') as f:
+ f.write('[[local|localrc]]\n')
+ f.write('\n'.join(self.localrc))
+ f.write('\n\n')
+ for section, lines in self.meta_sections.items():
+ f.write('{}\n'.format(section))
+ f.write('\n'.join(lines))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ plugins=dict(type='dict'),
+ base_services=dict(type='list'),
+ services=dict(type='dict'),
+ localrc=dict(type='dict'),
+ local_conf=dict(type='dict'),
+ base_dir=dict(type='path'),
+ path=dict(type='str'),
+ )
+ )
+
+ p = module.params
+ lc = LocalConf(p.get('localrc'),
+ p.get('local_conf'),
+ p.get('base_services'),
+ p.get('services'),
+ p.get('plugins'),
+ p.get('base_dir'))
+ lc.write(p['path'])
+
+ module.exit_json()
+
+
+try:
+ from ansible.module_utils.basic import * # noqa
+ from ansible.module_utils.basic import AnsibleModule
+except ImportError:
+ pass
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py
new file mode 100644
index 0000000..843ca6e
--- /dev/null
+++ b/roles/write-devstack-local-conf/library/test.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from devstack_local_conf import LocalConf
+from collections import OrderedDict
+
+class TestDevstackLocalConf(unittest.TestCase):
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.tmpdir)
+
+ def test_plugins(self):
+ "Test that plugins without dependencies work"
+ localrc = {'test_localrc': '1'}
+ local_conf = {'install':
+ {'nova.conf':
+ {'main':
+ {'test_conf': '2'}}}}
+ services = {'cinder': True}
+ # We use OrderedDict here to make sure the plugins are in the
+ # *wrong* order for testing.
+ plugins = OrderedDict([
+ ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
+ ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+ ('baz', 'git://git.openstack.org/openstack/baz-plugin'),
+ ])
+ p = dict(localrc=localrc,
+ local_conf=local_conf,
+ base_services=[],
+ services=services,
+ plugins=plugins,
+ base_dir='./test',
+ path=os.path.join(self.tmpdir, 'test.local.conf'))
+ lc = LocalConf(p.get('localrc'),
+ p.get('local_conf'),
+ p.get('base_services'),
+ p.get('services'),
+ p.get('plugins'),
+ p.get('base_dir'))
+ lc.write(p['path'])
+
+ plugins = []
+ with open(p['path']) as f:
+ for line in f:
+ if line.startswith('enable_plugin'):
+ plugins.append(line.split()[1])
+ self.assertEqual(['bar', 'baz', 'foo'], plugins)
+
+ def test_plugin_deps(self):
+ "Test that plugins with dependencies work"
+ os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
+ os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
+ os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
+ os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
+ with open(os.path.join(
+ self.tmpdir,
+ 'foo-plugin', 'devstack', 'settings'), 'w') as f:
+ f.write('define_plugin foo\n')
+ with open(os.path.join(
+ self.tmpdir,
+ 'bar-plugin', 'devstack', 'settings'), 'w') as f:
+ f.write('define_plugin bar\n')
+ f.write('plugin_requires bar foo\n')
+
+ localrc = {'test_localrc': '1'}
+ local_conf = {'install':
+ {'nova.conf':
+ {'main':
+ {'test_conf': '2'}}}}
+ services = {'cinder': True}
+ # We use OrderedDict here to make sure the plugins are in the
+ # *wrong* order for testing.
+ plugins = OrderedDict([
+ ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
+ ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+ ])
+ p = dict(localrc=localrc,
+ local_conf=local_conf,
+ base_services=[],
+ services=services,
+ plugins=plugins,
+ base_dir=self.tmpdir,
+ path=os.path.join(self.tmpdir, 'test.local.conf'))
+ lc = LocalConf(p.get('localrc'),
+ p.get('local_conf'),
+ p.get('base_services'),
+ p.get('services'),
+ p.get('plugins'),
+ p.get('base_dir'))
+ lc.write(p['path'])
+
+ plugins = []
+ with open(p['path']) as f:
+ for line in f:
+ if line.startswith('enable_plugin'):
+ plugins.append(line.split()[1])
+ self.assertEqual(['foo', 'bar'], plugins)
+
+ def test_plugin_circular_deps(self):
+ "Test that plugins with circular dependencies fail"
+ os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', 'devstack'))
+ os.makedirs(os.path.join(self.tmpdir, 'foo-plugin', '.git'))
+ os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', 'devstack'))
+ os.makedirs(os.path.join(self.tmpdir, 'bar-plugin', '.git'))
+ with open(os.path.join(
+ self.tmpdir,
+ 'foo-plugin', 'devstack', 'settings'), 'w') as f:
+ f.write('define_plugin foo\n')
+ f.write('plugin_requires foo bar\n')
+ with open(os.path.join(
+ self.tmpdir,
+ 'bar-plugin', 'devstack', 'settings'), 'w') as f:
+ f.write('define_plugin bar\n')
+ f.write('plugin_requires bar foo\n')
+
+ localrc = {'test_localrc': '1'}
+ local_conf = {'install':
+ {'nova.conf':
+ {'main':
+ {'test_conf': '2'}}}}
+ services = {'cinder': True}
+ # We use OrderedDict here to make sure the plugins are in the
+ # *wrong* order for testing.
+ plugins = OrderedDict([
+ ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
+ ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+ ])
+ p = dict(localrc=localrc,
+ local_conf=local_conf,
+ base_services=[],
+ services=services,
+ plugins=plugins,
+ base_dir=self.tmpdir,
+ path=os.path.join(self.tmpdir, 'test.local.conf'))
+ with self.assertRaises(Exception):
+ lc = LocalConf(p.get('localrc'),
+ p.get('local_conf'),
+ p.get('base_services'),
+ p.get('services'),
+ p.get('plugins'),
+ p.get('base_dir'))
+ lc.write(p['path'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml
new file mode 100644
index 0000000..2a9f898
--- /dev/null
+++ b/roles/write-devstack-local-conf/tasks/main.yaml
@@ -0,0 +1,11 @@
+- name: Write a job-specific local_conf file
+ become: true
+ become_user: stack
+ devstack_local_conf:
+ path: "{{ devstack_local_conf_path }}"
+ plugins: "{{ devstack_plugins|default(omit) }}"
+ base_services: "{{ devstack_base_services|default(omit) }}"
+ services: "{{ devstack_services|default(omit) }}"
+ localrc: "{{ devstack_localrc|default(omit) }}"
+ local_conf: "{{ devstack_local_conf|default(omit) }}"
+ base_dir: "{{ devstack_base_dir|default(omit) }}"
diff --git a/samples/local.conf b/samples/local.conf
index 6d5351f..8b76137 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -10,7 +10,7 @@
# This is a collection of some of the settings we have found to be useful
# in our DevStack development environments. Additional settings are described
-# in http://docs.openstack.org/developer/devstack/configuration.html#local-conf
+# in https://docs.openstack.org/devstack/latest/configuration.html#local-conf
# These should be considered as samples and are unsupported DevStack code.
# The ``localrc`` section replaces the old ``localrc`` configuration file.
diff --git a/setup.cfg b/setup.cfg
index 73d22b5..fcd2b13 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
-home-page = http://docs.openstack.org/developer/devstack
+home-page = https://docs.openstack.org/devstack/latest
classifier =
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
diff --git a/stack.sh b/stack.sh
index 2be4528..ce6e6fe 100755
--- a/stack.sh
+++ b/stack.sh
@@ -30,9 +30,9 @@
# NOTE(sdague): why do we explicitly set locale when running stack.sh?
#
# Devstack is written in bash, and many functions used throughout
-# devstack process text comming off a command (like the ip command)
+# devstack process text coming off a command (like the ip command)
# and do transforms using grep, sed, cut, awk on the strings that are
-# returned. Many of these programs are interationalized, which is
+# returned. Many of these programs are internationalized, which is
# great for end users, but means that the strings that devstack
# functions depend upon might not be there in other locales. We thus
# need to pin the world to an english basis during the runs.
@@ -216,25 +216,18 @@
fi
source $TOP_DIR/stackrc
+# write /etc/devstack-version
+write_devstack_version
+
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|yakkety|zesty|stretch|jessie|f24|f25|opensuse-42.2|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f25|f26|f27|opensuse-42.3|opensuse-tumbleweed|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
fi
fi
-# Check to see if we are already running DevStack
-# Note that this may fail if USE_SCREEN=False
-if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then
- echo "You are already running a stack.sh session."
- echo "To rejoin this session type 'screen -x stack'."
- echo "To destroy this session, type './unstack.sh'."
- exit 1
-fi
-
-
# Local Settings
# --------------
@@ -289,7 +282,7 @@
# Some distros need to add repos beyond the defaults provided by the vendor
# to pick up required packages.
-function _install_epel_and_rdo {
+function _install_epel {
# NOTE: We always remove and install latest -- some environments
# use snapshot images, and if EPEL version updates they break
# unless we update them to latest version.
@@ -320,13 +313,28 @@
yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue"
sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
+}
- # ... and also optional to be enabled
+function _install_rdo {
+ # There are multiple options for this, including using CloudSIG
+ # repositories (centos-release-*), trunk versions, etc. Since
+ # we're not interested in the actual openstack distributions
+ # (we're using git to run!) but only peripheral packages
+ # like kvm or ovs, this has been reliable.
+
+ # TODO(ianw): figure out how to best mirror -- probably use infra
+ # mirror RDO reverse proxy. We could either have test
+ # infrastructure set it up disabled like EPEL, or fiddle it here.
+ # Per the point above, it's a bunch of repos so it starts getting a
+ # little messy...
+ if ! is_package_installed rdo-release ; then
+ yum_install https://rdoproject.org/repos/rdo-release.rpm
+ fi
+
+ # Also enable optional for RHEL7 proper. Note this is a silent
+ # no-op on other platforms.
sudo yum-config-manager --enable rhel-7-server-optional-rpms
- # install the lastest RDO
- is_package_installed rdo-release || yum_install https://rdoproject.org/repos/rdo-release.rpm
-
if is_oraclelinux; then
sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
fi
@@ -360,7 +368,7 @@
# Certain services such as rabbitmq require that the local hostname resolves
# correctly. Make sure it exists in /etc/hosts so that is always true.
LOCAL_HOSTNAME=`hostname -s`
-if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then
+if ! fgrep -qwe "$LOCAL_HOSTNAME" /etc/hosts; then
sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
fi
@@ -369,20 +377,22 @@
# to speed things up
SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
-# If we have /etc/nodepool/provider assume we're on a OpenStack CI
-# node, where EPEL is already pointing at our internal mirror and RDO
-# is pre-installed.
-if [[ -f /etc/nodepool/provider ]]; then
- SKIP_EPEL_INSTALL=True
- if is_fedora; then
- # However, EPEL is not enabled by default.
+if [[ $DISTRO == "rhel7" ]]; then
+ # If we have /etc/ci/mirror_info.sh assume we're on an OpenStack CI
+ # node, where EPEL is installed (but disabled) and already
+ # pointing at our internal mirror
+ if [[ -f /etc/ci/mirror_info.sh ]]; then
+ SKIP_EPEL_INSTALL=True
sudo yum-config-manager --enable epel
fi
-fi
-if is_fedora && [[ $DISTRO == "rhel7" ]] && \
- [[ ${SKIP_EPEL_INSTALL} != True ]]; then
- _install_epel_and_rdo
+ if [[ ${SKIP_EPEL_INSTALL} != True ]]; then
+ _install_epel
+ fi
+ # Along with EPEL, CentOS (and look-alikes) require some packages only
+ # available in RDO repositories (e.g. OVS, or later versions of
+ # kvm) to run.
+ _install_rdo
fi
# Ensure python is installed
@@ -395,6 +405,7 @@
# Set up logging level
VERBOSE=$(trueorfalse True VERBOSE)
+VERBOSE_NO_TIMESTAMP=$(trueorfalse False VERBOSE_NO_TIMESTAMP)
# Draw a spinner so the user knows something is happening
function spinner {
@@ -460,8 +471,12 @@
# stdout later.
exec 3>&1
if [[ "$VERBOSE" == "True" ]]; then
+ _of_args="-v"
+ if [[ "$VERBOSE_NO_TIMESTAMP" == "True" ]]; then
+ _of_args="$_of_args --no-timestamp"
+ fi
# Set fd 1 and 2 to write the log file
- exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1
+ exec 1> >( $TOP_DIR/tools/outfilter.py $_of_args -o "${LOGFILE}" ) 2>&1
# Set fd 6 to summary log file
exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
else
@@ -488,24 +503,6 @@
exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
fi
-# Set up logging of screen windows
-# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``, we will log to the file
-# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
-# ``screen-$SERVICE_NAME.log`` to the latest log file.
-# Logs are kept for as long specified in ``LOGDAYS``.
-# This is deprecated....logs go in ``LOGDIR``, only symlinks will be here now.
-if [[ -n "$SCREEN_LOGDIR" ]]; then
-
- # We make sure the directory is created.
- if [[ -d "$SCREEN_LOGDIR" ]]; then
- # We cleanup the old logs
- find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
- else
- mkdir -p $SCREEN_LOGDIR
- fi
-fi
-
# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
check_path_perm_sanity ${DEST}
@@ -534,14 +531,20 @@
if [[ $r -ne 0 ]]; then
echo "Error on exit"
- generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
+ # If we error before we've installed os-testr, this will fail.
+ if type -p generate-subunit > /dev/null; then
+ generate-subunit $DEVSTACK_START_TIME $SECONDS 'fail' >> ${SUBUNIT_OUTPUT}
+ fi
if [[ -z $LOGDIR ]]; then
$TOP_DIR/tools/worlddump.py
else
$TOP_DIR/tools/worlddump.py -d $LOGDIR
fi
else
- generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT}
+ # If we error before we've installed os-testr, this will fail.
+ if type -p generate-subunit > /dev/null; then
+ generate-subunit $DEVSTACK_START_TIME $SECONDS >> ${SUBUNIT_OUTPUT}
+ fi
fi
exit $r
@@ -775,6 +778,7 @@
# Do the ugly hacks for broken packages and distros
source $TOP_DIR/tools/fixup_stuff.sh
+fixup_all
if [[ "$USE_SYSTEMD" == "True" ]]; then
pip_install_gr systemd-python
@@ -899,7 +903,6 @@
if is_service_enabled placement; then
# placement api
stack_install_service placement
- cleanup_placement
configure_placement
fi
@@ -914,8 +917,6 @@
fi
if is_service_enabled horizon; then
- # django openstack_auth
- install_django_openstack_auth
# dashboard
stack_install_service horizon
fi
@@ -962,17 +963,15 @@
if [[ $SYSLOG != "False" ]]; then
if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
# Configure the master host to receive
- cat <<EOF >/tmp/90-stack-m.conf
+ cat <<EOF | sudo tee /etc/rsyslog.d/90-stack-m.conf >/dev/null
\$ModLoad imrelp
\$InputRELPServerRun $SYSLOG_PORT
EOF
- sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
else
# Set rsyslog to send to remote host
- cat <<EOF >/tmp/90-stack-s.conf
+ cat <<EOF | sudo tee /etc/rsyslog.d/90-stack-s.conf >/dev/null
*.* :omrelp:$SYSLOG_HOST:$SYSLOG_PORT
EOF
- sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
fi
RSYSLOGCONF="/etc/rsyslog.conf"
@@ -1013,38 +1012,6 @@
configure_database
fi
-
-# Configure screen
-# ----------------
-
-USE_SCREEN=$(trueorfalse True USE_SCREEN)
-if [[ "$USE_SCREEN" == "True" ]]; then
- # Create a new named screen to run processes in
- screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
- sleep 1
-
- # Set a reasonable status bar
- SCREEN_HARDSTATUS=${SCREEN_HARDSTATUS:-}
- if [ -z "$SCREEN_HARDSTATUS" ]; then
- SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
- fi
- screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
- screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
-
- if is_service_enabled tls-proxy; then
- follow_tls_proxy
- fi
-fi
-
-# Clear ``screenrc`` file
-SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
- rm -f $SCREENRC
-fi
-
-# Initialize the directory for service status check
-init_service_check
-
# Save configuration values
save_stackenv $LINENO
@@ -1058,7 +1025,7 @@
# be memory bound not cpu bound so enable KSM by default but allow people
# to opt out if the CPU time is more important to them.
-if [[ "ENABLE_KSM" == "True" ]] ; then
+if [[ $ENABLE_KSM == "True" ]] ; then
if [[ -f /sys/kernel/mm/ksm/run ]] ; then
sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run"
fi
@@ -1293,16 +1260,17 @@
done
fi
-# Create a randomized default value for the key manager's fixed_key
-# NOTE(lyarwood): This is currently set to 36 as a workaround to the following
-# libvirt bug that incorrectly pads passphrases that are a multiple of 16 bytes
-# in length.
-# Unable to use LUKS passphrase that is exactly 16 bytes long
-# https://bugzilla.redhat.com/show_bug.cgi?id=1447297
+# NOTE(lyarwood): By default use a single hardcoded fixed_key across devstack
+# deployments. This ensures the keys match across nova and cinder across all
+# hosts.
+FIXED_KEY=${FIXED_KEY:-bae3516cc1c0eb18b05440eba8012a4a880a2ee04d584a9c1579445e675b12defdc716ec}
if is_service_enabled nova; then
- key=$(generate_hex_string 36)
- iniset $NOVA_CONF key_manager fixed_key "$key"
- iniset $NOVA_CPU_CONF key_manager fixed_key "$key"
+ iniset $NOVA_CONF key_manager fixed_key "$FIXED_KEY"
+ iniset $NOVA_CPU_CONF key_manager fixed_key "$FIXED_KEY"
+fi
+
+if is_service_enabled cinder; then
+ iniset $CINDER_CONF key_manager fixed_key "$FIXED_KEY"
fi
# Launch the nova-api and wait for it to answer before continuing
@@ -1417,18 +1385,16 @@
merge_config_group $TOP_DIR/local.conf post-extra
-# Run local script
-# ----------------
-
-# Run ``local.sh`` if it exists to perform user-managed tasks
-if [[ -x $TOP_DIR/local.sh ]]; then
- echo "Running user script $TOP_DIR/local.sh"
- $TOP_DIR/local.sh
-fi
-
# Sanity checks
# =============
+# Check that computes are all ready
+#
+# TODO(sdague): there should be some generic phase here.
+if is_service_enabled n-cpu; then
+ is_nova_ready
+fi
+
# Check the status of running services
service_check
@@ -1453,6 +1419,15 @@
fi
fi
+# Run local script
+# ----------------
+
+# Run ``local.sh`` if it exists to perform user-managed tasks
+if [[ -x $TOP_DIR/local.sh ]]; then
+ echo "Running user script $TOP_DIR/local.sh"
+ $TOP_DIR/local.sh
+fi
+
# Bash completion
# ===============
@@ -1522,19 +1497,28 @@
# Warn that a deprecated feature was used
if [[ -n "$DEPRECATED_TEXT" ]]; then
- echo_summary "WARNING: $DEPRECATED_TEXT"
+ echo
+ echo -e "WARNING: $DEPRECATED_TEXT"
+ echo
fi
# If USE_SYSTEMD is enabled, tell the user about using it.
if [[ "$USE_SYSTEMD" == "True" ]]; then
+ echo
echo "Services are running under systemd unit files."
echo "For more information see: "
- echo "https://docs.openstack.org/developer/devstack/systemd.html"
+ echo "https://docs.openstack.org/devstack/latest/systemd.html"
+ echo
fi
+# Useful info on current state
+cat /etc/devstack-version
+echo
+
# Indicate how long this took to run (bash maintained variable ``SECONDS``)
echo_summary "stack.sh completed in $SECONDS seconds."
+
# Restore/close logging file descriptors
exec 1>&3
exec 2>&3
diff --git a/stackrc b/stackrc
index 50f7c89..166b7cf 100644
--- a/stackrc
+++ b/stackrc
@@ -13,6 +13,18 @@
# Source required DevStack functions and globals
source $RC_DIR/functions
+# Set the target branch. This is used so that stable branching
+# does not need to update each repo below.
+TARGET_BRANCH=master
+
+# Cycle trailing projects need to branch later than the others.
+TRAILING_TARGET_BRANCH=master
+
+# And some repos do not create stable branches, so this is used
+# to make it explicit and avoid accidentally setting to a stable
+# branch.
+BRANCHLESS_TARGET_BRANCH=master
+
# Destination path for installation
DEST=/opt/stack
@@ -53,7 +65,7 @@
# Keystone - nothing works without keystone
ENABLED_SERVICES=key
# Nova - services to support libvirt based openstack clouds
- ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth
+ ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth,n-api-meta
# Placement service needed for Nova
ENABLED_SERVICES+=,placement-api,placement-client
# Glance services needed for Nova
@@ -77,25 +89,20 @@
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=osapi_compute,metadata
+# CELLSV2_SETUP - how we should configure services with cells v2
+#
+# - superconductor - this is one conductor for the api services, and
+# one per cell managing the compute services. This is preferred
+# - singleconductor - this is one conductor for the whole deployment;
+# this is not recommended, and will be removed in the future.
+CELLSV2_SETUP=${CELLSV2_SETUP:-"superconductor"}
+
# Set the root URL for Horizon
HORIZON_APACHE_ROOT="/dashboard"
-# TODO(sdague): Queens
-#
-# All the non systemd paths should be removed in queens, they only
-# exist in Pike to support testing from grenade. Ensure that all this
-# is cleaned up and purged, which should dramatically simplify the
-# devstack codebase.
-
-# Whether to use 'dev mode' for screen windows. Dev mode works by
-# stuffing text into the screen windows so that a developer can use
-# ctrl-c, up-arrow, enter to restart the service. Starting services
-# this way is slightly unreliable, and a bit slower, so this can
-# be disabled for automated testing by setting this value to False.
-USE_SCREEN=$(trueorfalse False USE_SCREEN)
-
-# Whether to use SYSTEMD to manage services
-USE_SYSTEMD=$(trueorfalse False USE_SYSTEMD)
+# Whether to use SYSTEMD to manage services; we only do this from
+# Queens forward.
+USE_SYSTEMD="True"
USER_UNITS=$(trueorfalse False USER_UNITS)
if [[ "$USER_UNITS" == "True" ]]; then
SYSTEMD_DIR="$HOME/.local/share/systemd/user"
@@ -109,21 +116,11 @@
# Whether or not to enable Kernel Samepage Merging (KSM) if available.
# This allows programs that mark their memory as mergeable to share
# memory pages if they are identical. This is particularly useful with
-# libvirt backends. This reduces memory useage at the cost of CPU overhead
+# libvirt backends. This reduces memory usage at the cost of CPU overhead
# to scan memory. We default to enabling it because we tend to be more
# memory constrained than CPU bound.
ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
-# When using screen, should we keep a log file on disk? You might
-# want this False if you have a long-running setup where verbose logs
-# can fill-up the host.
-# XXX: Ideally screen itself would be configured to log but just not
-# activate. This isn't possible with the screerc syntax. Temporary
-# logging can still be used by a developer with:
-# C-a : logfile foo
-# C-a : log on
-SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING)
-
# Passwords generated by interactive devstack runs
if [[ -r $RC_DIR/.localrc.password ]]; then
source $RC_DIR/.localrc.password
@@ -136,7 +133,7 @@
# base name of the directory from which they are installed. See
# enable_python3_package to edit this variable and use_python3_for to
# test membership.
-export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient"
+export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,openstacksdk"
# Explicitly list services not to run under Python 3. See
# disable_python3_package to edit this variable.
@@ -145,10 +142,12 @@
# When Python 3 is supported by an application, adding the specific
# version of Python 3 to this variable will install the app using that
# version of the interpreter instead of 2.7.
-export PYTHON3_VERSION=${PYTHON3_VERSION:-3.5}
+_DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
+export PYTHON3_VERSION=${PYTHON3_VERSION:-${_DEFAULT_PYTHON3_VERSION:-3.5}}
# Just to be more explicit on the Python 2 version to use.
-export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7}
+_DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
+export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
@@ -159,19 +158,6 @@
source $RC_DIR/.localrc.auto
fi
-# TODO(sdague): Delete all this in Queens.
-if [[ "$USE_SYSTEMD" == "True" ]]; then
- USE_SCREEN=False
-fi
-# if we are forcing off USE_SCREEN (as we do in the gate), force on
-# systemd. This allows us to drop one of 3 paths through the code.
-if [[ "$USE_SCREEN" == "False" ]]; then
- # Remove in Pike: this gets us through grenade upgrade
- if [[ "$GRENADE_PHASE" != "target" ]]; then
- USE_SYSTEMD="True"
- fi
-fi
-
# Default for log coloring is based on interactive-or-not.
# Baseline assumption is that non-interactive invocations are for CI,
# where logs are to be presented as browsable text files; hence color
@@ -210,7 +196,7 @@
# will to be set to ``3`` in order to make DevStack register the Identity
# endpoint as v3. This flag is experimental and will be used as basis to
# identify the projects which still have issues to operate with Identity v3.
-ENABLE_IDENTITY_V2=$(trueorfalse True ENABLE_IDENTITY_V2)
+ENABLE_IDENTITY_V2=$(trueorfalse False ENABLE_IDENTITY_V2)
if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
IDENTITY_API_VERSION=3
fi
@@ -272,6 +258,7 @@
# Setting the variable to 'ALL' will activate the download for all
# libraries.
+DEVSTACK_SERIES="rocky"
##############
#
@@ -281,35 +268,35 @@
# block storage service
CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
-CINDER_BRANCH=${CINDER_BRANCH:-master}
+CINDER_BRANCH=${CINDER_BRANCH:-$TARGET_BRANCH}
# image catalog service
GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
-GLANCE_BRANCH=${GLANCE_BRANCH:-master}
+GLANCE_BRANCH=${GLANCE_BRANCH:-$TARGET_BRANCH}
# django powered web control panel for openstack
HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
-HORIZON_BRANCH=${HORIZON_BRANCH:-master}
+HORIZON_BRANCH=${HORIZON_BRANCH:-$TARGET_BRANCH}
# unified auth system (manages accounts/tokens)
KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
-KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
+KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-$TARGET_BRANCH}
# neutron service
NEUTRON_REPO=${NEUTRON_REPO:-${GIT_BASE}/openstack/neutron.git}
-NEUTRON_BRANCH=${NEUTRON_BRANCH:-master}
+NEUTRON_BRANCH=${NEUTRON_BRANCH:-$TARGET_BRANCH}
# neutron fwaas service
NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
-NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
+NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-$TARGET_BRANCH}
# compute service
NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
-NOVA_BRANCH=${NOVA_BRANCH:-master}
+NOVA_BRANCH=${NOVA_BRANCH:-$TARGET_BRANCH}
# object storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
-SWIFT_BRANCH=${SWIFT_BRANCH:-master}
+SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH}
##############
#
@@ -319,11 +306,11 @@
# consolidated openstack requirements
REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
-REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
+REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-$TARGET_BRANCH}
# Tempest test suite
TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
-TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
+TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
##############
@@ -335,53 +322,57 @@
# volume client
GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
-GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master}
+GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
# os-brick client for local volume attachment
GITREPO["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-brick-cinderclient-ext.git}
-GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-master}
+GITBRANCH["python-brick-cinderclient-ext"]=${BRICK_CINDERCLIENT_BRANCH:-$TARGET_BRANCH}
# python barbican client library
GITREPO["python-barbicanclient"]=${BARBICANCLIENT_REPO:-${GIT_BASE}/openstack/python-barbicanclient.git}
-GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-master}
+GITBRANCH["python-barbicanclient"]=${BARBICANCLIENT_BRANCH:-$TARGET_BRANCH}
GITDIR["python-barbicanclient"]=$DEST/python-barbicanclient
# python glance client library
GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
-GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master}
+GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-$TARGET_BRANCH}
# ironic client
GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
-GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
+GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-$TARGET_BRANCH}
# ironic plugin is out of tree, but nova uses it. set GITDIR here.
GITDIR["python-ironicclient"]=$DEST/python-ironicclient
# the base authentication plugins that clients use to authenticate
GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
-GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master}
+GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-$TARGET_BRANCH}
# python keystone client library that horizon uses
GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
-GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master}
+GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-$TARGET_BRANCH}
# neutron client
GITREPO["python-neutronclient"]=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
-GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-master}
+GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-$TARGET_BRANCH}
# python client library to nova that horizon (and others) use
GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
-GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master}
+GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-$TARGET_BRANCH}
# python swift client library
GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
-GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
+GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-$TARGET_BRANCH}
# consolidated openstack python client
GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
-GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master}
+GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-$TARGET_BRANCH}
# this doesn't exist in a lib file, so set it here
GITDIR["python-openstackclient"]=$DEST/python-openstackclient
+# placement-api CLI
+GITREPO["osc-placement"]=${OSC_PLACEMENT_REPO:-${GIT_BASE}/openstack/osc-placement.git}
+GITBRANCH["osc-placement"]=${OSC_PLACEMENT_BRANCH:-$TARGET_BRANCH}
+
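osc-placement is a plugin to python-openstackclient; once installed it adds the placement commands, for example:

    openstack resource provider list
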
###################
#
@@ -392,119 +383,119 @@
# castellan key manager interface
GITREPO["castellan"]=${CASTELLAN_REPO:-${GIT_BASE}/openstack/castellan.git}
-GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-master}
+GITBRANCH["castellan"]=${CASTELLAN_BRANCH:-$TARGET_BRANCH}
# cliff command line framework
GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
-GITBRANCH["cliff"]=${CLIFF_BRANCH:-master}
+GITBRANCH["cliff"]=${CLIFF_BRANCH:-$TARGET_BRANCH}
# async framework/helpers
GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git}
-GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master}
+GITBRANCH["futurist"]=${FUTURIST_BRANCH:-$TARGET_BRANCH}
# debtcollector deprecation framework/helpers
GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git}
-GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master}
+GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-$TARGET_BRANCH}
# helpful state machines
GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git}
-GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master}
+GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-$TARGET_BRANCH}
# oslo.cache
GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git}
-GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master}
+GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-$TARGET_BRANCH}
# oslo.concurrency
GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
-GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master}
+GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-$TARGET_BRANCH}
# oslo.config
GITREPO["oslo.config"]=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
-GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-master}
+GITBRANCH["oslo.config"]=${OSLOCFG_BRANCH:-$TARGET_BRANCH}
# oslo.context
GITREPO["oslo.context"]=${OSLOCTX_REPO:-${GIT_BASE}/openstack/oslo.context.git}
-GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-master}
+GITBRANCH["oslo.context"]=${OSLOCTX_BRANCH:-$TARGET_BRANCH}
# oslo.db
GITREPO["oslo.db"]=${OSLODB_REPO:-${GIT_BASE}/openstack/oslo.db.git}
-GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-master}
+GITBRANCH["oslo.db"]=${OSLODB_BRANCH:-$TARGET_BRANCH}
# oslo.i18n
GITREPO["oslo.i18n"]=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
-GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-master}
+GITBRANCH["oslo.i18n"]=${OSLOI18N_BRANCH:-$TARGET_BRANCH}
# oslo.log
GITREPO["oslo.log"]=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
-GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-master}
+GITBRANCH["oslo.log"]=${OSLOLOG_BRANCH:-$TARGET_BRANCH}
# oslo.messaging
GITREPO["oslo.messaging"]=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
-GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-master}
+GITBRANCH["oslo.messaging"]=${OSLOMSG_BRANCH:-$TARGET_BRANCH}
# oslo.middleware
GITREPO["oslo.middleware"]=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
-GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master}
+GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-$TARGET_BRANCH}
# oslo.policy
GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
-GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-$TARGET_BRANCH}
# oslo.privsep
GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
-GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master}
+GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-$TARGET_BRANCH}
# oslo.reports
GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
-GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
+GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-$TARGET_BRANCH}
# oslo.rootwrap
GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
-GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master}
+GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-$TARGET_BRANCH}
# oslo.serialization
GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
-GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master}
+GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-$TARGET_BRANCH}
# oslo.service
GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git}
-GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master}
+GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-$TARGET_BRANCH}
# oslo.utils
GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
-GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master}
+GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-$TARGET_BRANCH}
# oslo.versionedobjects
GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git}
-GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master}
+GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-$TARGET_BRANCH}
# oslo.vmware
GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
-GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master}
+GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-$TARGET_BRANCH}
# osprofiler
GITREPO["osprofiler"]=${OSPROFILER_REPO:-${GIT_BASE}/openstack/osprofiler.git}
-GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-master}
+GITBRANCH["osprofiler"]=${OSPROFILER_BRANCH:-$TARGET_BRANCH}
# pycadf auditing library
GITREPO["pycadf"]=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
-GITBRANCH["pycadf"]=${PYCADF_BRANCH:-master}
+GITBRANCH["pycadf"]=${PYCADF_BRANCH:-$TARGET_BRANCH}
# stevedore plugin manager
GITREPO["stevedore"]=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
-GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-master}
+GITBRANCH["stevedore"]=${STEVEDORE_BRANCH:-$TARGET_BRANCH}
# taskflow plugin manager
GITREPO["taskflow"]=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
-GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-master}
+GITBRANCH["taskflow"]=${TASKFLOW_BRANCH:-$TARGET_BRANCH}
# tooz plugin manager
GITREPO["tooz"]=${TOOZ_REPO:-${GIT_BASE}/openstack/tooz.git}
-GITBRANCH["tooz"]=${TOOZ_BRANCH:-master}
+GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH}
# pbr drives the setuptools configs
GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
-GITBRANCH["pbr"]=${PBR_BRANCH:-master}
+GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH}
##################
@@ -515,69 +506,65 @@
# cursive library
GITREPO["cursive"]=${CURSIVE_REPO:-${GIT_BASE}/openstack/cursive.git}
-GITBRANCH["cursive"]=${CURSIVE_BRANCH:-master}
+GITBRANCH["cursive"]=${CURSIVE_BRANCH:-$TARGET_BRANCH}
# glance store library
GITREPO["glance_store"]=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
-GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-master}
-
-# django openstack_auth library
-GITREPO["django_openstack_auth"]=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git}
-GITBRANCH["django_openstack_auth"]=${HORIZONAUTH_BRANCH:-master}
+GITBRANCH["glance_store"]=${GLANCE_STORE_BRANCH:-$TARGET_BRANCH}
# keystone middleware
GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git}
-GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master}
+GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
# s3 support for swift
SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
-SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
+SWIFT3_BRANCH=${SWIFT3_BRANCH:-$TARGET_BRANCH}
# ceilometer middleware
GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
-GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master}
+GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
+# openstacksdk OpenStack Python SDK
+GITREPO["openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/openstacksdk.git}
+GITBRANCH["openstacksdk"]=${OPENSTACKSDK_BRANCH:-$TARGET_BRANCH}
+
# os-brick library to manage local volume attaches
GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
-GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master}
+GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-$TARGET_BRANCH}
# os-client-config to manage clouds.yaml and friends
GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git}
-GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-master}
+GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-$TARGET_BRANCH}
GITDIR["os-client-config"]=$DEST/os-client-config
# os-vif library to communicate between Neutron and Nova
GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git}
-GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master}
+GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-$TARGET_BRANCH}
# osc-lib OpenStackClient common lib
GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git}
-GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master}
-
-# python-openstacksdk OpenStack Python SDK
-GITREPO["python-openstacksdk"]=${OPENSTACKSDK_REPO:-${GIT_BASE}/openstack/python-openstacksdk.git}
-GITBRANCH["python-openstacksdk"]=${OPENSTACKSDK_BRANCH:-master}
+GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-$TARGET_BRANCH}
# ironic common lib
GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
-GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master}
+GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-$TARGET_BRANCH}
# this doesn't exist in a lib file, so set it here
GITDIR["ironic-lib"]=$DEST/ironic-lib
# diskimage-builder tool
GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-master}
+GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$TARGET_BRANCH}
GITDIR["diskimage-builder"]=$DEST/diskimage-builder
# neutron-lib library containing neutron stable non-REST interfaces
GITREPO["neutron-lib"]=${NEUTRON_LIB_REPO:-${GIT_BASE}/openstack/neutron-lib.git}
-GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-master}
+GITBRANCH["neutron-lib"]=${NEUTRON_LIB_BRANCH:-$TARGET_BRANCH}
GITDIR["neutron-lib"]=$DEST/neutron-lib
# os-traits library for resource provider traits in the placement service
GITREPO["os-traits"]=${OS_TRAITS_REPO:-${GIT_BASE}/openstack/os-traits.git}
-GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-master}
+GITBRANCH["os-traits"]=${OS_TRAITS_BRANCH:-$TARGET_BRANCH}
##################
#
@@ -587,19 +574,19 @@
# run-parts script required by os-refresh-config
DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
-DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-master}
+DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
# os-apply-config configuration template tool
OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
-OAC_BRANCH=${OAC_BRANCH:-master}
+OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
# os-collect-config configuration agent
OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
-OCC_BRANCH=${OCC_BRANCH:-master}
+OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
# os-refresh-config configuration run-parts tool
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
-ORC_BRANCH=${ORC_BRANCH:-master}
+ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
#################
@@ -612,16 +599,21 @@
# ironic python agent
IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
-IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
+IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6}
# a websockets/html5 or flash powered SPICE console for vm instances
SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
-SPICE_BRANCH=${SPICE_BRANCH:-master}
+SPICE_BRANCH=${SPICE_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+# Global flag used to configure Tempest and potentially other services if
+# volume multiattach is supported. In Queens, only the libvirt compute driver
+# and lvm volume driver support multiattach, and qemu must be less than 2.10
+# or libvirt must be greater than or equal to 3.10.
+ENABLE_VOLUME_MULTIATTACH=$(trueorfalse False ENABLE_VOLUME_MULTIATTACH)
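
A sketch of how a job or local.conf would opt in to multiattach testing, using standard local.conf syntax:

    [[local|localrc]]
    ENABLE_VOLUME_MULTIATTACH=True
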
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
@@ -633,7 +625,12 @@
case "$VIRT_DRIVER" in
ironic|libvirt)
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
- if [[ "$os_VENDOR" =~ (Debian|Ubuntu) ]]; then
+ # If ENABLE_VOLUME_MULTIATTACH is True, the Ubuntu Cloud Archive can't
+ # be used until it provides libvirt>=3.10, and with older versions of
+ # Ubuntu the group is "libvirtd".
+ # TODO(mriedem): Remove the ENABLE_VOLUME_MULTIATTACH check when
+ # UCA has libvirt>=3.10.
+ if [[ "$os_VENDOR" =~ (Debian|Ubuntu) && "${ENABLE_VOLUME_MULTIATTACH}" == "False" ]]; then
# The groups change with newer libvirt. Older Ubuntu used
# 'libvirtd', but now uses libvirt like Debian. Do a quick check
# to see if libvirtd group already exists to handle grenade's case.
@@ -718,30 +715,65 @@
DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-$DEFAULT_IMAGE_NAME}
IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/${DEFAULT_IMAGE_FILE_NAME}";;
xenserver)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk}
- DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk.vhd.tgz}
- IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.5-x86_64-disk}
+    DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-0.3.5-x86_64-disk.vhd.tgz}
+ IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.5-x86_64-disk.vhd.tgz"
IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
+ fake)
+ # Use the same as the default for libvirt
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
+ DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
+ IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
esac
DOWNLOAD_DEFAULT_IMAGES=False
fi
-# Staging area for new images. These images are cached by a run of
-# ./tools/image_list.sh during CI image build (see
-# project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos).
-#
-# To avoid CI failures grabbing the images, new images should be here
-# for at least 24hrs (nodepool builds images at 14:00UTC) so the they
-# are in the cache.
-PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES)
-if [[ "$PRECACHE_IMAGES" == "True" ]]; then
- # required for trove devstack tests; see
- # git.openstack.org/cgit/openstack/trove/tree/devstack/plugin.sh
- IMAGE_URL="http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2"
- if ! [[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then
- IMAGE_URLS+=",$IMAGE_URL"
+# This is a comma-separated list of extra URLs to be listed for
+# download by the tools/image_list.sh script. CI environments can
+# pre-download these URLs and place them in $FILES. Later scripts can
+# then use "get_extra_file <url>", which will print out the path to the
+# file; it will either be downloaded on demand or acquired from the
+# cache if present.
+EXTRA_CACHE_URLS=""
+
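A hedged example of the workflow described above, with a hypothetical URL: a plugin appends to the cache list and later resolves the file through get_extra_file.

    EXTRA_CACHE_URLS+=",https://example.com/files/my-agent.tar.gz"
    agent_path=$(get_extra_file https://example.com/files/my-agent.tar.gz)
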
+# etcd3 defaults
+ETCD_VERSION=${ETCD_VERSION:-v3.2.17}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"}
+# etcd v3.2.x doesn't have anything for s390x
+ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
+# Make sure etcd3 downloads the correct architecture
+if is_arch "x86_64"; then
+ ETCD_ARCH="amd64"
+ ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_AMD64}
+elif is_arch "aarch64"; then
+ ETCD_ARCH="arm64"
+ ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_ARM64}
+elif is_arch "ppc64le"; then
+ ETCD_ARCH="ppc64le"
+ ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_PPC64}
+elif is_arch "s390x"; then
+ # An etcd3 binary for s390x is not available on github like it is
+ # for other arches. Only continue if a custom download URL was
+ # provided.
+ if [[ -n "${ETCD_DOWNLOAD_URL}" ]]; then
+ ETCD_ARCH="s390x"
+ ETCD_SHA256=${ETCD_SHA256:-$ETCD_SHA256_S390X}
+ else
+ exit_distro_not_supported "etcd3. No custom ETCD_DOWNLOAD_URL provided."
fi
+else
+    exit_distro_not_supported "invalid hardware type - $(uname -m)"
fi
+ETCD_PORT=${ETCD_PORT:-2379}
+ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380}
+ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
+ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
+ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
+ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
+# etcd is always required, so place it into list of pre-cached downloads
+EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION"
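
For s390x, or any platform without an upstream binary, a deployer would have to supply both knobs; the values below are placeholders:

    ETCD_DOWNLOAD_URL=https://mirror.example.com/etcd
    ETCD_SHA256=<sha256 of the mirrored tarball>
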
# Detect duplicate values in IMAGE_URLS
for image_url in ${IMAGE_URLS//,/ }; do
@@ -750,8 +782,8 @@
fi
done
-# 10Gb default volume backing file size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
+# 24GB default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-24G}
# Prefixes for volume and instance names
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
@@ -766,9 +798,6 @@
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""}
-# Set default screen name
-SCREEN_NAME=${SCREEN_NAME:-stack}
-
# Allow the use of an alternate protocol (such as https) for service endpoints
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
@@ -786,9 +815,15 @@
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+# Timeout for compute node registration in Nova
+NOVA_READY_TIMEOUT=${NOVA_READY_TIMEOUT:-$SERVICE_TIMEOUT}
+
# Service graceful shutdown timeout
SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
+# Graceful shutdown timeout for individual service workers
+WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
+
# Support alternative yum -- in future Fedora 'dnf' will become the
# only supported installer, but for now 'yum' and 'dnf' are both
# available in parallel with compatible CLIs. Allow manual switching
@@ -888,15 +923,6 @@
# Following entries need to be last items in file
-# Compatibility bits required by other callers like Grenade
-
-# Old way was using SCREEN_LOGDIR to locate those logs and LOGFILE for the stack.sh trace log.
-# LOGFILE SCREEN_LOGDIR output
-# not set not set no log files
-# set not set stack.sh log to LOGFILE
-# not set set screen logs to SCREEN_LOGDIR
-# set set stack.sh log to LOGFILE, screen logs to SCREEN_LOGDIR
-
# New way is LOGDIR for all logs and LOGFILE for stack.sh trace log, but if not fully-qualified will be in LOGDIR
# LOGFILE LOGDIR output
# not set not set (new) set LOGDIR from default
@@ -904,9 +930,6 @@
# not set set screen logs to LOGDIR
# set set stack.sh log to LOGFILE, screen logs to LOGDIR
-# For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR
-# symlinks to SCREEN_LOGDIR (compat)
-
# Set up new logging defaults
if [[ -z "${LOGDIR:-}" ]]; then
default_logdir=$DEST/logs
@@ -921,18 +944,11 @@
# LOGFILE had no path, set a default
LOGDIR="$default_logdir"
fi
-
- # Check for duplication
- if [[ "${SCREEN_LOGDIR:-}" == "${LOGDIR}" ]]; then
- # We don't need the symlinks since it's the same directory
- unset SCREEN_LOGDIR
- fi
fi
unset default_logdir logfile
fi
# ``LOGDIR`` is always set at this point so it is not useful as an 'enable' for service logs
-# ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks
# System-wide ulimit file descriptors override
ULIMIT_NOFILE=${ULIMIT_NOFILE:-2048}
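
Per the LOGFILE/LOGDIR matrix above, a typical CI-style setup defines both, sending the stack.sh trace to LOGFILE and service logs under LOGDIR; the paths are illustrative:

    LOGDIR=/opt/stack/logs
    LOGFILE=/opt/stack/logs/stack.sh.log
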
diff --git a/tests/run-process.sh b/tests/run-process.sh
deleted file mode 100755
index 301b9a0..0000000
--- a/tests/run-process.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-# tests/exec.sh - Test DevStack run_process() and stop_process()
-#
-# exec.sh start|stop|status
-#
-# Set USE_SCREEN True|False to change use of screen.
-#
-# This script emulates the basic exec environment in ``stack.sh`` to test
-# the process spawn and kill operations.
-
-if [[ -z $1 ]]; then
- echo "$0 start|stop"
- exit 1
-fi
-
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-source $TOP_DIR/functions
-
-USE_SCREEN=${USE_SCREEN:-False}
-
-ENABLED_SERVICES=fake-service
-
-SERVICE_DIR=/tmp
-SCREEN_NAME=test
-SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME}
-
-
-# Kill background processes on exit
-trap clean EXIT
-clean() {
- local r=$?
- jobs -p
- kill >/dev/null 2>&1 $(jobs -p)
- exit $r
-}
-
-
-# Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
- local r=$?
- jobs -p
- kill >/dev/null 2>&1 $(jobs -p)
- set +o xtrace
- [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
- exit $r
-}
-
-function status {
- if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then
- pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid)
- fi
- ps -ef | grep fake
-}
-
-function setup_screen {
-if [[ ! -d $SERVICE_DIR/$SCREEN_NAME ]]; then
- rm -rf $SERVICE_DIR/$SCREEN_NAME
- mkdir -p $SERVICE_DIR/$SCREEN_NAME
-fi
-
-if [[ "$USE_SCREEN" == "True" ]]; then
- # Create a new named screen to run processes in
- screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
- sleep 1
-
- # Set a reasonable status bar
- if [ -z "$SCREEN_HARDSTATUS" ]; then
- SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
- fi
- screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
-fi
-
-# Clear screen rc file
-SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc
-if [[ -e $SCREENRC ]]; then
- echo -n > $SCREENRC
-fi
-}
-
-# Mimic logging
- # Set up output redirection without log files
- # Copy stdout to fd 3
- exec 3>&1
- if [[ "$VERBOSE" != "True" ]]; then
- # Throw away stdout and stderr
- #exec 1>/dev/null 2>&1
- :
- fi
- # Always send summary fd to original stdout
- exec 6>&3
-
-
-if [[ "$1" == "start" ]]; then
- echo "Start service"
- setup_screen
- run_process fake-service "$TOP_DIR/tests/fake-service.sh"
- sleep 1
- status
-elif [[ "$1" == "stop" ]]; then
- echo "Stop service"
- stop_process fake-service
- status
-elif [[ "$1" == "status" ]]; then
- status
-else
- echo "Unknown command"
- exit 1
-fi
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 5b4ff32..c3b4457 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -35,9 +35,10 @@
ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore"
ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
-ALL_LIBS+=" oslo.serialization django_openstack_auth"
-ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap"
-ALL_LIBS+=" oslo.i18n oslo.utils python-openstacksdk python-swiftclient"
+ALL_LIBS+=" oslo.serialization"
+ALL_LIBS+=" python-openstackclient osc-lib osc-placement"
+ALL_LIBS+=" os-client-config oslo.rootwrap"
+ALL_LIBS+=" oslo.i18n oslo.utils openstacksdk python-swiftclient"
ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
ALL_LIBS+=" debtcollector os-brick os-traits automaton futurist oslo.service"
ALL_LIBS+=" oslo.cache oslo.reports osprofiler cursive"
diff --git a/tests/test_refs.sh b/tests/test_refs.sh
index 65848cd..0f9aa4a 100755
--- a/tests/test_refs.sh
+++ b/tests/test_refs.sh
@@ -15,10 +15,10 @@
echo "Ensuring we don't have crazy refs"
-REFS=`grep BRANCH stackrc | grep -v -- '-master' | grep -v 'NOVNC_BRANCH'`
+REFS=`grep BRANCH stackrc | grep -v 'TARGET_BRANCH' | grep -v 'NOVNC_BRANCH'`
rc=$?
if [[ $rc -eq 0 ]]; then
- echo "Branch defaults must be master. Found:"
+ echo "Branch defaults must be one of the *TARGET_BRANCH values. Found:"
echo $REFS
exit 1
fi
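
Illustrative: a stackrc line that hard-codes a branch instead of using one of the *TARGET_BRANCH defaults, such as the following, would now trip this check:

    FOO_BRANCH=${FOO_BRANCH:-stable/pike}
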
diff --git a/tests/test_write_devstack_local_conf_role.sh b/tests/test_write_devstack_local_conf_role.sh
new file mode 100755
index 0000000..b2bc0a2
--- /dev/null
+++ b/tests/test_write_devstack_local_conf_role.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+source $TOP/tests/unittest.sh
+
+python ./roles/write-devstack-local-conf/library/test.py
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 0b78bde..90b2c8b 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -45,27 +45,29 @@
# where Keystone will try and bind to the port and the port will already be
# in use as an ephemeral port by another process. This places an explicit
# exception into the Kernel for the Keystone AUTH ports.
-keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
+function fixup_keystone {
+ keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
-# Only do the reserved ports when available, on some system (like containers)
-# where it's not exposed we are almost pretty sure these ports would be
-# exclusive for our DevStack.
-if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
- # Get any currently reserved ports, strip off leading whitespace
- reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
+    # Only reserve the ports when the sysctl is available; on some systems
+    # (like containers) where it's not exposed, we can be fairly sure these
+    # ports will be exclusive to our DevStack.
+ if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
+ # Get any currently reserved ports, strip off leading whitespace
+ reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
- if [[ -z "${reserved_ports}" ]]; then
- # If there are no currently reserved ports, reserve the keystone ports
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
+ if [[ -z "${reserved_ports}" ]]; then
+ # If there are no currently reserved ports, reserve the keystone ports
+ sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
+ else
+ # If there are currently reserved ports, keep those and also reserve the
+ # Keystone specific ports. Duplicate reservations are merged into a single
+ # reservation (or range) automatically by the kernel.
+ sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
+ fi
else
- # If there are currently reserved ports, keep those and also reserve the
- # Keystone specific ports. Duplicate reservations are merged into a single
- # reservation (or range) automatically by the kernel.
- sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
+ echo_summary "WARNING: unable to reserve keystone ports"
fi
-else
- echo_summary "WARNING: unable to reserve keystone ports"
-fi
+}
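
The resulting reservation can be verified by reading the same sysctl back:

    sysctl net.ipv4.ip_local_reserved_ports
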
# Ubuntu Cloud Archive
#---------------------
@@ -75,7 +77,16 @@
# Make it possible to switch this based on an environment variable as
# libvirt 2.5.0 doesn't handle nested virtualization quite well and this
# is required for the trove development environment.
-if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "True" && "$DISTRO" = "xenial" ]]; then
+# The Pike UCA has qemu 2.10 but libvirt 3.6, therefore if
+# ENABLE_VOLUME_MULTIATTACH is True, we can't use the Pike UCA
+# because multiattach won't work with those package versions.
+# We can remove this check when the UCA has libvirt>=3.10.
+function fixup_uca {
+ if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" || \
+ "${ENABLE_VOLUME_MULTIATTACH}" == "True" ]]; then
+ return
+ fi
+
# This pulls in apt-add-repository
install_package "software-properties-common"
# Use UCA for newer libvirt. Should give us libvirt 2.5.0.
@@ -84,10 +95,10 @@
# we can find local mirrors then use that mirror.
source /etc/ci/mirror_info.sh
- sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/ocata main"
+ sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/pike main"
else
# Otherwise use upstream UCA
- sudo add-apt-repository -y cloud-archive:ocata
+ sudo add-apt-repository -y cloud-archive:pike
fi
# Disable use of libvirt wheel since a cached wheel build might be
@@ -99,8 +110,7 @@
# Force update our APT repos, since we added UCA above.
REPOS_UPDATED=False
apt_get_update
-fi
-
+}
# Python Packages
# ---------------
@@ -115,27 +125,32 @@
# Pre-install affected packages so we can fix the permissions
# These can go away once we are confident that pip 1.4.1+ is available everywhere
-# Fix prettytable 0.7.2 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install 'prettytable>=0.7'
-PACKAGE_DIR=$(get_package_path prettytable)
-# Only fix version 0.7.2
-dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
-if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
-fi
+function fixup_python_packages {
+ # Fix prettytable 0.7.2 permissions
+ # Don't specify --upgrade so we use the existing package if present
+ pip_install 'prettytable>=0.7'
+ PACKAGE_DIR=$(get_package_path prettytable)
+ # Only fix version 0.7.2
+ dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
+ if [[ -d $dir ]]; then
+ sudo chmod +r $dir/*
+ fi
-# Fix httplib2 0.8 permissions
-# Don't specify --upgrade so we use the existing package if present
-pip_install httplib2
-PACKAGE_DIR=$(get_package_path httplib2)
-# Only fix version 0.8
-dir=$(echo $PACKAGE_DIR-0.8*)
-if [[ -d $dir ]]; then
- sudo chmod +r $dir/*
-fi
+ # Fix httplib2 0.8 permissions
+ # Don't specify --upgrade so we use the existing package if present
+ pip_install httplib2
+ PACKAGE_DIR=$(get_package_path httplib2)
+ # Only fix version 0.8
+ dir=$(echo $PACKAGE_DIR-0.8*)
+ if [[ -d $dir ]]; then
+ sudo chmod +r $dir/*
+ fi
+}
-if is_fedora; then
+function fixup_fedora {
+ if ! is_fedora; then
+ return
+ fi
# Disable selinux to avoid configuring to allow Apache access
# to Horizon files (LP#1175444)
if selinuxenabled; then
@@ -157,7 +172,7 @@
# [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031
# [2] https://bugs.launchpad.net/neutron/+bug/1455303
# [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp
- # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html
+ # [4] https://docs.openstack.org/devstack/latest/guides/neutron.html
if is_package_installed firewalld; then
sudo systemctl disable firewalld
# The iptables service files are no longer included by default,
@@ -193,7 +208,7 @@
pip_install --upgrade --force-reinstall requests
fi
fi
-fi
+}
# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
# connection issues under proxy so re-install the latest version using
@@ -202,5 +217,32 @@
# on python-virtualenv), first install the distro python-virtualenv
# to satisfy any dependencies then use pip to overwrite it.
-install_package python-virtualenv
-pip_install -U --force-reinstall virtualenv
+# ... but, for infra builds, the pip-and-virtualenv [1] element has
+# already done this to ensure the latest pip, virtualenv and
+# setuptools on the base image for all platforms. It has also added
+# the packages to the yum/dnf ignore list to prevent them being
+# overwritten with old versions. F26 and dnf 2.0 have changed
+# behaviour that means re-installing python-virtualenv fails [2].
+# Thus we do a quick check if we're in the infra environment by
+# looking for the mirror config script before doing this, and just
+# skip it if so.
+
+# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \
+# diskimage_builder/elements/pip-and-virtualenv/ \
+# install.d/pip-and-virtualenv-source-install/04-install-pip
+# [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
+
+function fixup_virtualenv {
+ if [[ ! -f /etc/ci/mirror_info.sh ]]; then
+ install_package python-virtualenv
+ pip_install -U --force-reinstall virtualenv
+ fi
+}
+
+function fixup_all {
+ fixup_keystone
+ fixup_uca
+ fixup_python_packages
+ fixup_fedora
+ fixup_virtualenv
+}
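
With the logic wrapped in functions, the expectation is that callers source the script and invoke the entry point; a sketch, assuming TOP_DIR points at the DevStack tree:

    source $TOP_DIR/tools/fixup_stuff.sh
    fixup_all
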
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 27b3d46..3a27c4a 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -1,5 +1,14 @@
#!/bin/bash
+# Print out a list of images and other files to download for caching.
+# This is mostly used by the OpenStack infrastructure during daily
+# image builds to save the large images to /opt/cache/files (see [1])
+#
+# The two lists of URLs downloaded are IMAGE_URLS and
+# EXTRA_CACHE_URLS, which are set up in stackrc
+#
+# [1] project-config:nodepool/elements/cache-devstack/extra-data.d/55-cache-devstack-repos
+
# Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
@@ -31,12 +40,20 @@
ALL_IMAGES+=$URLS
done
-# Make a nice list
-echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
-
# Sanity check - ensure we have a minimum number of images
num=$(echo $ALL_IMAGES | tr ',' '\n' | sort | uniq | wc -l)
-if [[ "$num" -lt 5 ]]; then
+if [[ "$num" -lt 4 ]]; then
echo "ERROR: We only found $num images in $ALL_IMAGES, which can't be right."
exit 1
fi
+
+# These are extra non-image files that we want pre-cached. They are
+# kept in a separate list because devstack loops over IMAGE_URLS to
+# upload files to glance and these aren't images. (This was a bit of
+# an afterthought, which is why the naming around this is very
+# image-centric.)
+URLS=$(source $TOP_DIR/stackrc && echo $EXTRA_CACHE_URLS)
+ALL_IMAGES+=$URLS
+
+# Make a nice combined list
+echo $ALL_IMAGES | tr ',' '\n' | sort | uniq
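
Usage is unchanged: running the script prints the combined, de-duplicated list of image and extra-cache URLs, one per line:

    ./tools/image_list.sh
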
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index dbe5278..1bd7392 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -129,10 +129,10 @@
# Eradicate any and all system packages
-# Python in fedora depends on the python-pip package so removing it
+# Python in fedora/suse depends on the python-pip package so removing it
# results in a nonfunctional system. pip on fedora installs to /usr so pip
# can safely override the system pip for all versions of fedora
-if ! is_fedora ; then
+if ! is_fedora && ! is_suse; then
uninstall_package python-pip
uninstall_package python3-pip
fi
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index cbdeb8f..63f25ca 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -88,7 +88,7 @@
# list processes that lock memory from swap
if [[ $unevictable -ne $unevictable_point ]]; then
unevictable_point=$unevictable
- ${PYTHON} ./tools/mlock_report.py
+ ${PYTHON} $(dirname $0)/mlock_report.py
fi
echo "]]]"
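
Using $(dirname $0) instead of a hard-coded ./tools path means the tracker no longer has to be launched from the repository root; for example, assuming a standard /opt/stack checkout:

    /opt/stack/devstack/tools/memory_tracker.sh
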
diff --git a/tools/mlock_report.py b/tools/mlock_report.py
index 2169cc2..07716b0 100755
--- a/tools/mlock_report.py
+++ b/tools/mlock_report.py
@@ -3,12 +3,12 @@
# This tool lists processes that lock memory pages from swapping to disk.
import re
-import subprocess
import psutil
-SUMMARY_REGEX = re.compile(b".*\s+(?P<locked>[\d]+)\s+KB")
+LCK_SUMMARY_REGEX = re.compile(
+    r"^VmLck:\s+(?P<locked>[\d]+)\s+kB", re.MULTILINE)
def main():
@@ -22,28 +22,21 @@
def _get_report():
mlock_users = []
for proc in psutil.process_iter():
- pid = proc.pid
# sadly psutil does not expose locked pages info, that's why we
- # call to pmap and parse the output here
+ # iterate over the /proc/%pid/status files manually
try:
- out = subprocess.check_output(['pmap', '-XX', str(pid)])
- except subprocess.CalledProcessError as e:
- # 42 means process just vanished, which is ok
- if e.returncode == 42:
- continue
- raise
- last_line = out.splitlines()[-1]
-
- # some processes don't provide a memory map, for example those
- # running as kernel services, so we need to skip those that don't
- # match
- result = SUMMARY_REGEX.match(last_line)
- if result:
- locked = int(result.group('locked'))
- if locked:
- mlock_users.append({'name': proc.name(),
- 'pid': pid,
- 'locked': locked})
+ s = open("%s/%d/status" % (psutil.PROCFS_PATH, proc.pid), 'r')
+ except EnvironmentError:
+ continue
+ with s:
+ for line in s:
+ result = LCK_SUMMARY_REGEX.search(line)
+ if result:
+ locked = int(result.group('locked'))
+ if locked:
+ mlock_users.append({'name': proc.name(),
+ 'pid': proc.pid,
+ 'locked': locked})
# produce a single line log message with per process mlock stats
if mlock_users:
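
The per-process data now comes straight from the /proc status files; the shell equivalent of what the regex extracts is simply:

    grep VmLck /proc/self/status
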
diff --git a/tools/outfilter.py b/tools/outfilter.py
index f82939b..cf09124 100755
--- a/tools/outfilter.py
+++ b/tools/outfilter.py
@@ -36,6 +36,13 @@
parser.add_argument('-o', '--outfile',
help='Output file for content',
default=None)
+ # NOTE(ianw): This is intended for the case where your stdout is
+ # being captured by something like ansible which independently
+    # logs timestamps on the lines it receives. Note that if using an
+    # output file, those log lines are still timestamped.
+ parser.add_argument('-b', '--no-timestamp', action='store_true',
+ help='Do not prefix stdout with timestamp (bare)',
+ default=False)
parser.add_argument('-v', '--verbose', action='store_true',
default=False)
return parser.parse_args()
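
A sketch of the intended use from an Ansible-driven job: stdout stays bare so the harness can add its own timestamps, while the log file keeps DevStack's; the command and paths are illustrative:

    some_command | python tools/outfilter.py -v -b -o /opt/stack/logs/devstacklog.txt
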
@@ -50,33 +57,45 @@
opts = get_options()
outfile = None
if opts.outfile:
- outfile = open(opts.outfile, 'a', 0)
+ # note, binary mode so we can do unbuffered output.
+ outfile = open(opts.outfile, 'ab', 0)
# Otherwise fileinput reprocess args as files
sys.argv = []
- while True:
- line = sys.stdin.readline()
- if not line:
- return 0
+ for line in iter(sys.stdin.readline, ''):
# put skip lines here
if skip_line(line):
continue
- # This prevents us from nesting date lines, because
- # we'd like to pull this in directly in Grenade and not double
- # up on DevStack lines
+ # This prevents us from nesting date lines, because we'd like
+ # to pull this in directly in Grenade and not double up on
+ # DevStack lines.
+        # NOTE(ianw): we could actually strip the extra timestamp in
+        # "bare" mode (which was added after this). As we get more
+        # experience with zuulv3 native jobs and ansible capture it
+        # may become clearer what to do.
if HAS_DATE.search(line) is None:
now = datetime.datetime.utcnow()
- line = ("%s | %s" % (
+ ts_line = ("%s | %s" % (
now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3],
line))
+ else:
+ ts_line = line
if opts.verbose:
- sys.stdout.write(line)
+ sys.stdout.write(line if opts.no_timestamp else ts_line)
sys.stdout.flush()
+
if outfile:
- outfile.write(line)
+ # We've opened outfile as a binary file to get the
+            # non-buffered behaviour. On python3, sys.stdin was
+            # opened with the system encoding and decoded the line
+            # to text, so write the logfile out as utf-8 bytes.
+ if sys.version_info < (3,):
+ outfile.write(ts_line)
+ else:
+ outfile.write(ts_line.encode('utf-8'))
outfile.flush()
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 6fff149..7506082 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -164,8 +164,7 @@
_header("Network Dump")
_dump_cmd("brctl show")
- _dump_cmd("arp -n")
- ip_cmds = ["addr", "link", "route"]
+ ip_cmds = ["neigh", "addr", "link", "route"]
for cmd in ip_cmds + ['netns']:
_dump_cmd("ip %s" % cmd)
for netns_ in _netns_list():
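
The neigh subcommand subsumes the removed "arp -n" dump; the equivalent manual invocation is:

    ip neigh
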
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 9559e77..22263bb 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,173 +1,3 @@
-# Getting Started With XenServer and Devstack
+Note: XenServer-related tools have been moved to `os-xenapi`_ and are maintained there.
-The purpose of the code in this directory it to help developers bootstrap a
-XenServer 6.2 (older versions may also work) + OpenStack development
-environment. This file gives some pointers on how to get started.
-
-Xenserver is a Type 1 hypervisor, so it is best installed on bare metal. The
-OpenStack services are configured to run within a virtual machine (called OS
-domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with
-the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`).
-
-The provided localrc helps to build a basic environment.
-
-## Introduction
-
-### Requirements
-
- - An internet-enabled network with a DHCP server on it
- - XenServer box plugged in to the same network
-This network will be used as the OpenStack management network. The VM Network
-and the Public Network will not be connected to any physical interfaces, only
-new virtual networks will be created by the `install_os_domU.sh` script.
-
-### Steps to follow
-
- - Install XenServer
- - Download Devstack to XenServer
- - Customise `localrc`
- - Start `install_os_domU.sh` script
-
-### Brief explanation
-
-The `install_os_domU.sh` script will:
- - Setup XenAPI plugins
- - Create the named networks, if they don't exist
- - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse
- it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network
- interface:
- - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to
- `MGT_BRIDGE_OR_NET_NAME`
- - After the Ubuntu install process finished, the network configuration is
- modified to:
- - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi
- must be accessible through this network.
- - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`
- - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`
- - Start devstack inside the created OpenStack VM
-
-## Step 1: Install Xenserver
-Install XenServer on a clean box. You can download the latest XenServer for
-free from: http://www.xenserver.org/
-
-The XenServer IP configuration depends on your local network setup. If you are
-using dhcp, make a reservation for XenServer, so its IP address won't change
-over time. Make a note of the XenServer's IP address, as it has to be specified
-in `localrc`. The other option is to manually specify the IP setup for the
-XenServer box. Please make sure, that a gateway and a nameserver is configured,
-as `install_os_domU.sh` will connect to github.com to get source-code snapshots.
-
-## Step 2: Download devstack
-On your XenServer host, run the following commands as root:
-
- wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
- unzip -o master -d ./devstack
- cd devstack/*/
-
-## Step 3: Configure your localrc inside the devstack directory
-Devstack uses a localrc for user-specific configuration. Note that
-the `XENAPI_PASSWORD` must be your dom0 root password.
-Of course, use real passwords if this machine is exposed.
-
- cat > ./localrc <<EOF
- # At the moment, we depend on github's snapshot function.
- GIT_BASE="http://github.com"
-
- # Passwords
- # NOTE: these need to be specified, otherwise devstack will try
- # to prompt for these passwords, blocking the install process.
-
- DATABASE_PASSWORD=my_super_secret
- ADMIN_PASSWORD=my_super_secret
- SERVICE_PASSWORD=my_super_secret
- RABBIT_PASSWORD=my_super_secret
- SWIFT_HASH="66a3d6b56c1f479c8b4e70ab5c2000f5"
- # This will be the password for the OpenStack VM (both stack and root users)
- GUEST_PASSWORD=my_super_secret
-
- # XenAPI parameters
- # NOTE: The following must be set to your XenServer root password!
-
- XENAPI_PASSWORD=my_xenserver_root_password
-
- XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
- VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
-
- # Explicitly set virt driver
- VIRT_DRIVER=xenserver
-
- # Explicitly enable multi-host for nova-network HA
- MULTI_HOST=1
-
- # Give extra time for boot
- ACTIVE_TIMEOUT=45
-
- EOF
-
-## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory
-
- cd tools/xen
- ./install_os_domU.sh
-
-Once this script finishes executing, log into the VM (openstack domU) that it
-installed and tail the run.sh.log file. You will need to wait until it run.sh
-has finished executing.
-
-# Appendix
-
-This section contains useful information for running devstack in CI
-environments / using ubuntu network mirrors.
-
-## Use a specific Ubuntu mirror for installation
-
-To speed up the Ubuntu installation, you can use a specific mirror. To specify
-a mirror explicitly, include the following settings in your `localrc` file:
-
- UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
- UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
-
-These variables set the `mirror/http/hostname` and `mirror/http/directory`
-settings in the ubuntu preseed file. The minimal ubuntu VM will use the
-specified parameters.
-
-## Use an http proxy to speed up Ubuntu installation
-
-To further speed up the Ubuntu VM and package installation, an internal http
-proxy could be used. `squid-deb-proxy` has prooven to be stable. To use an http
-proxy, specify:
-
- UBUNTU_INST_HTTP_PROXY="http://ubuntu-proxy.somedomain.com:8000"
-
-in your `localrc` file.
-
-## Reuse the Ubuntu VM
-
-Performing a minimal ubuntu installation could take a lot of time, depending on
-your mirror/network speed. If you run `install_os_domU.sh` script on a clean
-hypervisor, you can speed up the installation, by re-using the ubuntu vm from
-a previous installation.
-
-### Export the Ubuntu VM to an XVA
-
-Given you have an nfs export `TEMPLATE_NFS_DIR`:
-
- TEMPLATE_FILENAME=devstack-jeos.xva
- TEMPLATE_NAME=jeos_template_for_devstack
- mountdir=$(mktemp -d)
- mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
- VM="$(xe template-list name-label="$TEMPLATE_NAME" --minimal)"
- xe template-export template-uuid=$VM filename="$mountdir/$TEMPLATE_FILENAME"
- umount "$mountdir"
- rm -rf "$mountdir"
-
-### Import the Ubuntu VM
-
-Given you have an nfs export `TEMPLATE_NFS_DIR` where you exported the Ubuntu
-VM as `TEMPLATE_FILENAME`:
-
- mountdir=$(mktemp -d)
- mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
- xe vm-import filename="$mountdir/$TEMPLATE_FILENAME"
- umount "$mountdir"
- rm -rf "$mountdir"
-
+.. _os-xenapi: https://github.com/openstack/os-xenapi/
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
deleted file mode 100755
index 34ef719..0000000
--- a/tools/xen/build_xva.sh
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/bin/bash
-
-# This script is run by install_os_domU.sh
-#
-# It modifies the ubuntu image created by install_os_domU.sh
-# and previously moodified by prepare_guest_template.sh
-#
-# This script is responsible for:
-# - pushing in the DevStack code
-# - creating run.sh, to run the code on boot
-# It does this by mounting the disk image of the VM.
-#
-# The resultant image is then templated and started
-# by install_os_domU.sh
-
-# Exit on errors
-set -o errexit
-# Echo commands
-set -o xtrace
-
-# This directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $TOP_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $TOP_DIR/functions
-
-# Source params - override xenrc params in your localrc to suite your taste
-source xenrc
-
-#
-# Parameters
-#
-GUEST_NAME="$1"
-
-function _print_interface_config {
- local device_nr
- local ip_address
- local netmask
-
- device_nr="$1"
- ip_address="$2"
- netmask="$3"
-
- local device
-
- device="eth${device_nr}"
-
- echo "auto $device"
- if [ $ip_address == "dhcp" ]; then
- echo "iface $device inet dhcp"
- else
- echo "iface $device inet static"
- echo " address $ip_address"
- echo " netmask $netmask"
- fi
-
- # Turn off tx checksumming for better performance
- echo " post-up ethtool -K $device tx off"
-}
-
-function print_interfaces_config {
- echo "auto lo"
- echo "iface lo inet loopback"
-
- _print_interface_config $PUB_DEV_NR $PUB_IP $PUB_NETMASK
- _print_interface_config $VM_DEV_NR $VM_IP $VM_NETMASK
- _print_interface_config $MGT_DEV_NR $MGT_IP $MGT_NETMASK
-}
-
-#
-# Mount the VDI
-#
-STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
-add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
-
-# Make sure we have a stage
-if [ ! -d $STAGING_DIR/etc ]; then
- echo "Stage is not properly set up!"
- exit 1
-fi
-
-# Only support DHCP for now - don't support how different versions of Ubuntu handle resolv.conf
-if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then
- echo "Configuration without DHCP not supported"
- exit 1
-fi
-
-# Copy over devstack
-rm -f /tmp/devstack.tar
-cd $TOP_DIR/../../
-tar --exclude='stage' --exclude='xen/xvas' --exclude='xen/nova' -cvf /tmp/devstack.tar .
-mkdir -p $STAGING_DIR/opt/stack/devstack
-tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack
-cd $TOP_DIR
-
-# Create an systemd task for devstack
-cat >$STAGING_DIR/etc/systemd/system/devstack.service << EOF
-[Unit]
-Description=Install OpenStack by DevStack
-
-[Service]
-Type=oneshot
-RemainAfterExit=yes
-ExecStartPre=/bin/rm -f /opt/stack/runsh.succeeded
-ExecStart=/bin/su -c "/opt/stack/run.sh" stack
-StandardOutput=tty
-StandardError=tty
-
-[Install]
-WantedBy=multi-user.target
-
-EOF
-
-# enable this service
-ln -s $STAGING_DIR/etc/systemd/system/devstack.service $STAGING_DIR/etc/systemd/system/multi-user.target.wants/devstack.service
-
-# Configure the hostname
-echo $GUEST_NAME > $STAGING_DIR/etc/hostname
-
-# Hostname must resolve for rabbit
-HOSTS_FILE_IP=$PUB_IP
-if [ $MGT_IP != "dhcp" ]; then
- HOSTS_FILE_IP=$MGT_IP
-fi
-cat <<EOF >$STAGING_DIR/etc/hosts
-$HOSTS_FILE_IP $GUEST_NAME
-127.0.0.1 localhost localhost.localdomain
-EOF
-
-# Configure the network
-print_interfaces_config > $STAGING_DIR/etc/network/interfaces
-
-# Gracefully cp only if source file/dir exists
-function cp_it {
- if [ -e $1 ] || [ -d $1 ]; then
- cp -pRL $1 $2
- fi
-}
-
-# Copy over your ssh keys and env if desired
-COPYENV=${COPYENV:-1}
-if [ "$COPYENV" = "1" ]; then
- cp_it ~/.ssh $STAGING_DIR/opt/stack/.ssh
- cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/opt/stack/.ssh/authorized_keys
- cp_it ~/.gitconfig $STAGING_DIR/opt/stack/.gitconfig
- cp_it ~/.vimrc $STAGING_DIR/opt/stack/.vimrc
- cp_it ~/.bashrc $STAGING_DIR/opt/stack/.bashrc
-fi
-
-# Configure run.sh
-cat <<EOF >$STAGING_DIR/opt/stack/run.sh
-#!/bin/bash
-set -eux
-(
- flock -n 9 || exit 1
-
- sudo chown -R stack /opt/stack
-
- [ -e /opt/stack/runsh.succeeded ] && rm /opt/stack/runsh.succeeded
- echo \$\$ >> /opt/stack/run_sh.pid
-
- cd /opt/stack/devstack
- ./unstack.sh || true
- ./stack.sh
-
- # Got to the end - success
- touch /opt/stack/runsh.succeeded
-
- # Update /etc/issue
- (
- echo "OpenStack VM - Installed by DevStack"
- IPADDR=$(ip -4 address show eth0 | sed -n 's/.*inet \([0-9\.]\+\).*/\1/p')
- echo " Management IP: $IPADDR"
- echo -n " Devstack run: "
- if [ -e /opt/stack/runsh.succeeded ]; then
- echo "SUCCEEDED"
- else
- echo "FAILED"
- fi
- echo ""
- ) > /opt/stack/issue
- sudo cp /opt/stack/issue /etc/issue
-
- rm /opt/stack/run_sh.pid
-) 9> /opt/stack/.runsh_lock
-EOF
-
-chmod 755 $STAGING_DIR/opt/stack/run.sh
diff --git a/tools/xen/devstackubuntu_latecommand.sh b/tools/xen/devstackubuntu_latecommand.sh
deleted file mode 100644
index 2afbe2c..0000000
--- a/tools/xen/devstackubuntu_latecommand.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -eux
-
-# Need to set barrier=0 to avoid a Xen bug
-# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/824089
-sed -i -e 's/errors=/barrier=0,errors=/' /etc/fstab
-
-# Allow root to login with a password
-sed -i -e 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config
-
-# Install the XenServer tools so IP addresses are reported
-wget --no-proxy @XS_TOOLS_URL@ -O /root/tools.deb
-dpkg -i /root/tools.deb
-rm /root/tools.deb
diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg
deleted file mode 100644
index 80f334b..0000000
--- a/tools/xen/devstackubuntupreseed.cfg
+++ /dev/null
@@ -1,471 +0,0 @@
-### Contents of the preconfiguration file (for squeeze)
-### Localization
-# Preseeding only locale sets language, country and locale.
-d-i debian-installer/locale string en_US
-
-# The values can also be preseeded individually for greater flexibility.
-#d-i debian-installer/language string en
-#d-i debian-installer/country string NL
-#d-i debian-installer/locale string en_GB.UTF-8
-# Optionally specify additional locales to be generated.
-#d-i localechooser/supported-locales en_US.UTF-8, nl_NL.UTF-8
-
-# Keyboard selection.
-# Disable automatic (interactive) keymap detection.
-d-i console-setup/ask_detect boolean false
-#d-i keyboard-configuration/modelcode string pc105
-d-i keyboard-configuration/layoutcode string us
-# To select a variant of the selected layout (if you leave this out, the
-# basic form of the layout will be used):
-#d-i keyboard-configuration/variantcode string dvorak
-
-### Network configuration
-# Disable network configuration entirely. This is useful for cdrom
-# installations on non-networked devices where the network questions,
-# warning and long timeouts are a nuisance.
-#d-i netcfg/enable boolean false
-
-# netcfg will choose an interface that has link if possible. This makes it
-# skip displaying a list if there is more than one interface.
-d-i netcfg/choose_interface select auto
-
-# To pick a particular interface instead:
-#d-i netcfg/choose_interface select eth1
-
-# If you have a slow dhcp server and the installer times out waiting for
-# it, this might be useful.
-d-i netcfg/dhcp_timeout string 120
-
-# If you prefer to configure the network manually, uncomment this line and
-# the static network configuration below.
-#d-i netcfg/disable_autoconfig boolean true
-
-# If you want the preconfiguration file to work on systems both with and
-# without a dhcp server, uncomment these lines and the static network
-# configuration below.
-#d-i netcfg/dhcp_failed note
-#d-i netcfg/dhcp_options select Configure network manually
-
-# Static network configuration.
-#d-i netcfg/get_nameservers string 192.168.1.1
-#d-i netcfg/get_ipaddress string 192.168.1.42
-#d-i netcfg/get_netmask string 255.255.255.0
-#d-i netcfg/get_gateway string 192.168.1.1
-#d-i netcfg/confirm_static boolean true
-
-# Any hostname and domain names assigned from dhcp take precedence over
-# values set here. However, setting the values still prevents the questions
-# from being shown, even if values come from dhcp.
-d-i netcfg/get_hostname string stack
-d-i netcfg/get_domain string stackpass
-
-# Disable that annoying WEP key dialog.
-d-i netcfg/wireless_wep string
-# The wacky dhcp hostname that some ISPs use as a password of sorts.
-#d-i netcfg/dhcp_hostname string radish
-
-# If non-free firmware is needed for the network or other hardware, you can
-# configure the installer to always try to load it, without prompting. Or
-# change to false to disable asking.
-#d-i hw-detect/load_firmware boolean true
-
-### Network console
-# Use the following settings if you wish to make use of the network-console
-# component for remote installation over SSH. This only makes sense if you
-# intend to perform the remainder of the installation manually.
-#d-i anna/choose_modules string network-console
-#d-i network-console/password password r00tme
-#d-i network-console/password-again password r00tme
-
-### Mirror settings
-# If you select ftp, the mirror/country string does not need to be set.
-#d-i mirror/protocol string ftp
-d-i mirror/country string manual
-d-i mirror/http/hostname string archive.ubuntu.com
-d-i mirror/http/directory string /ubuntu
-d-i mirror/http/proxy string
-
-# Alternatively: by default, the installer uses CC.archive.ubuntu.com where
-# CC is the ISO-3166-2 code for the selected country. You can preseed this
-# so that it does so without asking.
-#d-i mirror/http/mirror select CC.archive.ubuntu.com
-
-# Suite to install.
-#d-i mirror/suite string squeeze
-# Suite to use for loading installer components (optional).
-#d-i mirror/udeb/suite string squeeze
-# Components to use for loading installer components (optional).
-#d-i mirror/udeb/components multiselect main, restricted
-
-### Clock and time zone setup
-# Controls whether or not the hardware clock is set to UTC.
-d-i clock-setup/utc boolean true
-
-# You may set this to any valid setting for $TZ; see the contents of
-# /usr/share/zoneinfo/ for valid values.
-d-i time/zone string US/Pacific
-
-# Controls whether to use NTP to set the clock during the install
-d-i clock-setup/ntp boolean true
-# NTP server to use. The default is almost always fine here.
-d-i clock-setup/ntp-server string 0.us.pool.ntp.org
-
-### Partitioning
-## Partitioning example
-# If the system has free space you can choose to only partition that space.
-# This is only honoured if partman-auto/method (below) is not set.
-# Alternatives: custom, some_device, some_device_crypto, some_device_lvm.
-#d-i partman-auto/init_automatically_partition select biggest_free
-
-# Alternatively, you may specify a disk to partition. If the system has only
-# one disk the installer will default to using that, but otherwise the device
-# name must be given in traditional, non-devfs format (so e.g. /dev/hda or
-# /dev/sda, and not e.g. /dev/discs/disc0/disc).
-# For example, to use the first SCSI/SATA hard disk:
-#d-i partman-auto/disk string /dev/sda
-# In addition, you'll need to specify the method to use.
-# The presently available methods are:
-# - regular: use the usual partition types for your architecture
-# - lvm: use LVM to partition the disk
-# - crypto: use LVM within an encrypted partition
-d-i partman-auto/method string regular
-
-# If one of the disks that are going to be automatically partitioned
-# contains an old LVM configuration, the user will normally receive a
-# warning. This can be preseeded away...
-d-i partman-lvm/device_remove_lvm boolean true
-# The same applies to pre-existing software RAID array:
-d-i partman-md/device_remove_md boolean true
-# And the same goes for the confirmation to write the lvm partitions.
-d-i partman-lvm/confirm boolean true
-
-# For LVM partitioning, you can select how much of the volume group to use
-# for logical volumes.
-#d-i partman-auto-lvm/guided_size string max
-#d-i partman-auto-lvm/guided_size string 10GB
-#d-i partman-auto-lvm/guided_size string 50%
-
-# You can choose one of the three predefined partitioning recipes:
-# - atomic: all files in one partition
-# - home: separate /home partition
-# - multi: separate /home, /usr, /var, and /tmp partitions
-d-i partman-auto/choose_recipe select atomic
-
-# Or provide a recipe of your own...
-# If you have a way to get a recipe file into the d-i environment, you can
-# just point at it.
-#d-i partman-auto/expert_recipe_file string /hd-media/recipe
-
-# If not, you can put an entire recipe into the preconfiguration file in one
-# (logical) line. This example creates a small /boot partition, suitable
-# swap, and uses the rest of the space for the root partition:
-#d-i partman-auto/expert_recipe string \
-# boot-root :: \
-# 40 50 100 ext3 \
-# $primary{ } $bootable{ } \
-# method{ format } format{ } \
-# use_filesystem{ } filesystem{ ext3 } \
-# mountpoint{ /boot } \
-# . \
-# 500 10000 1000000000 ext3 \
-# method{ format } format{ } \
-# use_filesystem{ } filesystem{ ext3 } \
-# mountpoint{ / } \
-# . \
-# 64 512 300% linux-swap \
-# method{ swap } format{ } \
-# .
-
-# If you just want to change the default filesystem from ext3 to something
-# else, you can do that without providing a full recipe.
-d-i partman/default_filesystem string ext3
-
-# The full recipe format is documented in the file partman-auto-recipe.txt
-# included in the 'debian-installer' package or available from D-I source
-# repository. This also documents how to specify settings such as file
-# system labels, volume group names and which physical devices to include
-# in a volume group.
-
-# This makes partman automatically partition without confirmation, provided
-# that you told it what to do using one of the methods above.
-d-i partman-partitioning/confirm_write_new_label boolean true
-d-i partman/choose_partition select finish
-d-i partman/confirm boolean true
-d-i partman/confirm_nooverwrite boolean true
-
-## Partitioning using RAID
-# The method should be set to "raid".
-#d-i partman-auto/method string raid
-# Specify the disks to be partitioned. They will all get the same layout,
-# so this will only work if the disks are the same size.
-#d-i partman-auto/disk string /dev/sda /dev/sdb
-
-# Next you need to specify the physical partitions that will be used.
-#d-i partman-auto/expert_recipe string \
-# multiraid :: \
-# 1000 5000 4000 raid \
-# $primary{ } method{ raid } \
-# . \
-# 64 512 300% raid \
-# method{ raid } \
-# . \
-# 500 10000 1000000000 raid \
-# method{ raid } \
-# .
-
-# Last you need to specify how the previously defined partitions will be
-# used in the RAID setup. Remember to use the correct partition numbers
-# for logical partitions. RAID levels 0, 1, 5, 6 and 10 are supported;
-# devices are separated using "#".
-# Parameters are:
-# <raidtype> <devcount> <sparecount> <fstype> <mountpoint> \
-# <devices> <sparedevices>
-
-#d-i partman-auto-raid/recipe string \
-# 1 2 0 ext3 / \
-# /dev/sda1#/dev/sdb1 \
-# . \
-# 1 2 0 swap - \
-# /dev/sda5#/dev/sdb5 \
-# . \
-# 0 2 0 ext3 /home \
-# /dev/sda6#/dev/sdb6 \
-# .
-
-# For additional information see the file partman-auto-raid-recipe.txt
-# included in the 'debian-installer' package or available from D-I source
-# repository.
-
-# This makes partman automatically partition without confirmation.
-d-i partman-md/confirm boolean true
-d-i partman-partitioning/confirm_write_new_label boolean true
-d-i partman/choose_partition select finish
-d-i partman/confirm boolean true
-d-i partman/confirm_nooverwrite boolean true
-
-## Controlling how partitions are mounted
-# The default is to mount by UUID, but you can also choose "traditional" to
-# use traditional device names, or "label" to try filesystem labels before
-# falling back to UUIDs.
-#d-i partman/mount_style select uuid
-
-### Base system installation
-# Configure APT to not install recommended packages by default. Use of this
-# option can result in an incomplete system and should only be used by very
-# experienced users.
-#d-i base-installer/install-recommends boolean false
-
-# The kernel image (meta) package to be installed; "none" can be used if no
-# kernel is to be installed.
-d-i base-installer/kernel/image string linux-virtual
-
-### Account setup
-# Skip creation of a root account (normal user account will be able to
-# use sudo). The default is false; preseed this to true if you want to set
-# a root password.
-d-i passwd/root-login boolean true
-# Alternatively, to skip creation of a normal user account.
-d-i passwd/make-user boolean false
-
-# Root password, either in clear text
-d-i passwd/root-password password stackpass
-d-i passwd/root-password-again password stackpass
-# or encrypted using an MD5 hash.
-#d-i passwd/root-password-crypted password [MD5 hash]
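A crypted value for the root-password-crypted entry above can be generated
ahead of time on any Linux host; a minimal sketch (using openssl's MD5-crypt
mode is an assumption here, any crypt(3)-compatible generator works):

    # Generate an MD5-crypt hash of the clear-text password; paste the
    # resulting $1$...$... string in place of [MD5 hash].
    openssl passwd -1 'stackpass'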
-
-# To create a normal user account.
-#d-i passwd/user-fullname string Ubuntu User
-#d-i passwd/username string ubuntu
-# Normal user's password, either in clear text
-#d-i passwd/user-password password insecure
-#d-i passwd/user-password-again password insecure
-# or encrypted using an MD5 hash.
-#d-i passwd/user-password-crypted password [MD5 hash]
-# Create the first user with the specified UID instead of the default.
-#d-i passwd/user-uid string 1010
-# The installer will warn about weak passwords. If you are sure you know
-# what you're doing and want to override it, uncomment this.
-d-i user-setup/allow-password-weak boolean true
-
-# The user account will be added to some standard initial groups. To
-# override that, use this.
-#d-i passwd/user-default-groups string audio cdrom video
-
-# Set to true if you want to encrypt the first user's home directory.
-d-i user-setup/encrypt-home boolean false
-
-### Apt setup
-# You can choose to install restricted and universe software, or to install
-# software from the backports repository.
-d-i apt-setup/restricted boolean true
-d-i apt-setup/universe boolean true
-d-i apt-setup/backports boolean true
-# Uncomment this if you don't want to use a network mirror.
-#d-i apt-setup/use_mirror boolean false
-# Select which update services to use; define the mirrors to be used.
-# Values shown below are the normal defaults.
-#d-i apt-setup/services-select multiselect security
-#d-i apt-setup/security_host string security.ubuntu.com
-#d-i apt-setup/security_path string /ubuntu
-
-# Additional repositories, local[0-9] available
-#d-i apt-setup/local0/repository string \
-# http://local.server/ubuntu squeeze main
-#d-i apt-setup/local0/comment string local server
-# Enable deb-src lines
-#d-i apt-setup/local0/source boolean true
-# URL to the public key of the local repository; you must provide a key or
-# apt will complain about the unauthenticated repository and so the
-# sources.list line will be left commented out
-#d-i apt-setup/local0/key string http://local.server/key
-
-# By default the installer requires that repositories be authenticated
-# using a known gpg key. This setting can be used to disable that
-# authentication. Warning: Insecure, not recommended.
-#d-i debian-installer/allow_unauthenticated boolean true
-
-### Package selection
-#tasksel tasksel/first multiselect ubuntu-desktop
-#tasksel tasksel/first multiselect lamp-server, print-server
-#tasksel tasksel/first multiselect kubuntu-desktop
-tasksel tasksel/first multiselect openssh-server
-
-# Individual additional packages to install
-d-i pkgsel/include string cracklib-runtime curl wget ssh openssh-server tcpdump ethtool git sudo python-netaddr coreutils
-
-# Whether to upgrade packages after debootstrap.
-# Allowed values: none, safe-upgrade, full-upgrade
-d-i pkgsel/upgrade select safe-upgrade
-
-# Language pack selection
-#d-i pkgsel/language-packs multiselect de, en, zh
-
-# Policy for applying updates. May be "none" (no automatic updates),
-# "unattended-upgrades" (install security updates automatically), or
-# "landscape" (manage system with Landscape).
-d-i pkgsel/update-policy select unattended-upgrades
-
-# Some versions of the installer can report back on what software you have
-# installed, and what software you use. The default is not to report back,
-# but sending reports helps the project determine what software is most
-# popular and include it on CDs.
-#popularity-contest popularity-contest/participate boolean false
-
-# By default, the system's locate database will be updated after the
-# installer has finished installing most packages. This may take a while, so
-# if you don't want it, you can set this to "false" to turn it off.
-d-i pkgsel/updatedb boolean false
-
-### Boot loader installation
-# Grub is the default boot loader (for x86). If you want lilo installed
-# instead, uncomment this:
-#d-i grub-installer/skip boolean true
-# To also skip installing lilo, and install no bootloader, uncomment this
-# too:
-#d-i lilo-installer/skip boolean true
-
-# With a few exceptions for unusual partitioning setups, GRUB 2 is now the
-# default. If you need GRUB Legacy for some particular reason, then
-# uncomment this:
-d-i grub-installer/grub2_instead_of_grub_legacy boolean false
-
-# This is fairly safe to set; it makes grub install automatically to the MBR
-# if no other operating system is detected on the machine.
-d-i grub-installer/only_debian boolean true
-
-# This one makes grub-installer install to the MBR if it also finds some other
-# OS, which is less safe as it might not be able to boot that other OS.
-d-i grub-installer/with_other_os boolean true
-
-# Alternatively, if you want to install to a location other than the mbr,
-# uncomment and edit these lines:
-#d-i grub-installer/only_debian boolean false
-#d-i grub-installer/with_other_os boolean false
-#d-i grub-installer/bootdev string (hd0,0)
-# To install grub to multiple disks:
-#d-i grub-installer/bootdev string (hd0,0) (hd1,0) (hd2,0)
-
-# Optional password for grub, either in clear text
-#d-i grub-installer/password password r00tme
-#d-i grub-installer/password-again password r00tme
-# or encrypted using an MD5 hash, see grub-md5-crypt(8).
-#d-i grub-installer/password-crypted password [MD5 hash]
-
-# Use the following option to add additional boot parameters for the
-# installed system (if supported by the bootloader installer).
-# Note: options passed to the installer will be added automatically.
-#d-i debian-installer/add-kernel-opts string nousb
-
-### Finishing up the installation
-# During installations from serial console, the regular virtual consoles
-# (VT1-VT6) are normally disabled in /etc/inittab. Uncomment the next
-# line to prevent this.
-d-i finish-install/keep-consoles boolean true
-
-# Avoid that last message about the install being complete.
-d-i finish-install/reboot_in_progress note
-
-# This will prevent the installer from ejecting the CD during the reboot,
-# which is useful in some situations.
-#d-i cdrom-detect/eject boolean false
-
-# This is how to make the installer shutdown when finished, but not
-# reboot into the installed system.
-#d-i debian-installer/exit/halt boolean true
-# This will power off the machine instead of just halting it.
-#d-i debian-installer/exit/poweroff boolean true
-
-### X configuration
-# X can detect the right driver for some cards, but if you're preseeding,
-# you override whatever it chooses. Still, vesa will work most places.
-#xserver-xorg xserver-xorg/config/device/driver select vesa
-
-# A caveat with mouse autodetection is that if it fails, X will retry it
-# over and over. So if it's preseeded to be done, there is a possibility of
-# an infinite loop if the mouse is not autodetected.
-#xserver-xorg xserver-xorg/autodetect_mouse boolean true
-
-# Monitor autodetection is recommended.
-xserver-xorg xserver-xorg/autodetect_monitor boolean true
-# Uncomment if you have an LCD display.
-#xserver-xorg xserver-xorg/config/monitor/lcd boolean true
-# X has three configuration paths for the monitor. Here's how to preseed
-# the "medium" path, which is always available. The "simple" path may not
-# be available, and the "advanced" path asks too many questions.
-xserver-xorg xserver-xorg/config/monitor/selection-method \
- select medium
-xserver-xorg xserver-xorg/config/monitor/mode-list \
- select 1024x768 @ 60 Hz
-
-### Preseeding other packages
-# Depending on what software you choose to install, or if things go wrong
-# during the installation process, it's possible that other questions may
-# be asked. You can preseed those too, of course. To get a list of every
-# possible question that could be asked during an install, do an
-# installation, and then run these commands:
-# debconf-get-selections --installer > file
-# debconf-get-selections >> file
-
-
-#### Advanced options
-### Running custom commands during the installation
-# d-i preseeding is inherently not secure. Nothing in the installer checks
-# for attempts at buffer overflows or other exploits of the values of a
-# preconfiguration file like this one. Only use preconfiguration files from
-# trusted locations! To drive that home, and because it's generally useful,
-# here's a way to run any shell command you'd like inside the installer,
-# automatically.
-
-# This first command is run as early as possible, just after
-# preseeding is read.
-#d-i preseed/early_command string anna-install some-udeb
-# This command is run immediately before the partitioner starts. It may be
-# useful to apply dynamic partitioner preseeding that depends on the state
-# of the disks (which may not be visible when preseed/early_command runs).
-#d-i partman/early_command \
-# string debconf-set partman-auto/disk "$(list-devices disk | head -n1)"
-# This command is run just before the install finishes, but when there is
-# still a usable /target directory. You can chroot to /target and use it
-# directly, or use the apt-install and in-target commands to easily install
-# packages and run commands in the target system.
-d-i preseed/late_command string
diff --git a/tools/xen/functions b/tools/xen/functions
deleted file mode 100644
index bc0c515..0000000
--- a/tools/xen/functions
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/bin/bash
-
-function die_with_error {
- local err_msg
-
- err_msg="$1"
-
- echo "$err_msg" >&2
- exit 1
-}
-
-function xapi_plugin_location {
- for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins" "/usr/lib64/xapi/plugins"; do
- if [ -d $PLUGIN_DIR ]; then
- echo $PLUGIN_DIR
- return 0
- fi
- done
- return 1
-}
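Callers use this helper to locate wherever the host keeps its XenAPI plugins;
a hypothetical sketch (the nova plugin source path is an assumption, not part
of this file):

    # Copy the nova XenAPI plugins into the first plugin directory found.
    PLUGIN_DIR=$(xapi_plugin_location) || die_with_error "No xapi plugin directory found"
    cp /opt/stack/nova/plugins/xenserver/xenapi/etc/xapi.d/plugins/* "$PLUGIN_DIR"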
-
-function create_directory_for_kernels {
- if [ -d "/boot/guest" ]; then
- echo "INFO: /boot/guest directory already exists, using that" >&2
- else
- local local_path
- local_path="$(get_local_sr_path)/os-guest-kernels"
- mkdir -p $local_path
- ln -s $local_path /boot/guest
- fi
-}
-
-function create_directory_for_images {
- if [ -d "/images" ]; then
- echo "INFO: /images directory already exists, using that" >&2
- else
- local local_path
- local_path="$(get_local_sr_path)/os-images"
- mkdir -p $local_path
- ln -s $local_path /images
- fi
-}
-
-function get_local_sr {
- xe pool-list params=default-SR minimal=true
-}
-
-function get_local_sr_path {
- pbd_path="/var/run/sr-mount/$(get_local_sr)"
- pbd_device_config_path=`xe pbd-list sr-uuid=$(get_local_sr) params=device-config | grep " path: "`
- if [ -n "$pbd_device_config_path" ]; then
- pbd_uuid=`xe pbd-list sr-uuid=$(get_local_sr) minimal=true`
- pbd_path=`xe pbd-param-get uuid=$pbd_uuid param-name=device-config param-key=path || echo ""`
- fi
- echo $pbd_path
-}
-
-function find_ip_by_name {
- local guest_name="$1"
- local interface="$2"
-
- local period=10
- local max_tries=10
- local i=0
-
- while true; do
- if [ $i -ge $max_tries ]; then
- echo "Timeout: ip address for interface $interface of $guest_name"
- exit 11
- fi
-
- ipaddress=$(xe vm-list --minimal \
- name-label=$guest_name \
- params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p")
-
- if [ -z "$ipaddress" ]; then
- sleep $period
- i=$((i+1))
- else
- echo $ipaddress
- break
- fi
- done
-}
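The loop above polls every 10 seconds and gives up after 10 tries, so a
caller waits at most about 100 seconds. A usage sketch (the guest name
default comes from install_os_domU.sh later in this patch; the device
number is an assumption):

    # Resolve the address the guest tools report for the given interface.
    MGMT_IP=$(find_ip_by_name "DevStackOSDomU" 2)
    echo "Management address: $MGMT_IP"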
-
-function _vm_uuid {
- local vm_name_label
-
- vm_name_label="$1"
-
- xe vm-list name-label="$vm_name_label" --minimal
-}
-
-function _create_new_network {
- local name_label
- name_label=$1
-
- xe network-create name-label="$name_label"
-}
-
-function _multiple_networks_with_name {
- local name_label
- name_label=$1
-
- # A comma indicates multiple matches
- xe network-list name-label="$name_label" --minimal | grep -q ","
-}
-
-function _network_exists {
- local name_label
- name_label=$1
-
- ! [ -z "$(xe network-list name-label="$name_label" --minimal)" ]
-}
-
-function _bridge_exists {
- local bridge
- bridge=$1
-
- ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ]
-}
-
-function _network_uuid {
- local bridge_or_net_name
- bridge_or_net_name=$1
-
- if _bridge_exists "$bridge_or_net_name"; then
- xe network-list bridge="$bridge_or_net_name" --minimal
- else
- xe network-list name-label="$bridge_or_net_name" --minimal
- fi
-}
-
-function add_interface {
- local vm_name_label
- local bridge_or_network_name
-
- vm_name_label="$1"
- bridge_or_network_name="$2"
- device_number="$3"
-
- local vm
- local net
-
- vm=$(_vm_uuid "$vm_name_label")
- net=$(_network_uuid "$bridge_or_network_name")
- xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
-}
-
-function setup_network {
- local bridge_or_net_name
- bridge_or_net_name=$1
-
- if ! _bridge_exists "$bridge_or_net_name"; then
- if _network_exists "$bridge_or_net_name"; then
- if _multiple_networks_with_name "$bridge_or_net_name"; then
- cat >&2 << EOF
-ERROR: Multiple networks found matching the name-label "$bridge_or_net_name";
-please review your XenServer network configuration / localrc file.
-EOF
- exit 1
- fi
- else
- _create_new_network "$bridge_or_net_name"
- fi
- fi
-}
-
-function bridge_for {
- local bridge_or_net_name
- bridge_or_net_name=$1
-
- if _bridge_exists "$bridge_or_net_name"; then
- echo "$bridge_or_net_name"
- else
- xe network-list name-label="$bridge_or_net_name" params=bridge --minimal
- fi
-}
-
-function xenapi_ip_on {
- local bridge_or_net_name
- bridge_or_net_name=$1
-
- ip -4 addr show $(bridge_for "$bridge_or_net_name") |\
- awk '/inet/{split($2, ip, "/"); print ip[1];}'
-}
-
-function xenapi_is_listening_on {
- local bridge_or_net_name
- bridge_or_net_name=$1
-
- ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ]
-}
-
-function parameter_is_specified {
- local parameter_name
- parameter_name=$1
-
- compgen -v | grep "$parameter_name"
-}
-
-function append_kernel_cmdline {
- local vm_name_label
- local kernel_args
-
- vm_name_label="$1"
- kernel_args="$2"
-
- local vm
- local pv_args
-
- vm=$(_vm_uuid "$vm_name_label")
- pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm)
- xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
-}
-
-function destroy_all_vifs_of {
- local vm_name_label
-
- vm_name_label="$1"
-
- local vm
-
- vm=$(_vm_uuid "$vm_name_label")
- IFS=,
- for vif in $(xe vif-list vm-uuid=$vm --minimal); do
- xe vif-destroy uuid="$vif"
- done
- unset IFS
-}
-
-function have_multiple_hosts {
- xe host-list --minimal | grep -q ","
-}
-
-function attach_network {
- local bridge_or_net_name
-
- bridge_or_net_name="$1"
-
- local net
- local host
-
- net=$(_network_uuid "$bridge_or_net_name")
- host=$(xe host-list --minimal)
-
- xe network-attach uuid=$net host-uuid=$host
-}
-
-function set_vm_memory {
- local vm_name_label
- local memory
-
- vm_name_label="$1"
- memory="$2"
-
- local vm
-
- vm=$(_vm_uuid "$vm_name_label")
-
- xe vm-memory-limits-set \
- static-min=${memory}MiB \
- static-max=${memory}MiB \
- dynamic-min=${memory}MiB \
- dynamic-max=${memory}MiB \
- uuid=$vm
-}
-
-function max_vcpus {
- local vm_name_label
-
- vm_name_label="$1"
-
- local vm
- local host
- local cpu_count
-
- host=$(xe host-list --minimal)
- vm=$(_vm_uuid "$vm_name_label")
-
- cpu_count=$(xe host-param-get \
- param-name=cpu_info \
- uuid=$host |
- sed -e 's/^.*cpu_count: \([0-9]*\);.*$/\1/g')
-
- if [ -z "$cpu_count" ]; then
- # get dom0's vcpu count
- cpu_count=$(cat /proc/cpuinfo | grep processor | wc -l)
- fi
-
- # Assert cpu_count is not empty
- [ -n "$cpu_count" ]
-
- # Assert it has a numeric, nonzero value
- expr "$cpu_count" + 0
-
- # 8 VCPUs should be enough for the devstack VM; avoid using too
- # many VCPUs:
- # 1. too many VCPUs may trigger a kernel bug which results in the
- # VM not being able to boot:
- # https://kernel.googlesource.com/pub/scm/linux/kernel/git/wsa/linux/+/e2e004acc7cbe3c531e752a270a74e95cde3ea48
- # 2. the remaining CPUs can be used for other purposes,
- # e.g. booting test VMs.
- MAX_VCPUS=8
- if [ $cpu_count -ge $MAX_VCPUS ]; then
- cpu_count=$MAX_VCPUS
- fi
-
- xe vm-param-set uuid=$vm VCPUs-max=$cpu_count
- xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
-}
-
-function get_domid {
- local vm_name_label
-
- vm_name_label="$1"
-
- xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
-}
-
-function install_conntrack_tools {
- local xs_host
- local xs_ver_major
- local centos_ver
- local conntrack_conf
- xs_host=$(xe host-list --minimal)
- xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1)
- if [ $xs_ver_major -gt 6 ]; then
- # Only support conntrack-tools in Dom0 with XS7.0 and above
- if [ ! -f /usr/sbin/conntrackd ]; then
- sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo
- centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'/' -f 1 | cut -d'-' -f 1)
- yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools
- # Back up conntrackd.conf after installing conntrack-tools; use the one with statistics mode
- mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back
- conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats)
- cp $conntrack_conf /etc/conntrackd/conntrackd.conf
- fi
- service conntrackd restart
- fi
-}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
deleted file mode 100755
index f4ca71a..0000000
--- a/tools/xen/install_os_domU.sh
+++ /dev/null
@@ -1,418 +0,0 @@
-#!/bin/bash
-
-# This script must be run on a XenServer or XCP machine
-#
-# It creates a DomU VM that runs OpenStack services
-#
-# For more details see: README.md
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-export LC_ALL=C
-
-# This directory
-THIS_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $THIS_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $THIS_DIR/functions
-
-#
-# Get Settings
-#
-TOP_DIR=$(cd $THIS_DIR/../../ && pwd)
-source $TOP_DIR/inc/meta-config
-rm -f $TOP_DIR/.localrc.auto
-extract_localrc_section $TOP_DIR/local.conf $TOP_DIR/localrc $TOP_DIR/.localrc.auto
-
-# Source params - override xenrc params in your localrc to suit your taste
-source $THIS_DIR/xenrc
-
-xe_min()
-{
- local cmd="$1"
- shift
- xe "$cmd" --minimal "$@"
-}
-
-#
-# Prepare Dom0
-# including installing XenAPI plugins
-#
-
-cd $THIS_DIR
-
-# Die if multiple hosts listed
-if have_multiple_hosts; then
- cat >&2 << EOF
-ERROR: multiple hosts found. This might mean that the XenServer is a member
-of a pool. Exiting.
-EOF
- exit 1
-fi
-
-#
-# Configure Networking
-#
-
-MGT_NETWORK=`xe pif-list management=true params=network-uuid minimal=true`
-MGT_BRIDGE_OR_NET_NAME=`xe network-list uuid=$MGT_NETWORK params=bridge minimal=true`
-
-setup_network "$VM_BRIDGE_OR_NET_NAME"
-setup_network "$MGT_BRIDGE_OR_NET_NAME"
-setup_network "$PUB_BRIDGE_OR_NET_NAME"
-
-if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
- if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then
- cat >&2 << EOF
-ERROR: FLAT_NETWORK_BRIDGE is specified in the localrc file, but either no
-network was found on the XenServer matching that value as a name-label or
-bridge name, or the network that was found does not match the network
-specified by VM_BRIDGE_OR_NET_NAME. Please check your localrc file.
-EOF
- exit 1
- fi
-fi
-
-if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then
- cat >&2 << EOF
-ERROR: XenAPI does not have an assigned IP address on the management network.
-Please review your XenServer network configuration / localrc file.
-EOF
- exit 1
-fi
-
-HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME")
-
-# Set up ip forwarding, but skip on xcp-xapi
-if [ -a /etc/sysconfig/network ]; then
- if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
- # FIXME: This doesn't work on reboot!
- echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
- fi
-fi
-# Also enable ip forwarding in rc.local, since the trick above does not work on reboot
-if ! grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then
- echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local
-fi
-# Enable ip forwarding at runtime as well
-echo 1 > /proc/sys/net/ipv4/ip_forward
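Both persistence tricks above are fragile (the FIXME notes the first does not
survive a reboot); a more conventional alternative, shown only as an
assumption since this script does not use it, is sysctl configuration:

    # Persist IPv4 forwarding across reboots via sysctl instead of rc.local.
    echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
    sysctl -p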
-
-
-#
-# Shutdown previous runs
-#
-
-DO_SHUTDOWN=${DO_SHUTDOWN:-1}
-CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
-if [ "$DO_SHUTDOWN" = "1" ]; then
- # Shut down all domUs that were created previously
- clean_templates_arg=""
- if $CLEAN_TEMPLATES; then
- clean_templates_arg="--remove-templates"
- fi
- ./scripts/uninstall-os-vpx.sh $clean_templates_arg
-
- # Destroy any instances that were launched
- for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
- echo "Shutting down nova instance $uuid"
- xe vm-uninstall uuid=$uuid force=true
- done
-
- # Destroy orphaned vdis
- for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do
- xe vdi-destroy uuid=$uuid
- done
-fi
-
-
-#
-# Create Ubuntu VM template
-# and/or create VM from template
-#
-
-GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
-TNAME="jeos_template_for_devstack"
-SNAME_TEMPLATE="jeos_snapshot_for_devstack"
-SNAME_FIRST_BOOT="before_first_boot"
-
-function wait_for_VM_to_halt {
- set +x
- echo "Waiting for the VM to halt. Progress in-VM can be checked with XenCenter or xl console:"
- mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
- domid=$(get_domid "$GUEST_NAME")
- echo "ssh root@$mgmt_ip \"xl console $domid\""
- while true; do
- state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
- if [ -n "$state" ]; then
- break
- else
- echo -n "."
- sleep 20
- fi
- done
- set -x
-}
-
-templateuuid=$(xe template-list name-label="$TNAME")
-if [ -z "$templateuuid" ]; then
- #
- # Install Ubuntu over network
- #
- UBUNTU_INST_BRIDGE_OR_NET_NAME=${UBUNTU_INST_BRIDGE_OR_NET_NAME:-"$MGT_BRIDGE_OR_NET_NAME"}
-
- # Always update the preseed file, in case we have a newer one
- PRESEED_URL=${PRESEED_URL:-""}
- if [ -z "$PRESEED_URL" ]; then
- PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg"
-
- HTTP_SERVER_LOCATION="/opt/xensource/www"
- if [ ! -e $HTTP_SERVER_LOCATION ]; then
- HTTP_SERVER_LOCATION="/var/www/html"
- mkdir -p $HTTP_SERVER_LOCATION
- fi
-
- # Copy the tools DEB to the XS web server
- XS_TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb"
- ISO_DIR="/opt/xensource/packages/iso"
- if [ -e "$ISO_DIR" ]; then
- TOOLS_ISO=$(ls -1 $ISO_DIR/*-tools-*.iso | head -1)
- TMP_DIR=/tmp/temp.$RANDOM
- mkdir -p $TMP_DIR
- mount -o loop $TOOLS_ISO $TMP_DIR
- # the target deb package may be *amd64.deb or *all.deb,
- # so use *amd64.deb by default. If it doesn't exist,
- # then use *all.deb.
- DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb)
- cp $DEB_FILE $HTTP_SERVER_LOCATION
- umount $TMP_DIR
- rmdir $TMP_DIR
- XS_TOOLS_URL=${HOST_IP}/$(basename $DEB_FILE)
- fi
-
- cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
- cp -f $THIS_DIR/devstackubuntu_latecommand.sh $HTTP_SERVER_LOCATION/latecommand.sh
-
- sed \
- -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \
- -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \
- -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \
- -e "s,\(d-i passwd/root-password password\).*,\1 $GUEST_PASSWORD,g" \
- -e "s,\(d-i passwd/root-password-again password\).*,\1 $GUEST_PASSWORD,g" \
- -e "s,\(d-i preseed/late_command string\).*,\1 in-target mkdir -p /tmp; in-target wget --no-proxy ${HOST_IP}/latecommand.sh -O /root/latecommand.sh; in-target bash /root/latecommand.sh,g" \
- -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
-
- sed \
- -e "s,@XS_TOOLS_URL@,$XS_TOOLS_URL,g" \
- -i "${HTTP_SERVER_LOCATION}/latecommand.sh"
- fi
-
- # Update the template
- $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL
-
- # create a new VM from the given template with eth0 attached to the given
- # network
- $THIS_DIR/scripts/install-os-vpx.sh \
- -t "$UBUNTU_INST_TEMPLATE_NAME" \
- -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \
- -l "$GUEST_NAME"
-
- set_vm_memory "$GUEST_NAME" "1024"
-
- xe vm-start vm="$GUEST_NAME"
-
- # wait for install to finish
- wait_for_VM_to_halt
-
- # set VM to restart after a reboot
- vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME")
- xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid"
-
- # Make template from VM
- snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE")
- xe snapshot-clone uuid=$snuuid new-name-label="$TNAME"
-else
- #
- # Template already installed, create VM from template
- #
- vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
-fi
-
-if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then
- echo "User requested to quit after JEOS installation"
- exit 0
-fi
-
-#
-# Prepare VM for DevStack
-#
-xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
-
-# Install XenServer tools, and other such things
-$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME"
-
-# Set virtual machine parameters
-set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
-
-# Max out VCPU count for better performance
-max_vcpus "$GUEST_NAME"
-
-# Wipe out all network cards
-destroy_all_vifs_of "$GUEST_NAME"
-
-# Add only one interface to prepare the guest template
-add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0"
-
-# start the VM to run the prepare steps
-xe vm-start vm="$GUEST_NAME"
-
-# Wait for prep script to finish and shutdown system
-wait_for_VM_to_halt
-
-## Setup network cards
-# Wipe out all
-destroy_all_vifs_of "$GUEST_NAME"
-# Tenant network
-add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR"
-# Management network
-add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR"
-# Public network
-add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR"
-
-#
-# Inject DevStack inside VM disk
-#
-$THIS_DIR/build_xva.sh "$GUEST_NAME"
-
-FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}"
-append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
-
-# Add a separate xvdb, if it was requested
-if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then
- vm=$(xe vm-list name-label="$GUEST_NAME" --minimal)
-
- # Add a new disk
- localsr=$(get_local_sr)
- extra_vdi=$(xe vdi-create \
- name-label=xvdb-added-by-devstack \
- virtual-size="${XEN_XVDB_SIZE_GB}GiB" \
- sr-uuid=$localsr type=user)
- xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1
-fi
-
-# create a snapshot before the first boot
-# to allow a quick re-run with the same settings
-xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
-
-#
-# Run DevStack VM
-#
-xe vm-start vm="$GUEST_NAME"
-
-function ssh_no_check {
- ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
-}
-
-# Get hold of the Management IP of OpenStack VM
-OS_VM_MANAGEMENT_ADDRESS=$MGT_IP
-if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then
- OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
-fi
-
-# Get hold of the Service IP of OpenStack VM
-if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then
- OS_VM_SERVICES_ADDRESS=$MGT_IP
- if [ $MGT_IP == "dhcp" ]; then
- OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
- fi
-else
- OS_VM_SERVICES_ADDRESS=$PUB_IP
- if [ $PUB_IP == "dhcp" ]; then
- OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR)
- fi
-fi
-
-# Create an ssh-keypair, and set it up for dom0 user
-rm -f /root/dom0key /root/dom0key.pub
-ssh-keygen -f /root/dom0key -P "" -C "dom0"
-DOMID=$(get_domid "$GUEST_NAME")
-
-xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)"
-xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID
-
-function run_on_appliance {
- ssh \
- -i /root/dom0key \
- -o UserKnownHostsFile=/dev/null \
- -o StrictHostKeyChecking=no \
- -o BatchMode=yes \
- "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@"
-}
-
-# Wait until we can log in to the appliance
-while ! run_on_appliance true; do
- sleep 1
-done
-
-# Remove the authorized_keys updater cronjob
-echo "" | run_on_appliance crontab -
-
-# Generate a passwordless ssh key for domzero user
-echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance
-
-# Authenticate that user to dom0
-run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
-
-# If we have copied our ssh credentials, use ssh to monitor while the installation runs
-WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
-COPYENV=${COPYENV:-1}
-if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then
- set +x
-
- echo "VM Launched - Waiting for run.sh"
- while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e /opt/stack/run_sh.pid"; do
- sleep 10
- done
- echo -n "devstack service is running, waiting for stack.sh to start logging..."
-
- pid=`ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "cat /opt/stack/run_sh.pid"`
- if [ -n "$SCREEN_LOGDIR" ]; then
- while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e ${SCREEN_LOGDIR}/stack.log"; do
- sleep 10
- done
-
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "tail --pid $pid -n +1 -f ${SCREEN_LOGDIR}/stack.log"
- else
- echo -n "SCREEN_LOGDIR not set; just waiting for process $pid to finish"
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "wait $pid"
- fi
-
- set -x
- # Fail if devstack did not succeed
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /opt/stack/runsh.succeeded'
-
- set +x
- echo "################################################################################"
- echo ""
- echo "All Finished!"
- echo "You can visit the OpenStack Dashboard"
- echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
-else
- set +x
- echo "################################################################################"
- echo ""
- echo "All Finished!"
- echo "Now, you can monitor the progress of the stack.sh installation by "
- echo "looking at the console of your domU / checking the log files."
- echo ""
- echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
- echo "and then do: 'sudo systemctl status devstack' to check if devstack is still running."
- echo "Check that /opt/stack/runsh.succeeded exists"
- echo ""
- echo "When devstack completes, you can visit the OpenStack Dashboard"
- echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
-fi
diff --git a/tools/xen/mocks b/tools/xen/mocks
deleted file mode 100644
index 3b9b05c..0000000
--- a/tools/xen/mocks
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-test ! -e "$LIST_OF_ACTIONS" && {
- echo "Mocking is not set up properly."
- echo "LIST_OF_ACTIONS should point to an existing file."
- exit 1
-}
-
-test ! -e "$LIST_OF_DIRECTORIES" && {
- echo "Mocking is not set up properly."
- echo "LIST_OF_DIRECTORIES should point to an existing file."
- exit 1
-}
-
-test ! -e "$XE_RESPONSE" && {
- echo "Mocking is not set up properly."
- echo "XE_RESPONSE should point to an existing file."
- exit 1
-}
-
-test ! -e "$XE_CALLS" && {
- echo "Mocking is not set up properly."
- echo "XE_CALLS should point to an existing file."
- exit 1
-}
-
-function mktemp {
- if test "${1:-}" = "-d";
- then
- echo "tempdir"
- else
- echo "tempfile"
- fi
-}
-
-function wget {
- if [[ $@ =~ "failurl" ]]; then
- return 1
- fi
- echo "wget $@" >> $LIST_OF_ACTIONS
-}
-
-function mkdir {
- if test "${1:-}" = "-p";
- then
- echo "$2" >> $LIST_OF_DIRECTORIES
- fi
-}
-
-function unzip {
- echo "Random rubbish from unzip"
- echo "unzip $@" >> $LIST_OF_ACTIONS
-}
-
-function rm {
- echo "rm $@" >> $LIST_OF_ACTIONS
-}
-
-function ln {
- echo "ln $@" >> $LIST_OF_ACTIONS
-}
-
-function [ {
- if test "${1:-}" = "-d";
- then
- echo "[ $@" >> $LIST_OF_ACTIONS
- for directory in $(cat $LIST_OF_DIRECTORIES)
- do
- if test "$directory" = "$2"
- then
- return 0
- fi
- done
- return 1
- fi
- echo "Mock test does not implement the requested function: ${1:-}"
- exit 1
-}
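Because bash resolves functions before builtins, sourcing this file makes a
plain "[ -d ... ]" test consult $LIST_OF_DIRECTORIES instead of the real
filesystem; a minimal sketch of the effect:

    # After sourcing the mocks with the LIST_OF_* variables set up:
    echo "/boot/guest" >> "$LIST_OF_DIRECTORIES"
    if [ -d "/boot/guest" ]; then
        echo "the mocked test sees the recorded directory"
    fi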
-
-function die_with_error {
- echo "$1" >> $DEAD_MESSAGES
-}
-
-function xe {
- cat $XE_RESPONSE
- {
- for i in $(seq "$#")
- do
- eval "echo \"\$$i\""
- done
- } >> $XE_CALLS
-}
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
deleted file mode 100755
index 6de1afc..0000000
--- a/tools/xen/prepare_guest.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/bin/bash
-
-# This script is run on an Ubuntu VM.
-# This script is inserted into the VM by prepare_guest_template.sh
-# and is run when that VM boots.
-# It customizes a fresh Ubuntu install, so it is ready
-# to run stack.sh
-#
-# This includes installing the XenServer tools,
-# creating the user called "stack",
-# and shutting down the VM to signal that the script has completed.
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# Configurable nuggets
-GUEST_PASSWORD="$1"
-STACK_USER="$2"
-DOMZERO_USER="$3"
-
-
-function setup_domzero_user {
- local username
-
- username="$1"
-
- local key_updater_script
- local sudoers_file
- key_updater_script="/home/$username/update_authorized_keys.sh"
- sudoers_file="/etc/sudoers.d/allow_$username"
-
- # Create user
- adduser --disabled-password --quiet "$username" --gecos "$username"
-
- # Give passwordless sudo
- cat > $sudoers_file << EOF
- $username ALL = NOPASSWD: ALL
-EOF
- chmod 0440 $sudoers_file
-
- # A script to populate this user's authorized_keys from xenstore
- cat > $key_updater_script << EOF
-#!/bin/bash
-set -eux
-
-DOMID=\$(sudo xenstore-read domid)
-sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username
-sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value
-cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys
-EOF
-
- # Give the key updater to the user
- chown $username:$username $key_updater_script
- chmod 0700 $key_updater_script
-
- # Setup the .ssh folder
- mkdir -p /home/$username/.ssh
- chown $username:$username /home/$username/.ssh
- chmod 0700 /home/$username/.ssh
- touch /home/$username/.ssh/authorized_keys
- chown $username:$username /home/$username/.ssh/authorized_keys
- chmod 0600 /home/$username/.ssh/authorized_keys
-
- # Setup the key updater as a cron job
- crontab -u $username - << EOF
-* * * * * $key_updater_script
-EOF
-
-}
-
-# Make a small cracklib dictionary, so that passwd still works, but we don't
-# have the big dictionary.
-mkdir -p /usr/share/cracklib
-echo a | cracklib-packer
-
-# Make /etc/shadow, and set the root password
-pwconv
-echo "root:$GUEST_PASSWORD" | chpasswd
-
-# Put the VPX into UTC.
-rm -f /etc/localtime
-
-# Add stack user
-groupadd libvirtd
-useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
-echo $STACK_USER:$GUEST_PASSWORD | chpasswd
-echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
-
-setup_domzero_user "$DOMZERO_USER"
-
-# Add a udev rule so that new block devices can be written by the stack user
-cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF
-KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660"
-EOF
-
-# Give ownership of /opt/stack to stack user
-chown -R $STACK_USER /opt/stack
-
-function setup_vimrc {
- if [ ! -e $1 ]; then
- # Simple but usable vimrc
- cat > $1 <<EOF
-se ts=4
-se expandtab
-se shiftwidth=4
-EOF
- fi
-}
-
-# Setup simple .vimrcs
-setup_vimrc /root/.vimrc
-setup_vimrc /opt/stack/.vimrc
-
-# Remove self from rc.local
-# so this script is not run again
-rm -rf /etc/rc.local
-
-# Restore rc.local file
-cp /etc/rc.local.preparebackup /etc/rc.local
-
-# shutdown to notify we are done
-shutdown -h now
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
deleted file mode 100755
index 6cdddda..0000000
--- a/tools/xen/prepare_guest_template.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-
-# This script is run by install_os_domU.sh
-#
-# Parameters:
-# - $GUEST_NAME - hostname for the DomU VM
-#
-# It modifies the ubuntu image created by install_os_domU.sh
-#
-# This script is responsible for customizing the fresh Ubuntu
-# image so that, on boot, it runs the prepare_guest.sh script
-# that modifies the VM so it is ready to run stack.sh.
-# It does this by mounting the disk image of the VM.
-#
-# The resultant image is started by install_os_domU.sh,
-# and once the VM has shutdown, build_xva.sh is run
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# This directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
-
-# Include onexit commands
-. $TOP_DIR/scripts/on_exit.sh
-
-# xapi functions
-. $TOP_DIR/functions
-
-# Source params - override xenrc params in your localrc to suit your taste
-source xenrc
-
-#
-# Parameters
-#
-GUEST_NAME="$1"
-
-# Mount the VDI
-STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
-add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
-
-# Make sure we have a stage
-if [ ! -d $STAGING_DIR/etc ]; then
- echo "Stage is not properly set up!"
- exit 1
-fi
-
-# Copy prepare_guest.sh to VM
-mkdir -p $STAGING_DIR/opt/stack/
-cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh
-
-# backup rc.local
-cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup
-
-# run prepare_guest.sh on boot
-cat <<EOF >$STAGING_DIR/etc/rc.local
-#!/bin/sh -e
-bash /opt/stack/prepare_guest.sh \\
- "$GUEST_PASSWORD" "$STACK_USER" "$DOMZERO_USER" \\
- > /opt/stack/prepare_guest.log 2>&1
-EOF
-
-# Update ubuntu repositories
-cat > $STAGING_DIR/etc/apt/sources.list << EOF
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} main restricted
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates main restricted
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} universe
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates universe
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE} multiverse
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-updates multiverse
-deb http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse
-deb-src http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY} ${UBUNTU_INST_RELEASE}-backports main restricted universe multiverse
-
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security main restricted
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security universe
-deb http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse
-deb-src http://security.ubuntu.com/ubuntu ${UBUNTU_INST_RELEASE}-security multiverse
-EOF
-
-rm -f $STAGING_DIR/etc/apt/apt.conf
-if [ -n "$UBUNTU_INST_HTTP_PROXY" ]; then
- cat > $STAGING_DIR/etc/apt/apt.conf << EOF
-Acquire::http::Proxy "$UBUNTU_INST_HTTP_PROXY";
-EOF
-fi
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
deleted file mode 100755
index 66f7ef4..0000000
--- a/tools/xen/scripts/install-os-vpx.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-set -eux
-
-BRIDGE=
-NAME_LABEL=
-TEMPLATE_NAME=
-
-usage()
-{
-cat << EOF
-
- Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE]
-
- Install a VM from a template
-
- OPTIONS:
-
- -h Shows this message.
- -t template VM template to use
- -l name Specifies the name label for the VM.
- -n bridge The bridge/network to use for eth0. Defaults to xenbr0
-EOF
-}
-
-get_params()
-{
- while getopts "hbn:r:l:t:" OPTION; do
- case $OPTION in
- h) usage
- exit 1
- ;;
- n)
- BRIDGE=$OPTARG
- ;;
- l)
- NAME_LABEL=$OPTARG
- ;;
- t)
- TEMPLATE_NAME=$OPTARG
- ;;
- ?)
- usage
- exit
- ;;
- esac
- done
- if [[ -z $BRIDGE ]]; then
- BRIDGE=xenbr0
- fi
-
- if [[ -z $TEMPLATE_NAME ]]; then
- echo "Please specify a template name" >&2
- exit 1
- fi
-
- if [[ -z $NAME_LABEL ]]; then
- echo "Please specify a name-label for the new VM" >&2
- exit 1
- fi
-}
-
-
-xe_min()
-{
- local cmd="$1"
- shift
- xe "$cmd" --minimal "$@"
-}
-
-
-find_network()
-{
- result=$(xe_min network-list bridge="$1")
- if [ "$result" = "" ]; then
- result=$(xe_min network-list name-label="$1")
- fi
- echo "$result"
-}
-
-
-create_vif()
-{
- local v="$1"
- echo "Installing VM interface on [$BRIDGE]"
- local out_network_uuid
- out_network_uuid=$(find_network "$BRIDGE")
- xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
-}
-
-
-
-# Make the VM auto-start on server boot.
-set_auto_start()
-{
- local v="$1"
- xe vm-param-set uuid="$v" other-config:auto_poweron=true
-}
-
-
-destroy_vifs()
-{
- local v="$1"
- IFS=,
- for vif in $(xe_min vif-list vm-uuid="$v"); do
- xe vif-destroy uuid="$vif"
- done
- unset IFS
-}
-
-
-get_params "$@"
-
-vm_uuid=$(xe_min vm-install template="$TEMPLATE_NAME" new-name-label="$NAME_LABEL")
-destroy_vifs "$vm_uuid"
-set_auto_start "$vm_uuid"
-create_vif "$vm_uuid"
-xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
deleted file mode 100755
index 6ea3642..0000000
--- a/tools/xen/scripts/install_ubuntu_template.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-#
-# This creates an Ubuntu Server 32bit or 64bit template
-# on XenServer 5.6.x, 6.0.x and 6.1.x
-# The template does a net install only
-#
-# Based on a script by: David Markey <david.markey@citrix.com>
-#
-
-set -o errexit
-set -o nounset
-set -o xtrace
-
-# This directory
-BASE_DIR=$(cd $(dirname "$0") && pwd)
-
-# For default settings see xenrc
-source $BASE_DIR/../xenrc
-
-# Get the params
-preseed_url=$1
-
-# Delete template or skip template creation as required
-previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \
- params=uuid --minimal)
-if [ -n "$previous_template" ]; then
- if $CLEAN_TEMPLATES; then
- xe template-param-clear param-name=other-config uuid=$previous_template
- xe template-uninstall template-uuid=$previous_template force=true
- else
- echo "Template $UBUNTU_INST_TEMPLATE_NAME already present"
- exit 0
- fi
-fi
-
-# Get built-in template
-builtin_name="Debian Squeeze 6.0 (32-bit)"
-builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal)
-if [[ -z $builtin_uuid ]]; then
- echo "Can't find the Debian Squeeze 32bit template on your XenServer."
- exit 1
-fi
-
-# Clone built-in template to create new template
-new_uuid=$(xe vm-clone uuid=$builtin_uuid \
- new-name-label="$UBUNTU_INST_TEMPLATE_NAME")
-disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024))
-
-# Some of these settings can be found in example preseed files;
-# however, they need to be answered before the netinstall
-# is ready to fetch the preseed file, and as such must be here
-# to get a fully automated install.
-pvargs="quiet console=hvc0 partman/default_filesystem=ext3 \
-console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \
-keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \
-netcfg/choose_interface=eth0 \
-netcfg/get_hostname=os netcfg/get_domain=os auto \
-url=${preseed_url}"
-
-if [ "$UBUNTU_INST_IP" != "dhcp" ]; then
- netcfgargs="netcfg/disable_autoconfig=true \
-netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \
-netcfg/get_ipaddress=${UBUNTU_INST_IP} \
-netcfg/get_netmask=${UBUNTU_INST_NETMASK} \
-netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \
-netcfg/confirm_static=true"
- pvargs="${pvargs} ${netcfgargs}"
-fi
-
-xe template-param-set uuid=$new_uuid \
- other-config:install-methods=http \
- other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \
- PV-args="$pvargs" \
- other-config:debian-release="$UBUNTU_INST_RELEASE" \
- other-config:default_template=true \
- other-config:disks='<provision><disk device="0" size="'$disk_size'" sr="" bootable="true" type="system"/></provision>' \
- other-config:install-arch="$UBUNTU_INST_ARCH"
-
-if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then
- xe template-param-set uuid=$new_uuid \
- other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY"
-fi
-
-echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi
deleted file mode 100755
index 909ce32..0000000
--- a/tools/xen/scripts/manage-vdi
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-action="$1"
-vm="$2"
-device="${3-0}"
-part="${4-}"
-
-function xe_min() {
- local cmd="$1"
- shift
- xe "$cmd" --minimal "$@"
-}
-
-function run_udev_settle() {
- which_udev=$(which udevsettle) || true
- if [ -n "$which_udev" ]; then
- udevsettle
- else
- udevadm settle
- fi
-}
-
-vm_uuid=$(xe_min vm-list name-label="$vm")
-vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \
- userdevice="$device")
-
-dom0_uuid=$(xe_min vm-list is-control-domain=true)
-
-function get_mount_device() {
- vbd_uuid=$1
-
- dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
- if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then
- DEBIAN_FRONTEND=noninteractive \
- apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \
- install kpartx &> /dev/null || true
- mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p")
- if [ -z "$mapping" ]; then
- echo "Failed to find mapping"
- exit 1
- fi
-
- local device="/dev/mapper/${mapping}"
- for (( i = 0; i < 5; i++ )) ; do
- if [ -b $device ] ; then
- echo $device
- return
- fi
- sleep 1
- done
- echo "ERROR: timed out waiting for dev-mapper"
- exit 1
- else
- echo "/dev/$dev$part"
- fi
-}
-
-function clean_dev_mappings() {
- dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
- if [[ "$dev" =~ "sm/" || "$dev" =~ "blktap-2/" ]]; then
- kpartx -dv "/dev/$dev"
- fi
-}
-
-function open_vdi() {
- vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \
- device=autodetect)
- mp=$(mktemp -d)
- xe vbd-plug uuid="$vbd_uuid"
-
- run_udev_settle
-
- mount_device=$(get_mount_device "$vbd_uuid")
- mount "$mount_device" "$mp"
- echo "Your vdi is mounted at $mp"
-}
-
-function close_vdi() {
- vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid")
- mount_device=$(get_mount_device "$vbd_uuid")
- run_udev_settle
- umount "$mount_device"
-
- clean_dev_mappings
-
- xe vbd-unplug uuid=$vbd_uuid
- xe vbd-destroy uuid=$vbd_uuid
-}
-
-if [ "$action" == "open" ]; then
- open_vdi
-elif [ "$action" == "close" ]; then
- close_vdi
-fi
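The script is driven positionally as "manage-vdi <open|close> <vm> [device]
[partition]"; prepare_guest_template.sh earlier in this patch uses it like
this:

    # Mount partition 1 of the guest's first disk (device 0) inside dom0.
    STAGING_DIR=$(./scripts/manage-vdi open "$GUEST_NAME" 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
    # ... edit files under $STAGING_DIR, then unmount and clean up.
    ./scripts/manage-vdi close "$GUEST_NAME" 0 1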
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
deleted file mode 100755
index 2846dc4..0000000
--- a/tools/xen/scripts/on_exit.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-set -o xtrace
-
-if [ -z "${on_exit_hooks:-}" ]; then
- on_exit_hooks=()
-fi
-
-on_exit()
-{
- for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do
- eval "${on_exit_hooks[$i]}"
- done
-}
-
-add_on_exit()
-{
- local n=${#on_exit_hooks[*]}
- on_exit_hooks[$n]="$*"
- if [[ $n -eq 0 ]]; then
- trap on_exit EXIT
- fi
-}
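add_on_exit installs the EXIT trap on first use, and on_exit runs the hooks
in reverse registration order; a usage sketch:

    . scripts/on_exit.sh
    SCRATCH=$(mktemp -d)
    add_on_exit "rm -rf $SCRATCH"      # registered first, runs last
    add_on_exit "echo cleaning up"     # registered last, runs first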
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
deleted file mode 100755
index 96dad7e..0000000
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-set -ex
-
-# By default, don't remove the templates
-REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"}
-if [ "$1" = "--remove-templates" ]; then
- REMOVE_TEMPLATES=true
-fi
-
-xe_min()
-{
- local cmd="$1"
- shift
- xe "$cmd" --minimal "$@"
-}
-
-destroy_vdi()
-{
- local vbd_uuid="$1"
- local type
- type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
- local dev
- dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
- local vdi_uuid
- vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
-
- if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
- xe vdi-destroy uuid=$vdi_uuid
- fi
-}
-
-uninstall()
-{
- local vm_uuid="$1"
- local power_state
- power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
-
- if [ "$power_state" != "halted" ]; then
- xe vm-shutdown vm=$vm_uuid force=true
- fi
-
- for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
- destroy_vdi "$v"
- done
-
- xe vm-uninstall vm=$vm_uuid force=true >/dev/null
-}
-
-uninstall_template()
-{
- local vm_uuid="$1"
-
- for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
- destroy_vdi "$v"
- done
-
- xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null
-}
-
-# remove the VMs and their disks
-for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
- uninstall "$u"
-done
-
-# remove the templates
-if [ "$REMOVE_TEMPLATES" == "true" ]; then
- for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
- uninstall_template "$u"
- done
-fi
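
The uninstaller keyed off the other-config:os-vpx=true marker: matching VMs
were always removed (along with their non-root data disks), templates only on
request. Typical dom0 invocations:

    # Remove the os-vpx VMs and their extra data disks
    ./uninstall-os-vpx.sh

    # Additionally remove the matching templates
    ./uninstall-os-vpx.sh --remove-templates
    # equivalent: REMOVE_TEMPLATES=true ./uninstall-os-vpx.sh
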
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
deleted file mode 100755
index 324e6a1..0000000
--- a/tools/xen/test_functions.sh
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/bash
-
-# Tests for the XenServer helper functions.
-#
-# The tests source the mocks file to mock out various functions. The mocking
-# always happens in a sub-shell, so it has no impact on the functions defined
-# here.
-
-# To run the tests, please run:
-#
-# ./test_functions.sh run_tests
-#
-# To only print out the discovered test functions, run:
-#
-# ./test_functions.sh
-
-. functions
-
-# Setup
-function before_each_test {
- LIST_OF_DIRECTORIES=$(mktemp)
- truncate -s 0 $LIST_OF_DIRECTORIES
-
- LIST_OF_ACTIONS=$(mktemp)
- truncate -s 0 $LIST_OF_ACTIONS
-
- XE_RESPONSE=$(mktemp)
- truncate -s 0 $XE_RESPONSE
-
- XE_CALLS=$(mktemp)
- truncate -s 0 $XE_CALLS
-
- DEAD_MESSAGES=$(mktemp)
- truncate -s 0 $DEAD_MESSAGES
-}
-
-# Teardown
-function after_each_test {
- rm -f $LIST_OF_DIRECTORIES
- rm -f $LIST_OF_ACTIONS
- rm -f $XE_RESPONSE
- rm -f $XE_CALLS
-}
-
-# Helpers
-function setup_xe_response {
- echo "$1" > $XE_RESPONSE
-}
-
-function given_directory_exists {
- echo "$1" >> $LIST_OF_DIRECTORIES
-}
-
-function assert_directory_exists {
- grep "$1" $LIST_OF_DIRECTORIES
-}
-
-function assert_previous_command_failed {
- [ "$?" != "0" ] || exit 1
-}
-
-function assert_xe_min {
- grep -qe "^--minimal\$" $XE_CALLS
-}
-
-function assert_xe_param {
- grep -qe "^$1\$" $XE_CALLS
-}
-
-function assert_died_with {
- diff -u <(echo "$1") $DEAD_MESSAGES
-}
-
-function mock_out {
- local FNNAME="$1"
- local OUTPUT="$2"
-
- . <(cat << EOF
-function $FNNAME {
- echo "$OUTPUT"
-}
-EOF
-)
-}
-
-function assert_symlink {
- grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS
-}
-
-# Tests
-function test_plugin_directory_on_xenserver {
- given_directory_exists "/etc/xapi.d/plugins/"
-
- PLUGDIR=$(. mocks && xapi_plugin_location)
-
- [ "/etc/xapi.d/plugins/" = "$PLUGDIR" ]
-}
-
-function test_plugin_directory_on_xcp {
- given_directory_exists "/usr/lib/xcp/plugins/"
-
- PLUGDIR=$(. mocks && xapi_plugin_location)
-
- [ "/usr/lib/xcp/plugins/" = "$PLUGDIR" ]
-}
-
-function test_no_plugin_directory_found {
- set +e
-
- local IGNORE
- IGNORE=$(. mocks && xapi_plugin_location)
-
- assert_previous_command_failed
-
- grep "[ -d /etc/xapi.d/plugins/ ]" $LIST_OF_ACTIONS
- grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
-}
-
-function test_create_directory_for_kernels {
- (
- . mocks
- mock_out get_local_sr_path /var/run/sr-mount/uuid1
- create_directory_for_kernels
- )
-
- assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels"
- assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels"
-}
-
-function test_create_directory_for_kernels_existing_dir {
- (
- . mocks
- given_directory_exists "/boot/guest"
- create_directory_for_kernels
- )
-
- diff -u $LIST_OF_ACTIONS - << EOF
-[ -d /boot/guest ]
-EOF
-}
-
-function test_create_directory_for_images {
- (
- . mocks
- mock_out get_local_sr_path /var/run/sr-mount/uuid1
- create_directory_for_images
- )
-
- assert_directory_exists "/var/run/sr-mount/uuid1/os-images"
- assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images"
-}
-
-function test_create_directory_for_images_existing_dir {
- (
- . mocks
- given_directory_exists "/images"
- create_directory_for_images
- )
-
- diff -u $LIST_OF_ACTIONS - << EOF
-[ -d /images ]
-EOF
-}
-
-function test_get_local_sr {
- setup_xe_response "uuid123"
-
- local RESULT
- RESULT=$(. mocks && get_local_sr)
-
- [ "$RESULT" == "uuid123" ]
-
- assert_xe_param "pool-list" params=default-SR minimal=true
-}
-
-function test_get_local_sr_path {
- local RESULT
- RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
-
- [ "/var/run/sr-mount/uuid1" == "$RESULT" ]
-}
-
-# Test runner
-[ "$1" = "" ] && {
- grep -e "^function *test_" $0 | cut -d" " -f2
-}
-
-[ "$1" = "run_tests" ] && {
- for testname in $($0); do
- echo "$testname"
- before_each_test
- (
- set -eux
- $testname
- )
- if [ "$?" != "0" ]; then
- echo "FAIL"
- exit 1
- else
- echo "PASS"
- fi
-
- after_each_test
- done
-}
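
The harness discovered tests by grepping the file for "function test_" and
ran each one in a "set -eux" subshell, so any failing command failed the
test. A new test followed the same shape; the case below is hypothetical:

    function test_get_local_sr_empty_response {
        setup_xe_response ""            # the xe mock returns nothing

        local RESULT
        RESULT=$(. mocks && get_local_sr)

        [ -z "$RESULT" ]                # expect an empty SR uuid
    }

Running ./test_functions.sh with no argument printed the discovered test
names; ./test_functions.sh run_tests executed them.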
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
deleted file mode 100644
index 169e042..0000000
--- a/tools/xen/xenrc
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-
-#
-# XenServer specific defaults for the /tools/xen/ scripts
-# Similar to stackrc, you can override these in your localrc
-#
-
-# Name of this guest
-GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
-
-# Template cleanup
-CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
-
-# Size of image
-VDI_MB=${VDI_MB:-5000}
-
-# DevStack now contains many components. 4GB of RAM is not enough to prevent
-# swapping and memory fragmentation - the latter of which can cause failures
-# such as blkfront failing to plug a VBD, leading to random test failures.
-#
-# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs
-OSDOMU_MEM_MB=6144
-OSDOMU_VDI_GB=8
-
-# Network mapping. Specify bridge names or network names. Network names may
-# differ across localised versions of XenServer. If a given bridge/network
-# is not found, a new network will be created with the specified name.
-
-# Get the management network from the XS installation
-VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
-PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
-
-# VM Password
-GUEST_PASSWORD=${GUEST_PASSWORD:-secret}
-
-# Extracted variables for OpenStack VM network device numbers.
-# Make sure they form a continuous sequence starting from 0
-MGT_DEV_NR=0
-VM_DEV_NR=1
-PUB_DEV_NR=2
-
-# Host interface, i.e. the interface on the Nova VM on which to expose the
-# services. Usually the device connected to the management network or the
-# one connected to the public network is used.
-HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"}
-
-#
-# Our nova host's network info
-#
-
-# Management network
-MGT_IP=${MGT_IP:-dhcp}
-MGT_NETMASK=${MGT_NETMASK:-ignored}
-
-# VM Network
-VM_IP=${VM_IP:-10.255.255.255}
-VM_NETMASK=${VM_NETMASK:-255.255.255.0}
-
-# Public network
-# Aligned with stack.sh - see FLOATING_RANGE
-PUB_IP=${PUB_IP:-172.24.4.10}
-PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
-
-# Ubuntu install settings
-UBUNTU_INST_RELEASE="xenial"
-UBUNTU_INST_TEMPLATE_NAME="Ubuntu 16.04 (64-bit) for DevStack"
-# For 12.04 use "precise" and update the template name.
-# Note, however, that 12.04 requires
-# XenServer 6.1 or later, or XCP 1.6 or later;
-# 11.10 is only really supported with XenServer 6.0.2 and later.
-UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
-UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
-UBUNTU_INST_HTTP_PROXY=""
-UBUNTU_INST_LOCALE="en_US"
-UBUNTU_INST_KEYBOARD="us"
-# network configuration for ubuntu netinstall
-UBUNTU_INST_IP="dhcp"
-UBUNTU_INST_NAMESERVERS=""
-UBUNTU_INST_NETMASK=""
-UBUNTU_INST_GATEWAY=""
-
-# Create a separate xvdb. This could be used as a backing device for Cinder
-# volumes. Specify
-# XEN_XVDB_SIZE_GB=10
-# VOLUME_BACKING_DEVICE=/dev/xvdb
-# in your localrc to avoid kernel lockups:
-# https://bugs.launchpad.net/cinder/+bug/1023755
-#
-# Set the size to 0 to avoid creating the additional disk.
-XEN_XVDB_SIZE_GB=0
-
-STACK_USER=stack
-DOMZERO_USER=domzero
-
-RC_DIR="../.."
-
-restore_nounset=$(set +o | grep nounset)
-set +u
-
-## Note that the lines below come from stackrc, to support
-## new-style config files
-source $RC_DIR/functions-common
-
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
- # Old-style user-supplied config
- source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
- # New-style user-supplied config extracted from local.conf
- source $RC_DIR/.localrc.auto
-fi
-
-$restore_nounset
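
Since xenrc sourced localrc (or .localrc.auto) last, any of the defaults
above could be overridden per deployment; a small localrc such as the
following (values illustrative) was enough:

    GUEST_NAME="MyDevStackVM"
    GUEST_PASSWORD="changeme"
    VDI_MB=10000
    # Back Cinder volumes with a dedicated disk, per the comment above
    XEN_XVDB_SIZE_GB=10
    VOLUME_BACKING_DEVICE=/dev/xvdb
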
diff --git a/tox.ini b/tox.ini
index cc7c544..74436b0 100644
--- a/tox.ini
+++ b/tox.ini
@@ -34,16 +34,7 @@
-print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
-deps =
- Pygments
- docutils
- sphinx>=1.5.1,<1.6.1
- pbr>=2.0.0,!=2.1.0
- oslosphinx
- nwdiag
- blockdiag
- sphinxcontrib-blockdiag
- sphinxcontrib-nwdiag
+deps = -r{toxinidir}/doc/requirements.txt
whitelist_externals = bash
setenv =
TOP_DIR={toxinidir}
@@ -51,11 +42,5 @@
python setup.py build_sphinx
[testenv:venv]
-deps =
- pbr>=2.0.0,!=2.1.0
- sphinx>=1.5.1,<1.6.1
- oslosphinx
- blockdiag
- sphinxcontrib-blockdiag
- sphinxcontrib-nwdiag
+deps = -r{toxinidir}/doc/requirements.txt
commands = {posargs}
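
Both environments now draw their documentation dependencies from one file;
given the two lists removed above, doc/requirements.txt presumably carries
their superset, along these lines:

    # doc/requirements.txt (expected contents, mirroring the removed lists)
    pbr>=2.0.0,!=2.1.0
    Pygments
    docutils
    sphinx>=1.5.1,<1.6.1
    oslosphinx
    nwdiag
    blockdiag
    sphinxcontrib-blockdiag
    sphinxcontrib-nwdiag
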
diff --git a/unstack.sh b/unstack.sh
index 77a151f..ccea0ef 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -45,6 +45,10 @@
# Configure Projects
# ==================
+# Determine what system we are running on. This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME`` and ``DISTRO``
+GetDistro
+
# Plugin Phase 0: override_defaults - allow plugins to override
# defaults before other services are run
run_phase override_defaults
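
Moving GetDistro ahead of the override_defaults phase means plugins can
branch on the distro (via DISTRO or the os_* variables) during phase 0; a
hypothetical plugin override_defaults could now safely do:

    # in a plugin's override_defaults phase
    if [[ "$DISTRO" == "xenial" ]]; then
        # choose a distro-specific default before services are configured
        MY_PLUGIN_BACKEND=${MY_PLUGIN_BACKEND:-native}
    fi
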
@@ -83,10 +87,6 @@
load_plugin_settings
-# Determine what system we are running on. This provides ``os_VENDOR``,
-# ``os_RELEASE``, ``os_PACKAGE``, ``os_CODENAME``
-GetOSVersion
-
set -o xtrace
# Run extras
@@ -171,15 +171,6 @@
stop_dstat
fi
-# Clean up the remainder of the screen processes
-SCREEN=$(which screen)
-if [[ -n "$SCREEN" ]]; then
- SESSION=$(screen -ls | awk "/[0-9]+.${SCREEN_NAME}/"'{ print $1 }')
- if [[ -n "$SESSION" ]]; then
- screen -X -S $SESSION quit
- fi
-fi
-
# NOTE: Cinder automatically installs the lvm2 package, independently of the
# enabled backends. So if Cinder is enabled, and installed successfully we are
# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.