Merge "Use swift port variable in keystone and cinder"
diff --git a/.gitignore b/.gitignore
index 2778a65..8870bb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@
shocco
src
stack-screenrc
+userrc_early
diff --git a/Makefile b/Makefile
index 082aff2..a6bb230 100644
--- a/Makefile
+++ b/Makefile
@@ -26,7 +26,7 @@
./unstack.sh
wheels:
- WHEELHOUSE=$(WHEELHOUSE) tools/build-wheels.sh
+ WHEELHOUSE=$(WHEELHOUSE) tools/build_wheels.sh
docs:
tox -edocs
diff --git a/README.md b/README.md
index acc3e5a..dd394c2 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@
* To provide an environment for the OpenStack CI testing on every commit
to the projects
-Read more at http://devstack.org.
+Read more at http://docs.openstack.org/developer/devstack
IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you
execute before you run them, as they install software and will alter your
@@ -77,361 +77,21 @@
of your hypervisor of choice to reduce testing cycle times. You might even save
enough time to write one more feature before the next feature freeze...
-``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo``
-for all of those tasks. However, it needs to be not-root for most of its
-work and for all of the OpenStack services. ``stack.sh`` specifically
-does not run if started as root.
+``stack.sh`` needs to have root access for a lot of tasks, but uses
+``sudo`` for all of those tasks. However, it needs to be not-root for
+most of its work and for all of the OpenStack services. ``stack.sh``
+specifically does not run if started as root.
-This is a recent change (Oct 2013) from the previous behaviour of
-automatically creating a ``stack`` user. Automatically creating
-user accounts is not the right response to running as root, so
-that bit is now an explicit step using ``tools/create-stack-user.sh``.
-Run that (as root!) or just check it out to see what DevStack's
-expectations are for the account it runs under. Many people simply
-use their usual login (the default 'ubuntu' login on a UEC image
-for example).
+DevStack will not automatically create the user, but provides a helper
+script in ``tools/create-stack-user.sh``. Run that (as root!) or just
+check it out to see what DevStack's expectations are for the account
+it runs under. Many people simply use their usual login (the default
+'ubuntu' login on a UEC image for example).
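+
+For example, to create the user, run as root:
+
+    tools/create-stack-user.sh
+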
# Customizing
-You can override environment variables used in `stack.sh` by creating file
-name `local.conf` with a ``localrc`` section as shown below. It is likely
-that you will need to do this to tweak your networking configuration should
-you need to access your cloud from a different host.
-
- [[local|localrc]]
- VARIABLE=value
-
-See the **Local Configuration** section below for more details.
-
-# Database Backend
-
-Multiple database backends are available. The available databases are defined
-in the lib/databases directory.
-`mysql` is the default database, choose a different one by putting the
-following in the `localrc` section:
-
- disable_service mysql
- enable_service postgresql
-
-`mysql` is the default database.
-
-# RPC Backend
-
-Support for a RabbitMQ RPC backend is included. Additional RPC backends may
-be available via external plugins. Enabling or disabling RabbitMQ is handled
-via the usual service functions and ``ENABLED_SERVICES``.
-
-Example disabling RabbitMQ in ``local.conf``:
-
- disable_service rabbit
-
-# Apache Frontend
-
-Apache web server can be enabled for wsgi services that support being deployed
-under HTTPD + mod_wsgi. By default, services that recommend running under
-HTTPD + mod_wsgi are deployed under Apache. To use an alternative deployment
-strategy (e.g. eventlet) for services that support an alternative to HTTPD +
-mod_wsgi set ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
-``local.conf``.
-
-Each service that can be run under HTTPD + mod_wsgi also has an override
-toggle available that can be set in your ``local.conf``.
-
-Keystone is run under HTTPD + mod_wsgi by default.
-
-Example (Keystone):
-
- KEYSTONE_USE_MOD_WSGI="True"
-
-Example (Nova):
-
- NOVA_USE_MOD_WSGI="True"
-
-Example (Swift):
-
- SWIFT_USE_MOD_WSGI="True"
-
-# Swift
-
-Swift is disabled by default. When enabled, it is configured with
-only one replica to avoid being IO/memory intensive on a small
-vm. When running with only one replica the account, container and
-object services will run directly in screen. The others services like
-replicator, updaters or auditor runs in background.
-
-If you would like to enable Swift you can add this to your `localrc` section:
-
- enable_service s-proxy s-object s-container s-account
-
-If you want a minimal Swift install with only Swift and Keystone you
-can have this instead in your `localrc` section:
-
- disable_all_services
- enable_service key mysql s-proxy s-object s-container s-account
-
-If you only want to do some testing of a real normal swift cluster
-with multiple replicas you can do so by customizing the variable
-`SWIFT_REPLICAS` in your `localrc` section (usually to 3).
-
-# Swift S3
-
-If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will
-install the swift3 middleware emulation. Swift will be configured to
-act as a S3 endpoint for Keystone so effectively replacing the
-`nova-objectstore`.
-
-Only Swift proxy server is launched in the screen session all other
-services are started in background and managed by `swift-init` tool.
-
-# Neutron
-
-Basic Setup
-
-In order to enable Neutron in a single node setup, you'll need the
-following settings in your `local.conf`:
-
- disable_service n-net
- enable_service q-svc
- enable_service q-agt
- enable_service q-dhcp
- enable_service q-l3
- enable_service q-meta
- enable_service q-metering
-
-Then run `stack.sh` as normal.
-
-DevStack supports setting specific Neutron configuration flags to the
-service, ML2 plugin, DHCP and L3 configuration files:
-
- [[post-config|/$Q_PLUGIN_CONF_FILE]]
- [ml2]
- mechanism_drivers=openvswitch,l2population
-
- [[post-config|$NEUTRON_CONF]]
- [DEFAULT]
- quota_port=42
-
- [[post-config|$Q_L3_CONF_FILE]]
- [DEFAULT]
- agent_mode=legacy
-
- [[post-config|$Q_DHCP_CONF_FILE]]
- [DEFAULT]
- dnsmasq_dns_servers = 8.8.8.8,8.8.4.4
-
-The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute
-hosts. This is a simple way to configure the ml2 plugin:
-
- # VLAN configuration
- ENABLE_TENANT_VLANS=True
-
- # GRE tunnel configuration
- ENABLE_TENANT_TUNNELS=True
-
- # VXLAN tunnel configuration
- Q_ML2_TENANT_NETWORK_TYPE=vxlan
-
-The above will default in DevStack to using the OVS on each compute host.
-To change this, set the `Q_AGENT` variable to the agent you want to run
-(e.g. linuxbridge).
-
- Variable Name Notes
- ----------------------------------------------------------------------------
- Q_AGENT This specifies which agent to run with the
- ML2 Plugin (Typically either `openvswitch`
- or `linuxbridge`).
- Defaults to `openvswitch`.
- Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default
- is `openvswitch,linuxbridge`.
- Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to
- all available TypeDrivers.
- Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to
- `tunnel_id_ranges=1:1000'.
- Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to
- `vni_ranges=1001:2000`
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none.
-
-# Heat
-
-Heat is disabled by default (see `stackrc` file). To enable it explicitly
-you'll need the following settings in your `localrc` section:
-
- enable_service heat h-api h-api-cfn h-api-cw h-eng
-
-Heat can also run in standalone mode, and be configured to orchestrate
-on an external OpenStack cloud. To launch only Heat in standalone mode
-you'll need the following settings in your `localrc` section:
-
- disable_all_services
- enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng
- HEAT_STANDALONE=True
- KEYSTONE_SERVICE_HOST=...
- KEYSTONE_AUTH_HOST=...
-
-# Tempest
-
-If tempest has been successfully configured, a basic set of smoke
-tests can be run as follows:
-
- $ cd /opt/stack/tempest
- $ tox -efull tempest.scenario.test_network_basic_ops
-
-By default tempest is downloaded and the config file is generated, but the
-tempest package is not installed in the system's global site-packages (the
-package install includes installing dependences). So tempest won't run
-outside of tox. If you would like to install it add the following to your
-``localrc`` section:
-
- INSTALL_TEMPEST=True
-
-# DevStack on Xenserver
-
-If you would like to use Xenserver as the hypervisor, please refer
-to the instructions in `./tools/xen/README.md`.
-
-# Additional Projects
-
-DevStack has a hook mechanism to call out to a dispatch script at specific
-points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This
-allows upper-layer projects, especially those that the lower layer projects
-have no dependency on, to be added to DevStack without modifying the core
-scripts. Tempest is built this way as an example of how to structure the
-dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md`
-for more information.
-
-# Multi-Node Setup
-
-A more interesting setup involves running multiple compute nodes, with Neutron
-networks connecting VMs on different compute nodes.
-You should run at least one "controller node", which should have a `stackrc`
-that includes at least:
-
- disable_service n-net
- enable_service q-svc
- enable_service q-agt
- enable_service q-dhcp
- enable_service q-l3
- enable_service q-meta
- enable_service neutron
-
-You likely want to change your `localrc` section to run a scheduler that
-will balance VMs across hosts:
-
- SCHEDULER=nova.scheduler.filter_scheduler.FilterScheduler
-
-You can then run many compute nodes, each of which should have a `stackrc`
-which includes the following, with the IP address of the above controller node:
-
- ENABLED_SERVICES=n-cpu,rabbit,neutron,q-agt
- SERVICE_HOST=[IP of controller node]
- MYSQL_HOST=$SERVICE_HOST
- RABBIT_HOST=$SERVICE_HOST
- Q_HOST=$SERVICE_HOST
- MATCHMAKER_REDIS_HOST=$SERVICE_HOST
-
-# Multi-Region Setup
-
-We want to setup two devstack (RegionOne and RegionTwo) with shared keystone
-(same users and services) and horizon.
-Keystone and Horizon will be located in RegionOne.
-Full spec is available at:
-https://wiki.openstack.org/wiki/Heat/Blueprints/Multi_Region_Support_for_Heat.
-
-In RegionOne:
-
- REGION_NAME=RegionOne
-
-In RegionTwo:
-
- disable_service horizon
- KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
- KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
- REGION_NAME=RegionTwo
-
-# Cells
-
-Cells is a new scaling option with a full spec at:
-http://wiki.openstack.org/blueprint-nova-compute-cells.
-
-To setup a cells environment add the following to your `localrc` section:
-
- enable_service n-cell
-
-Be aware that there are some features currently missing in cells, one notable
-one being security groups. The exercises have been patched to disable
-functionality not supported by cells.
-
-# IPv6
-
-By default, most Openstack services are bound to 0.0.0.0
-and service endpoints are registered as IPv4 addresses.
-A new variable was created to control this behavior, and to
-allow for operation over IPv6 instead of IPv4.
-
-For this, add the following to `local.conf`:
-
- SERVICE_IP_VERSION=6
-
-When set to "6" devstack services will open listen sockets on ::
-and service endpoints will be registered using HOST_IPV6 as the
-address. The default value for this setting is `4`. Dual-mode
-support, for example `4+6` is not currently supported.
-
-
-# Local Configuration
-
-Historically DevStack has used ``localrc`` to contain all local configuration
-and customizations. More and more of the configuration variables available for
-DevStack are passed-through to the individual project configuration files.
-The old mechanism for this required specific code for each file and did not
-scale well. This is handled now by a master local configuration file.
-
-# local.conf
-
-The new config file ``local.conf`` is an extended-INI format that introduces
-a new meta-section header that provides some additional information such
-as a phase name and destination config filename:
-
- [[ <phase> | <config-file-name> ]]
-
-where ``<phase>`` is one of a set of phase names defined by ``stack.sh``
-and ``<config-file-name>`` is the configuration filename. The filename is
-eval'ed in the ``stack.sh`` context so all environment variables are
-available and may be used. Using the project config file variables in
-the header is strongly suggested (see the ``NOVA_CONF`` example below).
-If the path of the config file does not exist it is skipped.
-
-The defined phases are:
-
-* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced
-* **post-config** - runs after the layer 2 services are configured
- and before they are started
-* **extra** - runs after services are started and before any files
- in ``extra.d`` are executed
-* **post-extra** - runs after files in ``extra.d`` are executed
-
-The file is processed strictly in sequence; meta-sections may be specified more
-than once but if any settings are duplicated the last to appear in the file
-will be used.
-
- [[post-config|$NOVA_CONF]]
- [DEFAULT]
- use_syslog = True
-
- [osapi_v3]
- enabled = False
-
-A specific meta-section ``local|localrc`` is used to provide a default
-``localrc`` file (actually ``.localrc.auto``). This allows all custom
-settings for DevStack to be contained in a single file. If ``localrc``
-exists it will be used instead to preserve backward-compatibility.
-
- [[local|localrc]]
- FIXED_RANGE=10.254.1.0/24
- ADMIN_PASSWORD=speciale
- LOGFILE=$DEST/logs/stack.sh.log
-
-Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT*
-start with a ``/`` (slash) character. A slash will need to be added:
-
- [[post-config|/$Q_PLUGIN_CONF_FILE]]
+DevStack can be extensively configured via the configuration file
+`local.conf`. It is likely that you will need to provide and modify
+this file if you want anything other than the most basic setup. Start
+by reading the [configuration guide](doc/source/configuration.rst) for
+details of the configuration file and the many available options.
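+
+A minimal ``local.conf`` begins with a ``localrc`` meta-section, for
+example:
+
+    [[local|localrc]]
+    ADMIN_PASSWORD=secret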
diff --git a/clean.sh b/clean.sh
index 74bcaee..ae28aa9 100755
--- a/clean.sh
+++ b/clean.sh
@@ -41,13 +41,13 @@
source $TOP_DIR/lib/tls
source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/lvm
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/keystone
source $TOP_DIR/lib/glance
source $TOP_DIR/lib/nova
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/neutron-legacy
source $TOP_DIR/lib/ironic
@@ -134,7 +134,9 @@
# Clean up files
-FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN=".localrc.auto .localrc.password "
+FILES_TO_CLEAN+="docs/files docs/html shocco/ "
+FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* "
FILES_TO_CLEAN+=".stackenv .prereqs"
for file in $FILES_TO_CLEAN; do
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 90b7d44..d70d3da 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -6,42 +6,24 @@
:local:
:depth: 1
-DevStack has always tried to be mostly-functional with a minimal amount
-of configuration. The number of options has ballooned as projects add
-features, new projects added and more combinations need to be tested.
-Historically DevStack obtained all local configuration and
-customizations from a ``localrc`` file. The number of configuration
-variables that are simply passed-through to the individual project
-configuration files is also increasing. The old mechanism for this
-(``EXTRAS_OPTS`` and friends) required specific code for each file and
-did not scale well.
-
-In Oct 2013 a new configuration method was introduced (in `review
-46768 <https://review.openstack.org/#/c/46768/>`__) to hopefully
-simplify this process and meet the following goals:
-
-- contain all non-default local configuration in a single file
-- be backward-compatible with ``localrc`` to smooth the transition
- process
-- allow settings in arbitrary configuration files to be changed
-
local.conf
==========
-The new configuration file is ``local.conf`` and should reside in the
-root Devstack directory. An example of such ``local.conf`` file
-is provided in the ``devstack/samples`` directory. Copy this file into
-the root Devstack directory and adapt it to your needs. It is a modified INI
-format file that introduces a meta-section header to carry additional
-information regarding the configuration files to be changed.
+DevStack configuration is modified via the file ``local.conf``. It is
+a modified INI format file that introduces a meta-section header to
+carry additional information regarding the configuration files to be
+changed.
+
+A sample is provided in ``devstack/samples``.
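+
+For example, assuming the sample lives at ``samples/local.conf``, copy
+it into the root DevStack directory and adapt it to your needs:
+
+::
+
+    cp samples/local.conf local.conf
+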
The new header is similar to a normal INI section header but with double
brackets (``[[ ... ]]``) and two internal fields separated by a pipe
-(``|``):
-
+(``|``). Note that there are no spaces between the double brackets and the
+internal fields. Likewise, there are no spaces between the pipe and the
+internal fields:
::
- [[ <phase> | <config-file-name> ]]
+ '[[' <phase> '|' <config-file-name> ']]'
where ``<phase>`` is one of a set of phase names defined by ``stack.sh``
and ``<config-file-name>`` is the configuration filename. The filename
@@ -148,6 +130,15 @@
Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``.
``HOST_IPV6`` is not set by default.
+Historical Notes
+================
+
+Historically DevStack obtained all local configuration and
+customizations from a ``localrc`` file. In Oct 2013 the
+``local.conf`` configuration method was introduced (in `review 46768
+<https://review.openstack.org/#/c/46768/>`__) to simplify this
+process.
+
Configuration Notes
===================
@@ -228,6 +219,98 @@
SYSLOG_HOST=$HOST_IP
SYSLOG_PORT=516
+
+Example Logging Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For example, for non-interactive installs you will probably want to
+save output to a file, keep service logs and disable color in the
+stored files.
+
+ ::
+
+ [[local|localrc]]
+ DEST=/opt/stack/
+ LOGDIR=$DEST/logs
+ LOGFILE=$LOGDIR/stack.sh.log
+ LOG_COLOR=False
+
+Database Backend
+----------------
+
+Multiple database backends are available. The available databases are defined
+in the lib/databases directory.
+`mysql` is the default database; choose a different one by putting the
+following in the `localrc` section:
+
+ ::
+
+ disable_service mysql
+ enable_service postgresql
+
+
+RPC Backend
+-----------
+
+Support for a RabbitMQ RPC backend is included. Additional RPC
+backends may be available via external plugins. Enabling or disabling
+RabbitMQ is handled via the usual service functions and
+``ENABLED_SERVICES``.
+
+Example disabling RabbitMQ in ``local.conf``:
+
+::
+
+ disable_service rabbit
+
+
+Apache Frontend
+---------------
+
+The Apache web server can be enabled for wsgi services that support
+being deployed under HTTPD + mod_wsgi. By default, services that
+recommend running under HTTPD + mod_wsgi are deployed under Apache. To
+use an alternative deployment strategy (e.g. eventlet) for services
+that support an alternative to HTTPD + mod_wsgi set
+``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your
+``local.conf``.
+
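+For example, to use the alternative deployment strategy for services
+that support one, set this in your ``local.conf``:
+
+::
+
+    ENABLE_HTTPD_MOD_WSGI_SERVICES=False
+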
+Each service that can be run under HTTPD + mod_wsgi also has an
+override toggle available that can be set in your ``local.conf``.
+
+Keystone is run under Apache with ``mod_wsgi`` by default.
+
+Example (Keystone):
+
+::
+
+ KEYSTONE_USE_MOD_WSGI="True"
+
+Example (Nova):
+
+::
+
+ NOVA_USE_MOD_WSGI="True"
+
+Example (Swift):
+
+::
+
+ SWIFT_USE_MOD_WSGI="True"
+
+Example (Heat):
+
+::
+
+ HEAT_USE_MOD_WSGI="True"
+
+
+Example (Cinder):
+
+::
+
+ CINDER_USE_MOD_WSGI="True"
+
+
Libraries from Git
------------------
@@ -295,48 +378,6 @@
PIP_UPGRADE=True
-Swift
------
-
-Swift is now used as the back-end for the S3-like object store. When
-enabled Nova's objectstore (``n-obj`` in ``ENABLED_SERVICES``) is
-automatically disabled. Enable Swift by adding it services to
-``ENABLED_SERVICES``
-
- ::
-
- enable_service s-proxy s-object s-container s-account
-
-Setting Swift's hash value is required and you will be prompted for it
-if Swift is enabled so just set it to something already:
-
- ::
-
- SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
-
-For development purposes the default number of replicas is set to
-``1`` to reduce the overhead required. To better simulate a production
-deployment set this to ``3`` or more.
-
- ::
-
- SWIFT_REPLICAS=3
-
-The data for Swift is stored in the source tree by default (in
-``$DEST/swift/data``) and can be moved by setting
-``SWIFT_DATA_DIR``. The specified directory will be created if it does
-not exist.
-
- ::
-
- SWIFT_DATA_DIR=$DEST/data/swift
-
-*Note*: Previously just enabling ``swift`` was sufficient to start the
-Swift services. That does not provide proper service granularity,
-particularly in multi-host configurations, and is considered
-deprecated. Some service combination tests now check for specific
-Swift services and the old blanket acceptance will longer work
-correctly.
Service Catalog Backend
-----------------------
@@ -354,47 +395,46 @@
DevStack's default configuration in ``sql`` mode is set in
``files/keystone_data.sh``
-Cinder
-------
-The logical volume group used to hold the Cinder-managed volumes is
-set by ``VOLUME_GROUP``, the logical volume name prefix is set with
-``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
-with ``VOLUME_BACKING_FILE_SIZE``.
+Guest Images
+------------
+
+Images provided in URLs via the comma-separated ``IMAGE_URLS``
+variable will be downloaded and uploaded to glance by DevStack.
+
+Default guest images are predefined for each type of hypervisor and
+its testing requirements in ``stack.sh``. Setting
+``DOWNLOAD_DEFAULT_IMAGES=False`` will prevent DevStack from
+downloading these default images; in that case, you will want to
+populate ``IMAGE_URLS`` with sufficient images to satisfy your
+testing requirements.
::
- VOLUME_GROUP="stack-volumes"
- VOLUME_NAME_PREFIX="volume-"
- VOLUME_BACKING_FILE_SIZE=10250M
+ DOWNLOAD_DEFAULT_IMAGES=False
+ IMAGE_URLS="http://foo.bar.com/image.qcow,"
+ IMAGE_URLS+="http://foo.bar.com/image2.qcow"
-Multi-host DevStack
--------------------
-Running DevStack with multiple hosts requires a custom ``local.conf``
-section for each host. The master is the same as a single host
-installation with ``MULTI_HOST=True``. The slaves have fewer services
-enabled and a couple of host variables pointing to the master.
+Instance Type
+-------------
-Master
-~~~~~~
+``DEFAULT_INSTANCE_TYPE`` can be used to configure the default instance
+type. When this parameter is not specified, DevStack creates additional
+micro and nano flavors for really small instances to run Tempest tests.
-Set ``MULTI_HOST`` to true
- ::
+For guests with larger memory requirements, ``DEFAULT_INSTANCE_TYPE``
+should be specified in the configuration file so Tempest selects the
+default flavors instead.
- MULTI_HOST=True
-
-Slave
-~~~~~
-
-Set the following options to point to the master
+KVM on Power with QEMU 2.4 requires 512 MB to load the firmware (see
+`QEMU 2.4 - PowerPC <http://wiki.qemu.org/ChangeLog/2.4>`__), so users
+running instances on ppc64/ppc64le can choose one of the
+default-created flavors as follows:
::
- MYSQL_HOST=w.x.y.z
- RABBIT_HOST=w.x.y.z
- GLANCE_HOSTPORT=w.x.y.z:9292
- ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api
+ DEFAULT_INSTANCE_TYPE=m1.tiny
+
IP Version
----------
@@ -447,29 +487,163 @@
HOST_IPV6=${some_local_ipv6_address}
-Examples
-========
+Multi-node setup
+~~~~~~~~~~~~~~~~
-- Eliminate a Cinder pass-through (``CINDER_PERIODIC_INTERVAL``):
+See the :doc:`multi-node lab guide <guides/multinode-lab>`.
- ::
+Projects
+--------
- [[post-config|$CINDER_CONF]]
- [DEFAULT]
- periodic_interval = 60
+Neutron
+~~~~~~~
-- Sample ``local.conf`` with screen logging enabled:
+See the :doc:`neutron configuration guide <guides/neutron>` for
+details on configuring Neutron.
- ::
- [[local|localrc]]
- FIXED_RANGE=10.254.1.0/24
- NETWORK_GATEWAY=10.254.1.1
- LOGDAYS=1
- LOGDIR=$DEST/logs
- LOGFILE=$LOGDIR/stack.sh.log
- ADMIN_PASSWORD=quiet
- DATABASE_PASSWORD=$ADMIN_PASSWORD
- RABBIT_PASSWORD=$ADMIN_PASSWORD
- SERVICE_PASSWORD=$ADMIN_PASSWORD
- SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50
+Swift
+~~~~~
+
+Swift is disabled by default. When enabled, it is configured with
+only one replica to avoid being IO/memory intensive on a small
+VM. When running with only one replica the account, container and
+object services will run directly in screen. The other services, such
+as the replicator, updaters and auditor, run in the background.
+
+If you would like to enable Swift you can add this to your `localrc`
+section:
+
+::
+
+ enable_service s-proxy s-object s-container s-account
+
+If you want a minimal Swift install with only Swift and Keystone you
+can have this instead in your `localrc` section:
+
+::
+
+ disable_all_services
+ enable_service key mysql s-proxy s-object s-container s-account
+
+If you want to test a more realistic swift cluster with multiple
+replicas you can do so by customizing the variable `SWIFT_REPLICAS`
+in your `localrc` section (usually to 3).
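+
+For example:
+
+::
+
+    SWIFT_REPLICAS=3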
+
+Swift S3
+++++++++
+
+If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will
+install the swift3 middleware emulation. Swift will be configured to
+act as an S3 endpoint for Keystone, effectively replacing the
+`nova-objectstore`.
+
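+For example, one way to enable it is with the usual service functions
+in your `localrc` section:
+
+::
+
+    enable_service swift3
+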
+Only the Swift proxy server is launched in the screen session; all
+other services are started in the background and managed by the
+`swift-init` tool.
+
+Heat
+~~~~
+
+Heat is disabled by default (see `stackrc` file). To enable it
+explicitly you'll need the following settings in your `localrc`
+section:
+
+::
+
+ enable_service heat h-api h-api-cfn h-api-cw h-eng
+
+Heat can also run in standalone mode, and be configured to orchestrate
+on an external OpenStack cloud. To launch only Heat in standalone mode
+you'll need the following settings in your `localrc` section:
+
+::
+
+ disable_all_services
+ enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng
+ HEAT_STANDALONE=True
+ KEYSTONE_SERVICE_HOST=...
+ KEYSTONE_AUTH_HOST=...
+
+Tempest
+~~~~~~~
+
+If tempest has been successfully configured, a basic set of smoke
+tests can be run as follows:
+
+::
+
+ $ cd /opt/stack/tempest
+ $ tox -efull tempest.scenario.test_network_basic_ops
+
+By default tempest is downloaded and the config file is generated, but the
+tempest package is not installed in the system's global site-packages (the
+package install includes installing dependencies). So tempest won't run
+outside of tox. If you would like to install it add the following to your
+``localrc`` section:
+
+::
+
+ INSTALL_TEMPEST=True
+
+
+Xenserver
+~~~~~~~~~
+
+If you would like to use Xenserver as the hypervisor, please refer to
+the instructions in `./tools/xen/README.md`.
+
+Cells
+~~~~~
+
+`Cells <http://wiki.openstack.org/blueprint-nova-compute-cells>`__ is
+an alternative scaling option. To set up a cells environment add the
+following to your `localrc` section:
+
+::
+
+ enable_service n-cell
+
+Be aware that there are some features currently missing in cells, a
+notable example being security groups. The exercises have been patched to
+disable functionality not supported by cells.
+
+Cinder
+~~~~~~
+
+The logical volume group used to hold the Cinder-managed volumes is
+set by ``VOLUME_GROUP``, the logical volume name prefix is set with
+``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
+with ``VOLUME_BACKING_FILE_SIZE``.
+
+ ::
+
+ VOLUME_GROUP="stack-volumes"
+ VOLUME_NAME_PREFIX="volume-"
+ VOLUME_BACKING_FILE_SIZE=10250M
+
+
+Keystone
+~~~~~~~~
+
+Multi-Region Setup
+++++++++++++++++++
+
+We want to set up two DevStack deployments (RegionOne and RegionTwo)
+with shared keystone (same users and services) and horizon. Keystone
+and Horizon will be located in RegionOne. The full spec is available at:
+`<https://wiki.openstack.org/wiki/Heat/Blueprints/Multi_Region_Support_for_Heat>`__.
+
+In RegionOne:
+
+::
+
+ REGION_NAME=RegionOne
+
+In RegionTwo:
+
+::
+
+ disable_service horizon
+ KEYSTONE_SERVICE_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
+ KEYSTONE_AUTH_HOST=<KEYSTONE_IP_ADDRESS_FROM_REGION_ONE>
+ REGION_NAME=RegionTwo
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 0db8932..3562bfa 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -124,24 +124,30 @@
enable_service q-svc
-How do I run a specific OpenStack milestone?
+How do I run a specific OpenStack release?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-OpenStack milestones have tags set in the git repo. Set the
-appropriate tag in the ``*_BRANCH`` variables in ``local.conf``.
-Swift is on its own release schedule so pick a tag in the Swift repo
-that is just before the milestone release. For example:
+DevStack master tracks the upstream master of all the projects. If you
+would like to run a stable branch of OpenStack, you should use the
+corresponding stable branch of DevStack as well. For instance, the
+``stable/kilo`` version of DevStack will already default to all the
+projects running at ``stable/kilo`` levels.
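+
+For example, a sketch of cloning the ``stable/kilo`` branch (assuming
+the usual repository location):
+
+::
+
+    git clone git://git.openstack.org/openstack-dev/devstack -b stable/kilo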
- ::
+Note: it's also possible to manually adjust the ``*_BRANCH`` variables
+further if you would like to test specific milestones, or even custom
+out-of-tree branches. This is done with entries like the following in
+your ``local.conf``:
+
+::
+
[[local|localrc]]
- GLANCE_BRANCH=stable/kilo
- HORIZON_BRANCH=stable/kilo
- KEYSTONE_BRANCH=stable/kilo
- NOVA_BRANCH=stable/kilo
- GLANCE_BRANCH=stable/kilo
- NEUTRON_BRANCH=stable/kilo
- SWIFT_BRANCH=2.3.0
+ GLANCE_BRANCH=11.0.0.0rc1
+ NOVA_BRANCH=12.0.0.0rc1
+
+
+Upstream DevStack is only tested with master and stable
+branches. Setting custom BRANCH definitions is not guaranteed to
+produce working results.
What can I do about RabbitMQ not wanting to start on my fresh new VM?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 1530a84..5660bc5 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -128,7 +128,7 @@
MULTI_HOST=1
LOGFILE=/opt/stack/logs/stack.sh.log
ADMIN_PASSWORD=labstack
- MYSQL_PASSWORD=supersecret
+ DATABASE_PASSWORD=supersecret
RABBIT_PASSWORD=supersecrete
SERVICE_PASSWORD=supersecrete
SERVICE_TOKEN=xyzpdqlazydog
@@ -169,7 +169,7 @@
MULTI_HOST=1
LOGFILE=/opt/stack/logs/stack.sh.log
ADMIN_PASSWORD=labstack
- MYSQL_PASSWORD=supersecret
+ DATABASE_PASSWORD=supersecret
RABBIT_PASSWORD=supersecrete
SERVICE_PASSWORD=supersecrete
SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 40a5632..5891f68 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -52,14 +52,14 @@
RABBIT_HOST=172.18.161.6
GLANCE_HOSTPORT=172.18.161.6:9292
ADMIN_PASSWORD=secrete
- MYSQL_PASSWORD=secrete
+ DATABASE_PASSWORD=secrete
RABBIT_PASSWORD=secrete
SERVICE_PASSWORD=secrete
SERVICE_TOKEN=secrete
## Neutron options
Q_USE_SECGROUP=True
- FLOATING_RANGE="172.18.161.1/24"
+ FLOATING_RANGE="172.18.161.0/24"
FIXED_RANGE="10.0.0.0/24"
Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
PUBLIC_NETWORK_GATEWAY="172.18.161.1"
@@ -72,20 +72,57 @@
+Neutron Networking with Open vSwitch and Provider Networks
+==========================================================
+
+In some instances, it is desirable to use neutron's provider
+networking extension, so that networks that are configured on an
+external router can be utilized by neutron, and instances created via
+Nova can attach to the network managed by the external router.
+
+For example, in some lab environments, a hardware router has been
+pre-configured by another party, and an OpenStack developer has been
+given a VLAN tag and IP address range, so that instances created via
+DevStack will use the external router for L3 connectivity, as opposed
+to the neutron L3 service.
+
+Physical Network Setup
+----------------------
+
+.. nwdiag::
+
+ nwdiag {
+ inet [ shape = cloud ];
+ router;
+ inet -- router;
+
+ network provider_net {
+ address = "203.0.113.0/24"
+ router [ address = "203.0.113.1" ];
+ controller;
+ compute1;
+ compute2;
+ }
+
+ network control_plane {
+ router [ address = "10.0.0.1" ]
+ address = "10.0.0.0/24"
+ controller [ address = "10.0.0.2" ]
+ compute1 [ address = "10.0.0.3" ]
+ compute2 [ address = "10.0.0.4" ]
+ }
+ }
-Using Neutron with Multiple Interfaces
-======================================
-
-The first interface, eth0 is used for the OpenStack management (API,
-message bus, etc) as well as for ssh for an administrator to access
-the machine.
+On a compute node, the first interface, eth0, is used for the
+OpenStack management (API, message bus, etc.) as well as for ssh for
+an administrator to access the machine.
::
stack@compute:~$ ifconfig eth0
eth0 Link encap:Ethernet HWaddr bc:16:65:20:af:fc
- inet addr:192.168.1.18
+ inet addr:10.0.0.3
eth1 is manually configured at boot to not have an IP address.
Consult your operating system documentation for the appropriate
@@ -101,9 +138,6 @@
The second physical interface, eth1 is added to a bridge (in this case
named br-ex), which is used to forward network traffic from guest VMs.
-Network traffic from eth1 on the compute nodes is then NAT'd by the
-controller node that runs Neutron's `neutron-l3-agent` and provides L3
-connectivity.
::
@@ -123,100 +157,6 @@
Interface "eth1"
-
-
-Disabling Next Generation Firewall Tools
-========================================
-
-DevStack does not properly operate with modern firewall tools. Specifically
-it will appear as if the guest VM can access the external network via ICMP,
-but UDP and TCP packets will not be delivered to the guest VM. The root cause
-of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's
-firewall manager) apply firewall rules to all interfaces in the system, rather
-then per-device. One solution to this problem is to revert to iptables
-functionality.
-
-To get a functional firewall configuration for Fedora do the following:
-
-::
-
- sudo service iptables save
- sudo systemctl disable firewalld
- sudo systemctl enable iptables
- sudo systemctl stop firewalld
- sudo systemctl start iptables
-
-
-To get a functional firewall configuration for distributions containing ufw,
-disable ufw. Note ufw is generally not enabled by default in Ubuntu. To
-disable ufw if it was enabled, do the following:
-
-::
-
- sudo service iptables save
- sudo ufw disable
-
-
-
-
-Neutron Networking with Open vSwitch
-====================================
-
-Configuring neutron, OpenStack Networking in DevStack is very similar to
-configuring `nova-network` - many of the same configuration variables
-(like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are
-used by neutron, which is intentional.
-
-The only difference is the disabling of `nova-network` in your
-local.conf, and the enabling of the neutron components.
-
-
-Configuration
--------------
-
-::
-
- FIXED_RANGE=10.0.0.0/24
- FLOATING_RANGE=192.168.27.0/24
- PUBLIC_NETWORK_GATEWAY=192.168.27.2
-
- disable_service n-net
- enable_service q-svc
- enable_service q-agt
- enable_service q-dhcp
- enable_service q-meta
- enable_service q-l3
-
- Q_USE_SECGROUP=True
- ENABLE_TENANT_VLANS=True
- TENANT_VLAN_RANGE=1000:1999
- PHYSICAL_NETWORK=default
- OVS_PHYSICAL_BRIDGE=br-ex
-
-In this configuration we are defining FLOATING_RANGE to be a
-subnet that exists in the private RFC1918 address space - however in
-in a real setup FLOATING_RANGE would be a public IP address range.
-
-Note that extension drivers for the ML2 plugin is set by
-`Q_ML2_PLUGIN_EXT_DRIVERS`, and it includes 'port_security' by default. If you
-want to remove all the extension drivers (even 'port_security'), set
-`Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
-
-Neutron Networking with Open vSwitch and Provider Networks
-==========================================================
-
-In some instances, it is desirable to use neutron's provider
-networking extension, so that networks that are configured on an
-external router can be utilized by neutron, and instances created via
-Nova can attach to the network managed by the external router.
-
-For example, in some lab environments, a hardware router has been
-pre-configured by another party, and an OpenStack developer has been
-given a VLAN tag and IP address range, so that instances created via
-DevStack will use the external router for L3 connectivity, as opposed
-to the neutron L3 service.
-
-
Service Configuration
---------------------
@@ -241,8 +181,21 @@
::
+ HOST_IP=10.0.0.2
+ SERVICE_HOST=10.0.0.2
+ MYSQL_HOST=10.0.0.2
+ RABBIT_HOST=10.0.0.2
+ GLANCE_HOSTPORT=10.0.0.2:9292
PUBLIC_INTERFACE=eth1
+ ADMIN_PASSWORD=secrete
+ MYSQL_PASSWORD=secrete
+ RABBIT_PASSWORD=secrete
+ SERVICE_PASSWORD=secrete
+ SERVICE_TOKEN=secrete
+
## Neutron options
Q_USE_SECGROUP=True
ENABLE_TENANT_VLANS=True
@@ -274,24 +227,37 @@
allocated to you, so that you could access your instances from the
public internet.
-The following is a snippet of the DevStack configuration on the
-compute node.
+The following is the DevStack configuration on
+compute node 1.
::
+ HOST_IP=10.0.0.3
+ SERVICE_HOST=10.0.0.2
+ MYSQL_HOST=10.0.0.2
+ RABBIT_HOST=10.0.0.2
+ GLANCE_HOSTPORT=10.0.0.2:9292
+ ADMIN_PASSWORD=secrete
+ MYSQL_PASSWORD=secrete
+ RABBIT_PASSWORD=secrete
+ SERVICE_PASSWORD=secrete
+ SERVICE_TOKEN=secrete
+
# Services that a compute node runs
ENABLED_SERVICES=n-cpu,rabbit,q-agt
## Neutron options
- Q_USE_SECGROUP=True
- ENABLE_TENANT_VLANS=True
- TENANT_VLAN_RANGE=3001:4000
PHYSICAL_NETWORK=default
OVS_PHYSICAL_BRIDGE=br-ex
PUBLIC_INTERFACE=eth1
Q_USE_PROVIDER_NETWORKING=True
Q_L3_ENABLED=False
+Compute node 2's configuration will be exactly the same, except
+`HOST_IP` will be `10.0.0.4`.
+
When DevStack is configured to use provider networking (via
`Q_USE_PROVIDER_NETWORKING` is True and `Q_L3_ENABLED` is False) -
DevStack will automatically add the network interface defined in
@@ -301,3 +267,47 @@
created, named `br-ex` which is managed by Open vSwitch, and the
second interface on the compute node, `eth1` is attached to the
bridge, to forward traffic sent by guest VMs.
+
+Miscellaneous Tips
+==================
+
+
+Disabling Next Generation Firewall Tools
+----------------------------------------
+
+DevStack does not properly operate with modern firewall tools. Specifically
+it will appear as if the guest VM can access the external network via ICMP,
+but UDP and TCP packets will not be delivered to the guest VM. The root cause
+of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's
+firewall manager) apply firewall rules to all interfaces in the system, rather
+than per-device. One solution to this problem is to revert to iptables
+functionality.
+
+To get a functional firewall configuration for Fedora do the following:
+
+::
+
+ sudo service iptables save
+ sudo systemctl disable firewalld
+ sudo systemctl enable iptables
+ sudo systemctl stop firewalld
+ sudo systemctl start iptables
+
+
+To get a functional firewall configuration for distributions containing ufw,
+disable ufw. Note ufw is generally not enabled by default in Ubuntu. To
+disable ufw if it was enabled, do the following:
+
+::
+
+ sudo service iptables save
+ sudo ufw disable
+
+Configuring Extension Drivers for the ML2 Plugin
+------------------------------------------------
+
+Extension drivers for the ML2 plugin are set with the variable
+`Q_ML2_PLUGIN_EXT_DRIVERS`, which includes the 'port_security' extension
+by default. If you want to remove all the extension drivers (even
+'port_security'), set `Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
+
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 236ece9..a01c368 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -105,7 +105,7 @@
FIXED_NETWORK_SIZE=256
FLAT_INTERFACE=eth0
ADMIN_PASSWORD=supersecret
- MYSQL_PASSWORD=iheartdatabases
+ DATABASE_PASSWORD=iheartdatabases
RABBIT_PASSWORD=flopsymopsy
SERVICE_PASSWORD=iheartksl
SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index c2ce1a3..53c3fa9 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -64,7 +64,7 @@
cd devstack
echo '[[local|localrc]]' > local.conf
echo ADMIN_PASSWORD=password >> local.conf
- echo MYSQL_PASSWORD=password >> local.conf
+ echo DATABASE_PASSWORD=password >> local.conf
echo RABBIT_PASSWORD=password >> local.conf
echo SERVICE_PASSWORD=password >> local.conf
echo SERVICE_TOKEN=tokentoken >> local.conf
@@ -78,6 +78,11 @@
As DevStack will refuse to run as root, this configures ``cloud-init``
to create a non-root user and run the ``start.sh`` script as that user.
+If you are using cloud-init and you have not
+`enabled custom logging <../configuration.html#enable-logging>`_ of the stack
+output, then it can be found in
+``/var/log/cloud-init-output.log`` by default.
+
Launching By Hand
-----------------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 2dd0241..99e96b1 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -154,7 +154,6 @@
* `functions <functions.html>`__ - DevStack-specific functions
* `functions-common <functions-common.html>`__ - Functions shared with other projects
* `lib/apache <lib/apache.html>`__
-* `lib/ceilometer <lib/ceilometer.html>`__
* `lib/ceph <lib/ceph.html>`__
* `lib/cinder <lib/cinder.html>`__
* `lib/database <lib/database.html>`__
@@ -173,7 +172,7 @@
* `lib/swift <lib/swift.html>`__
* `lib/tempest <lib/tempest.html>`__
* `lib/tls <lib/tls.html>`__
-* `lib/zaqar <lib/zaqar.html>`__
+* `lib/trove <lib/trove.html>`__
* `unstack.sh <unstack.sh.html>`__
* `clean.sh <clean.sh.html>`__
* `run\_tests.sh <run_tests.sh.html>`__
@@ -181,7 +180,6 @@
* `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
* `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
* `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
-* `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
* `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
* `inc/ini-config <inc/ini-config.html>`__
@@ -239,4 +237,3 @@
* `exercises/sec\_groups.sh <exercises/sec_groups.sh.html>`__
* `exercises/swift.sh <exercises/swift.sh.html>`__
* `exercises/volumes.sh <exercises/volumes.sh.html>`__
-* `exercises/zaqar.sh <exercises/zaqar.sh.html>`__
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 99bfb85..49b3a7f 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -17,17 +17,31 @@
The following are plugins that exist for official OpenStack projects.
-+--------------------+-------------------------------------------+--------------------+
-|Plugin Name |URL |Comments |
-+--------------------+-------------------------------------------+--------------------+
-|magnum |git://git.openstack.org/openstack/magnum | |
-+--------------------+-------------------------------------------+--------------------+
-|sahara |git://git.openstack.org/openstack/sahara | |
-+--------------------+-------------------------------------------+--------------------+
-|trove |git://git.openstack.org/openstack/trove | |
-+--------------------+-------------------------------------------+--------------------+
-|zaqar |git://git.openstack.org/openstack/zaqar | |
-+--------------------+-------------------------------------------+--------------------+
++------------------+---------------------------------------------+--------------------+
+|Plugin Name |URL |Comments |
++------------------+---------------------------------------------+--------------------+
+|aodh |git://git.openstack.org/openstack/aodh | alarming |
++------------------+---------------------------------------------+--------------------+
+|barbican |git://git.openstack.org/openstack/barbican | key management |
++------------------+---------------------------------------------+--------------------+
+|ceilometer |git://git.openstack.org/openstack/ceilometer | metering |
++------------------+---------------------------------------------+--------------------+
+|gnocchi |git://git.openstack.org/openstack/gnocchi | metric |
++------------------+---------------------------------------------+--------------------+
+|magnum |git://git.openstack.org/openstack/magnum | |
++------------------+---------------------------------------------+--------------------+
+|manila |git://git.openstack.org/openstack/manila | file shares |
++------------------+---------------------------------------------+--------------------+
+|mistral |git://git.openstack.org/openstack/mistral | |
++------------------+---------------------------------------------+--------------------+
+|rally |git://git.openstack.org/openstack/rally | |
++------------------+---------------------------------------------+--------------------+
+|sahara |git://git.openstack.org/openstack/sahara | |
++------------------+---------------------------------------------+--------------------+
+|trove |git://git.openstack.org/openstack/trove | |
++------------------+---------------------------------------------+--------------------+
+|zaqar |git://git.openstack.org/openstack/zaqar | |
++------------------+---------------------------------------------+--------------------+
@@ -54,7 +68,7 @@
| Plugin Name | URL | Comments |
| | | |
+-------------+------------------------------------------------------------+------------+
-|glusterfs |git://git.openstack.org/stackforge/devstack-plugin-glusterfs| |
+|glusterfs |git://git.openstack.org/openstack/devstack-plugin-glusterfs | |
+-------------+------------------------------------------------------------+------------+
| | | |
+-------------+------------------------------------------------------------+------------+
@@ -62,14 +76,16 @@
Additional Services
===================
-+-------------+------------------------------------------+------------+
-| Plugin Name | URL | Comments |
-| | | |
-+-------------+------------------------------------------+------------+
-|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ |
-+-------------+------------------------------------------+------------+
-| | | |
-+-------------+------------------------------------------+------------+
++----------------+--------------------------------------------------+------------+
+| Plugin Name | URL | Comments |
+| | | |
++----------------+--------------------------------------------------+------------+
+|ec2-api |git://git.openstack.org/openstack/ec2-api |[as1]_ |
++----------------+--------------------------------------------------+------------+
+|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector| |
++----------------+--------------------------------------------------+------------+
+| | | |
++----------------+--------------------------------------------------+------------+
.. [as1] first functional devstack plugin, hence why used in most of
the examples.
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 803dd08..8bd3797 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -56,7 +56,7 @@
An example would be as follows::
- enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
+ enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
plugin.sh contract
==================
@@ -178,7 +178,7 @@
===============
Devstack provides a framework for getting packages installed at an early
-phase of its execution. This packages may be defined in a plugin as files
+phase of its execution. These packages may be defined in a plugin as files
that contain new-line separated lists of packages required by the plugin
Supported packaging systems include apt and yum across multiple distributions.
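+
+For example, a hypothetical plugin needing extra packages on apt-based
+systems might ship a file (assuming the ``devstack/files/debs/<plugin>``
+layout) containing:
+
+::
+
+    libffi-dev
+    libssl-dev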
@@ -202,13 +202,12 @@
For everyday use, DevStack plugins can exist in any git tree that's
accessible on the internet. However, when using DevStack plugins in
the OpenStack gate, they must live in projects in OpenStack's
-gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are
-fine. This allows testing of the plugin as well as provides network
+gerrit. This allows testing of the plugin as well as provides network
isolation against upstream git repository failures (which we see often
enough to be an issue).
Ideally a plugin will be included within the ``devstack`` directory of
-the project they are being tested. For example, the stackforge/ec2-api
+the project being tested. For example, the openstack/ec2-api
project has its plugin support in its own tree.
However, sometimes a DevStack plugin might be used solely to
@@ -218,7 +217,7 @@
integration of SDN controllers (e.g. ovn, OpenDayLight), or
integration of alternate RPC systems (e.g. zmq, qpid). In these cases
the best practice is to build a dedicated
-``stackforge/devstack-plugin-FOO`` project.
+``openstack/devstack-plugin-FOO`` project.
To enable a plugin to be used in a gate job, the following lines will
be needed in your ``jenkins/jobs/<project>.yaml`` definition in
@@ -228,12 +227,12 @@
# Because we are testing a non standard project, add the
# our project repository. This makes zuul do the right
# reference magic for testing changes.
- export PROJECTS="stackforge/ec2-api $PROJECTS"
+ export PROJECTS="openstack/ec2-api $PROJECTS"
# note the actual url here is somewhat irrelevant because it
# caches in nodepool, however make it a valid url for
# documentation purposes.
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api"
+ export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
See Also
========
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 01d548d..808ef76 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -31,18 +31,13 @@
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
+# Test as the admin user
+# note this imports stackrc/functions, etc
+. $TOP_DIR/openrc admin admin
# Import exercise configuration
source $TOP_DIR/exerciserc
-# Test as the admin user
-. $TOP_DIR/openrc admin admin
-
# If nova api is not enabled we exit with exitcode 55 so that
# the exercise is skipped
is_service_enabled n-api || exit 55
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index a0de4cc..a8fbd86 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -122,41 +122,47 @@
}
function get_image_id {
- local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ local IMAGE_ID
+ IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
echo "$IMAGE_ID"
}
function get_tenant_id {
local TENANT_NAME=$1
- local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ local TENANT_ID
+ TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
echo "$TENANT_ID"
}
function get_user_id {
local USER_NAME=$1
- local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
+ local USER_ID
+ USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
echo "$USER_ID"
}
function get_role_id {
local ROLE_NAME=$1
- local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+ local ROLE_ID
+ ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
echo "$ROLE_ID"
}
function get_network_id {
local NETWORK_NAME="$1"
- local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+ local NETWORK_ID
+ NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
echo $NETWORK_ID
}
function get_flavor_id {
local INSTANCE_TYPE=$1
- local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ local FLAVOR_ID
+ FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
echo "$FLAVOR_ID"
}
@@ -185,13 +191,15 @@
function remove_tenant {
local TENANT=$1
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
openstack project delete $TENANT_ID
}
function remove_user {
local USER=$1
- local USER_ID=$(get_user_id $USER)
+ local USER_ID
+ USER_ID=$(get_user_id $USER)
openstack user delete $USER_ID
}
@@ -221,9 +229,11 @@
local NET_NAME="${TENANT}-net$NUM"
local ROUTER_NAME="${TENANT}-router${NUM}"
source $TOP_DIR/openrc admin admin
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
source $TOP_DIR/openrc $TENANT $TENANT
- local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ local NET_ID
+ NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
neutron_debug_admin probe-create --device-owner compute $NET_ID
@@ -251,7 +261,8 @@
done
#TODO (nati) Add multi-nic test
#TODO (nati) Add public-net test
- local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
+ local VM_UUID
+ VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
--image $(get_image_id) \
$NIC \
$TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
@@ -309,7 +320,8 @@
local NUM=$2
local NET_NAME="${TENANT}-net$NUM"
source $TOP_DIR/openrc admin admin
- local TENANT_ID=$(get_tenant_id $TENANT)
+ local TENANT_ID
+ TENANT_ID=$(get_tenant_id $TENANT)
#TODO(nati) comment out until l3-agent merged
#for res in port subnet net router;do
for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
diff --git a/exercises/zaqar.sh b/exercises/zaqar.sh
deleted file mode 100755
index c370b12..0000000
--- a/exercises/zaqar.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-
-# **zaqar.sh**
-
-# Sanity check that Zaqar started if enabled
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-is_service_enabled zaqar-server || exit 55
-
-$CURL_GET http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/extras.d/70-zaqar.sh b/extras.d/70-zaqar.sh
deleted file mode 100644
index 63c4fd5..0000000
--- a/extras.d/70-zaqar.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# zaqar.sh - Devstack extras script to install Zaqar
-
-if is_service_enabled zaqar-server; then
- if [[ "$1" == "source" ]]; then
- # Initial source
- source $TOP_DIR/lib/zaqar
- elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing Zaqar"
- install_zaqarclient
- install_zaqar
- elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- echo_summary "Configuring Zaqar"
- configure_zaqar
- configure_zaqarclient
-
- if is_service_enabled key; then
- create_zaqar_accounts
- fi
-
- elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- echo_summary "Initializing Zaqar"
- init_zaqar
- start_zaqar
- fi
-
- if [[ "$1" == "unstack" ]]; then
- stop_zaqar
- fi
-fi
diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template
deleted file mode 100644
index 79f14c3..0000000
--- a/files/apache-ceilometer.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %PORT%
-
-<VirtualHost *:%PORT%>
- WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup ceilometer-api
- WSGIScriptAlias / %WSGIAPP%
- WSGIApplicationGroup %{GLOBAL}
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/%APACHE_NAME%/ceilometer.log
- CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template
new file mode 100644
index 0000000..e1246f1
--- /dev/null
+++ b/files/apache-cinder-api.template
@@ -0,0 +1,26 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess osapi_volume processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup osapi_volume
+ WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/c-api.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+
+ <Directory %CINDER_BIN_DIR%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
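
As with the other Apache templates, the %NAME% tokens are placeholders
that the owning lib script substitutes when the vhost is deployed. A
minimal sketch of rendering this template, modeled on the ceilometer
example removed later in this change; the variable names below are
illustrative, not necessarily the exact ones lib/cinder uses:

    cinder_apache_conf=$(apache_site_config_for cinder-api)
    sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf
    sudo sed -e "
        s|%PUBLICPORT%|$CINDER_SERVICE_PORT|g;
        s|%APACHE_NAME%|$APACHE_NAME|g;
        s|%APIWORKERS%|$API_WORKERS|g;
        s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g;
        s|%USER%|$STACK_USER|g;
        s|%VIRTUALENV%|$venv_path|g
    " -i $cinder_apache_conf

The %SSLENGINE%, %SSLCERTFILE% and %SSLKEYFILE% tokens are filled in
the same way when TLS is enabled.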
diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template
new file mode 100644
index 0000000..ab33c66
--- /dev/null
+++ b/files/apache-heat-api-cfn.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup heat-api-cfn
+ WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ AllowEncodedSlashes On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+
+ <Directory %HEAT_BIN_DIR%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template
new file mode 100644
index 0000000..06c91bb
--- /dev/null
+++ b/files/apache-heat-api-cloudwatch.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup heat-api-cloudwatch
+ WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ AllowEncodedSlashes On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+
+ <Directory %HEAT_BIN_DIR%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template
new file mode 100644
index 0000000..4924b39
--- /dev/null
+++ b/files/apache-heat-api.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup heat-api
+ WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ AllowEncodedSlashes On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/heat-api.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+
+ <Directory %HEAT_BIN_DIR%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 6dd1ad9..4d3d2d6 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -5,7 +5,7 @@
<VirtualHost *:%PUBLICPORT%>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
WSGIProcessGroup keystone-public
- WSGIScriptAlias / %PUBLICWSGI%
+ WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
@@ -16,12 +16,22 @@
%SSLENGINE%
%SSLCERTFILE%
%SSLKEYFILE%
+
+ <Directory %KEYSTONE_BIN%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
</VirtualHost>
<VirtualHost *:%ADMINPORT%>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
WSGIProcessGroup keystone-admin
- WSGIScriptAlias / %ADMINWSGI%
+ WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
@@ -32,6 +42,16 @@
%SSLENGINE%
%SSLCERTFILE%
%SSLKEYFILE%
+
+ <Directory %KEYSTONE_BIN%>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
</VirtualHost>
Alias /identity %PUBLICWSGI%
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
index 235d958..6443567 100644
--- a/files/apache-nova-ec2-api.template
+++ b/files/apache-nova-ec2-api.template
@@ -14,3 +14,12 @@
%SSLCERTFILE%
%SSLKEYFILE%
</VirtualHost>
+
+Alias /ec2 %PUBLICWSGI%
+<Location /ec2>
+ SetHandler wsgi-script
+ Options +ExecCGI
+ WSGIProcessGroup nova-ec2-api
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/zaqar-server b/files/debs/zaqar-server
deleted file mode 100644
index 6c2a4d1..0000000
--- a/files/debs/zaqar-server
+++ /dev/null
@@ -1,4 +0,0 @@
-python-pymongo
-mongodb-server
-pkg-config
-redis-server # NOPRIME
\ No newline at end of file
diff --git a/files/rpms/zaqar-server b/files/rpms/zaqar-server
deleted file mode 100644
index 78806fb..0000000
--- a/files/rpms/zaqar-server
+++ /dev/null
@@ -1,5 +0,0 @@
-selinux-policy-targeted
-mongodb
-mongodb-server
-pymongo
-redis # NOPRIME
diff --git a/functions b/functions
index 4001e9d..ca5955e 100644
--- a/functions
+++ b/functions
@@ -36,10 +36,9 @@
# - ``FILES`` must be set to the cache dir
# - ``GLANCE_HOSTPORT``
#
-# upload_image image-url glance-token
+# upload_image image-url
function upload_image {
local image_url=$1
- local token=$2
local image image_fname image_name
@@ -71,7 +70,7 @@
# OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
if [[ "$image_url" =~ 'openvz' ]]; then
image_name="${image_fname%.tar.gz}"
- openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
+ openstack --os-cloud=devstack-admin image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
return
fi
@@ -182,7 +181,7 @@
vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
- openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
+ openstack --os-cloud=devstack-admin image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
return
fi
@@ -199,8 +198,7 @@
force_vm_mode="--property vm_mode=xen"
fi
openstack \
- --os-token $token \
- --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
+ --os-cloud=devstack-admin \
image create \
"$image_name" --public \
--container-format=ovf --disk-format=vhd \
@@ -214,8 +212,7 @@
if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
image_name="${image_fname%.xen-raw.tgz}"
openstack \
- --os-token $token \
- --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
+ --os-cloud=devstack-admin \
image create \
"$image_name" --public \
--container-format=tgz --disk-format=raw \
@@ -231,8 +228,7 @@
fi
openstack \
- --os-token $token \
- --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
+ --os-cloud=devstack-admin \
image create \
"$image_name" --public \
--container-format=bare --disk-format=ploop \
@@ -268,7 +264,8 @@
;;
*.img)
image_name=$(basename "$image" ".img")
- local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
+ local format
+ format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
disk_format=$format
else
@@ -314,9 +311,9 @@
if [ "$container_format" = "bare" ]; then
if [ "$unpack" = "zcat" ]; then
- openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+ openstack --os-cloud=devstack-admin image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
else
- openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
+ openstack --os-cloud=devstack-admin image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -324,12 +321,12 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-cloud=devstack-admin image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-cloud=devstack-admin image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
fi
- openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+ openstack --os-cloud=devstack-admin image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
fi
}
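
With the token and endpoint arguments gone, callers now pass only the
image URL; authentication comes from the `devstack-admin` entry in
clouds.yaml (see write_clouds_yaml in functions-common below). An
illustrative call, with an example image URL:

    upload_image http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img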
@@ -345,7 +342,7 @@
# No backends registered means this is likely called from ``localrc``
# This is now deprecated usage
DATABASE_TYPE=$1
- DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n"
+ deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc"
else
# This should no longer get called...here for posterity
use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
@@ -409,7 +406,8 @@
local vm_id=$1
local network_name=$2
local nova_result="$(nova show $vm_id)"
- local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+ local ip
+ ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
if [[ $ip = "" ]];then
echo "$nova_result"
die $LINENO "[Fail] Coudn't get ipaddress of VM"
@@ -459,7 +457,8 @@
# homedir permissions on RHEL and common practice of making DEST in
# the stack user's homedir.
- local real_path=$(readlink -f $1)
+ local real_path
+ real_path=$(readlink -f $1)
local rebuilt_path=""
for i in $(echo ${real_path} | tr "/" " "); do
rebuilt_path=$rebuilt_path"/"$i
diff --git a/functions-common b/functions-common
index f6a5253..ceefd44 100644
--- a/functions-common
+++ b/functions-common
@@ -67,15 +67,69 @@
done
}
-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
+# Update/create user clouds.yaml file.
+# clouds.yaml will have
+# - A `devstack` entry for the `demo` user for the `demo` project.
+# - A `devstack-admin` entry for the `admin` user for the `admin` project.
+# write_clouds_yaml
+function write_clouds_yaml {
+ # The location is a variable to allow for easier refactoring later to make it
+    # overridable. There is currently no use case where doing so makes
+    # sense, so it's not configurable.
+ for clouds_path in /etc/openstack ~/.config/openstack ; do
+ CLOUDS_YAML=$clouds_path/clouds.yaml
+
+ sudo mkdir -p $(dirname $CLOUDS_YAML)
+ sudo chown -R $STACK_USER $(dirname $CLOUDS_YAML)
+
+ CA_CERT_ARG=''
+ if [ -f "$SSL_BUNDLE_FILE" ]; then
+ CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
+ fi
+ $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack \
+ --os-region-name $REGION_NAME \
+ --os-identity-api-version 3 \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_AUTH_URI \
+ --os-username demo \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name demo
+ $TOP_DIR/tools/update_clouds_yaml.py \
+ --file $CLOUDS_YAML \
+ --os-cloud devstack-admin \
+ --os-region-name $REGION_NAME \
+ --os-identity-api-version 3 \
+ $CA_CERT_ARG \
+ --os-auth-url $KEYSTONE_AUTH_URI \
+ --os-username admin \
+ --os-password $ADMIN_PASSWORD \
+ --os-project-name admin
+ done
+}
+
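
Once both entries are written, any client command can select its
credentials with --os-cloud instead of sourcing an openrc file, which
is exactly how the upload_image changes in functions consume them:

    openstack --os-cloud=devstack image list
    openstack --os-cloud=devstack-admin user list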
+# trueorfalse <True|False> <VAR>
+#
+# Normalize config-value provided in variable VAR to either "True" or
+# "False". If VAR is unset (i.e. $VAR evaluates as empty), the value
+# of the second argument will be used as the default value.
+#
+# Accepts as False: 0 no No NO false False FALSE
+# Accepts as True: 1 yes Yes YES true True TRUE
+#
+# usage:
+# VAL=$(trueorfalse False VAL)
function trueorfalse {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local default=$1
+
+    if [ -z "$2" ]; then
+ die $LINENO "variable to normalize required"
+ fi
local testval=${!2:-}
case "$testval" in
@@ -100,7 +154,8 @@
# backtrace level
function backtrace {
local level=$1
- local deep=$((${#BASH_SOURCE[@]} - 1))
+ local deep
+ deep=$((${#BASH_SOURCE[@]} - 1))
echo "[Call Trace]"
while [ $level -le $deep ]; do
echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
@@ -130,7 +185,8 @@
# die_if_not_set $LINENO env-var "message"
function die_if_not_set {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
@@ -140,11 +196,18 @@
$xtrace
}
+function deprecated {
+ local text=$1
+ DEPRECATED_TEXT+="\n$text"
+ echo "WARNING: $text"
+}
+
# Prints line number and "message" in error format
# err $LINENO "message"
function err {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
@@ -161,7 +224,8 @@
# err_if_not_set $LINENO env-var "message"
function err_if_not_set {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
@@ -197,7 +261,8 @@
# warn $LINENO "message"
function warn {
local exitcode=$?
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
echo $msg
@@ -433,7 +498,8 @@
local git_remote=$1
local git_dest=$2
local git_ref=$3
- local orig_dir=$(pwd)
+ local orig_dir
+ orig_dir=$(pwd)
local git_clone_flags=""
RECLONE=$(trueorfalse False RECLONE)
@@ -453,8 +519,11 @@
if echo $git_ref | egrep -q "^refs"; then
# If our branch name is a gerrit style refs/changes/...
if [[ ! -d $git_dest ]]; then
- [[ "$ERROR_ON_CLONE" = "True" ]] && \
+ if [[ "$ERROR_ON_CLONE" = "True" ]]; then
+ echo "The $git_dest project was not found; if this is a gate job, add"
+ echo "the project to the \$PROJECTS variable in the job definition."
die $LINENO "Cloning not allowed in this configuration"
+ fi
git_timed clone $git_clone_flags $git_remote $git_dest
fi
cd $git_dest
@@ -462,8 +531,11 @@
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $git_dest ]]; then
- [[ "$ERROR_ON_CLONE" = "True" ]] && \
+ if [[ "$ERROR_ON_CLONE" = "True" ]]; then
+ echo "The $git_dest project was not found; if this is a gate job, add"
+ echo "the project to the \$PROJECTS variable in the job definition."
die $LINENO "Cloning not allowed in this configuration"
+ fi
git_timed clone $git_clone_flags $git_remote $git_dest
cd $git_dest
# This checkout syntax works for both branches and tags
@@ -591,7 +663,8 @@
host_ip=""
# Find the interface used for the default route
host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
- local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | awk /$af'/ {split($2,parts,"/"); print parts[1]}')
+ local host_ips
+    host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d | awk /$af'/ {split($2,parts,"/"); print parts[1]}')
local ip
for ip in $host_ips; do
# Attempt to filter out IP addresses that are part of the fixed and
@@ -640,7 +713,8 @@
# copy over a default policy.json and policy.d for projects
function install_default_policy {
local project=$1
- local project_uc=$(echo $1|tr a-z A-Z)
+ local project_uc
+ project_uc=$(echo $1|tr a-z A-Z)
local conf_dir="${project_uc}_CONF_DIR"
# eval conf dir to get the variable
conf_dir="${!conf_dir}"
@@ -673,7 +747,8 @@
# Add a terminating comma to policy lines without one
# Remove the closing '}' and all lines following to the end-of-file
- local tmpfile=$(mktemp)
+ local tmpfile
+ tmpfile=$(mktemp)
uniq ${policy_file} | sed -e '
s/]$/],/
/^[}]/,$d
@@ -690,16 +765,13 @@
# Usage: get_or_create_domain <name> <description>
function get_or_create_domain {
local domain_id
- local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets domain id
domain_id=$(
# Gets domain id
- openstack --os-token=$OS_TOKEN --os-url=$os_url \
- --os-identity-api-version=3 domain show $1 \
+ openstack domain show $1 \
-f value -c id 2>/dev/null ||
# Creates new domain
- openstack --os-token=$OS_TOKEN --os-url=$os_url \
- --os-identity-api-version=3 domain create $1 \
+ openstack domain create $1 \
--description "$2" \
-f value -c id
)
@@ -710,13 +782,11 @@
# Usage: get_or_create_group <groupname> <domain> [<description>]
function get_or_create_group {
local desc="${3:-}"
- local os_url="$KEYSTONE_SERVICE_URI_V3"
local group_id
# Gets group id
group_id=$(
# Creates new group with --or-show
- openstack --os-token=$OS_TOKEN --os-url=$os_url \
- --os-identity-api-version=3 group create $1 \
+ openstack group create $1 \
--domain $2 --description "$desc" --or-show \
-f value -c id
)
@@ -738,8 +808,6 @@
openstack user create \
$1 \
--password "$2" \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--domain=$3 \
$email \
--or-show \
@@ -754,9 +822,7 @@
local project_id
project_id=$(
# Creates new project with --or-show
- openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
- project create $1 \
+ openstack project create $1 \
--domain=$2 \
--or-show -f value -c id
)
@@ -770,8 +836,6 @@
role_id=$(
# Creates role with --or-show
openstack role create $1 \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--or-show -f value -c id
)
echo $role_id
@@ -784,21 +848,21 @@
# Gets user role id
user_role_id=$(openstack role list \
--user $2 \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--column "ID" \
--project $3 \
--column "Name" \
| grep " $1 " | get_field 1)
if [[ -z "$user_role_id" ]]; then
- # Adds role to user
- user_role_id=$(openstack role add \
- $1 \
+        # Adds role to user and gets it back
+ openstack role add $1 \
--user $2 \
+ --project $3
+ user_role_id=$(openstack role list \
+ --user $2 \
+ --column "ID" \
--project $3 \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
- | grep " id " | get_field 2)
+ --column "Name" \
+ | grep " $1 " | get_field 1)
fi
echo $user_role_id
}
@@ -809,21 +873,15 @@
local group_role_id
# Gets group role id
group_role_id=$(openstack role list \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--group $2 \
--project $3 \
-c "ID" -f value)
if [[ -z "$group_role_id" ]]; then
# Adds role to group and get it
openstack role add $1 \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--group $2 \
--project $3
group_role_id=$(openstack role list \
- --os-url=$KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--group $2 \
--project $3 \
-c "ID" -f value)
@@ -841,8 +899,6 @@
openstack service show $2 -f value -c id 2>/dev/null ||
# Creates new service if not exists
openstack service create \
- --os-url $KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
$2 \
--name $1 \
--description="$3" \
@@ -861,8 +917,6 @@
# gets support for this, the check for the region name can be removed.
# Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772
endpoint_id=$(openstack endpoint list \
- --os-url $KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
--service $1 \
--interface $2 \
--region $4 \
@@ -870,8 +924,6 @@
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
endpoint_id=$(openstack endpoint create \
- --os-url $KEYSTONE_SERVICE_URI_V3 \
- --os-identity-api-version=3 \
$1 $2 $3 --region $4 -f value -c id)
fi
@@ -889,7 +941,8 @@
# scenarios currently that use the returned id. Ideally this behaviour
# should be pushed out to the service setups and let them create the
# endpoints they need.
- local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+ local public_id
+ public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
_get_or_create_endpoint_with_interface $1 admin $4 $2
_get_or_create_endpoint_with_interface $1 internal $5 $2
@@ -935,19 +988,26 @@
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
+ # time all the apt operations
+ time_start "apt-get"
+
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+
+ # stop the clock
+ time_stop "apt-get"
}
function _parse_package_files {
@@ -1004,10 +1064,12 @@
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local services=$@
- local package_dir=$(_get_package_dir)
+ local package_dir
+ package_dir=$(_get_package_dir)
local file_to_parse=""
local service=""
@@ -1033,10 +1095,6 @@
if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then
file_to_parse="${file_to_parse} ${package_dir}/cinder"
fi
- elif [[ $service == ceilometer-* ]]; then
- if [[ ! $file_to_parse =~ $package_dir/ceilometer ]]; then
- file_to_parse="${file_to_parse} ${package_dir}/ceilometer"
- fi
elif [[ $service == s-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/swift ]]; then
file_to_parse="${file_to_parse} ${package_dir}/swift"
@@ -1076,7 +1134,8 @@
# The same metadata used in the main DevStack prerequisite files may be used
# in these prerequisite files, see get_packages() for more info.
function get_plugin_packages {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local files_to_parse=""
local package_dir=""
@@ -1101,7 +1160,8 @@
fi
if is_ubuntu; then
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
# if there are transient errors pulling the updates, that's fine.
@@ -1429,6 +1489,22 @@
# Kill via pid if we have one available
if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
+            # oslo.service tends to stop shutting down reliably
+            # in between releases because someone believes it is
+            # dying too early due to some inflight work they have.
+            # This is a recurring tension. It happens often enough
+            # that we just account for it in devstack and assume the
+            # shutdown doesn't work.
+ #
+ # Set OSLO_SERVICE_WORKS=True to skip this block
+ if [[ -z "$OSLO_SERVICE_WORKS" ]]; then
+ # TODO(danms): Remove this double-kill when we have
+ # this fixed in all services:
+ # https://bugs.launchpad.net/oslo-incubator/+bug/1446583
+ sleep 1
+            # /bin/true because pkill on a non-existent process returns an error
+ pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true
+ fi
rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
if [[ "$USE_SCREEN" = "True" ]]; then
@@ -1453,7 +1529,7 @@
return
fi
- # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME
+ # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
# make this -o errexit safe
failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
@@ -1663,13 +1739,26 @@
local mode=$1
local phase=$2
if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i $mode $phase
+ local extra_plugin_file_name
+ for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do
+ [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase
+ # NOTE(sdague): generate a big warning about using
+ # extras.d in an unsupported way which will let us track
+ # unsupported usage in the gate.
+ local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh"
+ local extra
+ extra=$(basename $extra_plugin_file_name)
+ if [[ ! ( $exceptions =~ "$extra" ) ]]; then
+ deprecated "extras.d support is being removed in Mitaka-1"
+ deprecated "jobs for project $extra will break after that point"
+ deprecated "please move project to a supported devstack plugin model"
+ fi
done
fi
# the source phase corresponds to settings loading in plugins
if [[ "$mode" == "source" ]]; then
load_plugin_settings
+ verify_disabled_services
elif [[ "$mode" == "override_defaults" ]]; then
plugin_override_defaults
else
@@ -1725,25 +1814,26 @@
ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove")
}
-# disable_service() removes the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are present.
+# disable_service() prepares the services passed as argument to be
+# removed from the ``ENABLED_SERVICES`` list, if they are present.
#
# For example:
# disable_service rabbit
#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
+# Uses global ``DISABLED_SERVICES``
# disable_service service [service ...]
function disable_service {
- local tmpsvcs=",${ENABLED_SERVICES},"
+ local disabled_svcs="${DISABLED_SERVICES}"
+ local enabled_svcs=",${ENABLED_SERVICES},"
local service
for service in $@; do
+ disabled_svcs+=",$service"
if is_service_enabled $service; then
- tmpsvcs=${tmpsvcs//,$service,/,}
+ enabled_svcs=${enabled_svcs//,$service,/,}
fi
done
- ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+ DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs")
+ ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs")
}
# enable_service() adds the services passed as argument to the
@@ -1760,6 +1850,10 @@
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in $@; do
+ if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+ warn $LINENO "Attempt to enable_service ${service} when it has been disabled"
+ continue
+ fi
if ! is_service_enabled $service; then
tmpsvcs+=",$service"
fi
@@ -1777,7 +1871,6 @@
# There are special cases for some 'catch-all' services::
# **nova** returns true if any service enabled start with **n-**
# **cinder** returns true if any service enabled start with **c-**
-# **ceilometer** returns true if any service enabled start with **ceilometer**
# **glance** returns true if any service enabled start with **g-**
# **neutron** returns true if any service enabled start with **q-**
# **swift** returns true if any service enabled start with **s-**
@@ -1792,7 +1885,8 @@
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local enabled=1
local services=$@
@@ -1803,8 +1897,7 @@
# Look for top-level 'enabled' function for this service
if type is_${service}_enabled >/dev/null 2>&1; then
# A function exists for this service, use it
- is_${service}_enabled
- enabled=$?
+ is_${service}_enabled && enabled=0
fi
# TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
@@ -1813,7 +1906,6 @@
[[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
[[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
- [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
[[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
[[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
@@ -1866,6 +1958,18 @@
return 0
}
+# Make sure that nothing has manipulated ENABLED_SERVICES in a way
+# that conflicts with prior calls to disable_service.
+# Uses global ``ENABLED_SERVICES``
+function verify_disabled_services {
+ local service
+ for service in ${ENABLED_SERVICES//,/ }; do
+ if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+ die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'"
+ fi
+ done
+}
+
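
Together, disable_service, enable_service, and
verify_disabled_services make a disablement stick. An illustrative
sequence:

    disable_service rabbit          # rabbit lands in DISABLED_SERVICES
    enable_service rabbit           # warns and refuses to re-enable it
    ENABLED_SERVICES+=",rabbit"     # bypassing the API entirely...
    verify_disabled_services        # ...dies here during the source phase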
# System Functions
# ================
@@ -1873,7 +1977,8 @@
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local args=( $@ )
local last
@@ -1909,8 +2014,10 @@
local ip=$1
local range=$2
local masklen=${range#*/}
- local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
- local subnet=$(maskip $ip $(cidr2netmask $masklen))
+ local network
+ network=$(maskip ${range%/*} $(cidr2netmask $masklen))
+ local subnet
+ subnet=$(maskip $ip $(cidr2netmask $masklen))
[[ $network == $subnet ]]
}
@@ -1962,7 +2069,8 @@
# Returns true if the directory is on a filesystem mounted via NFS.
function is_nfs_directory {
- local mount_type=`stat -f -L -c %T $1`
+ local mount_type
+ mount_type=`stat -f -L -c %T $1`
test "$mount_type" == "nfs"
}
@@ -1973,13 +2081,15 @@
local ip=$1
local mask=$2
local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
- local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
+ local subnet
+ subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
echo $subnet
}
# Return the current python as "python<major>.<minor>"
function python_version {
- local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+ local python_version
+ python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
echo "python${python_version}"
}
@@ -2038,6 +2148,77 @@
fi
}
+# Timing infrastructure - figure out where large blocks of time are
+# used in DevStack
+#
+# The timing infrastructure for DevStack is about collecting buckets
+# of time that are spend in some subtask. For instance, that might be
+# 'apt', 'pip', 'osc', even database migrations. We do this by a pair
+# of functions: time_start / time_stop.
+#
+# These take a single parameter: $name - which specifies the name of
+# the bucket to be accounted against. The time_totals function prints
+# out the results.
+#
+# Resolution is only in whole seconds, so this should only be used
+# for long-running activities.
+
+declare -A TOTAL_TIME
+declare -A START_TIME
+
+# time_start $name
+#
+# starts the clock for a timer by name. Errors if that clock is
+# already started.
+function time_start {
+ local name=$1
+ local start_time=${START_TIME[$name]}
+ if [[ -n "$start_time" ]]; then
+ die $LINENO "Trying to start the clock on $name, but it's already been started"
+ fi
+ START_TIME[$name]=$(date +%s)
+}
+
+# time_stop $name
+#
+# stops the clock for a timer by name, and accumulate that time in the
+# global counter for that name. Errors if that clock had not
+# previously been started.
+function time_stop {
+ local name
+ local end_time
+    local elapsed_time
+ local total
+ local start_time
+
+ name=$1
+ start_time=${START_TIME[$name]}
+
+ if [[ -z "$start_time" ]]; then
+ die $LINENO "Trying to stop the clock on $name, but it was never started"
+ fi
+ end_time=$(date +%s)
+ elapsed_time=$(($end_time - $start_time))
+ total=${TOTAL_TIME[$name]:-0}
+ # reset the clock so we can start it in the future
+ START_TIME[$name]=""
+ TOTAL_TIME[$name]=$(($total + $elapsed_time))
+}
+
+# time_totals
+#
+# prints out total time
+function time_totals {
+ echo
+ echo "========================"
+ echo "DevStack Components Timed"
+ echo "========================"
+ echo
+ for t in ${!TOTAL_TIME[*]}; do
+ local v=${TOTAL_TIME[$t]}
+ echo "$t - $v secs"
+ done
+}
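
Typical usage, matching the instrumentation added to apt_get and
pip_install elsewhere in this change:

    time_start "my-task"
    sleep 2                    # the long-running work being measured
    time_stop "my-task"
    time_totals                # reports: my-task - 2 secs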
# Restore xtrace
$XTRACE
diff --git a/inc/ini-config b/inc/ini-config
index 8e7c018..42a66c6 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -15,37 +15,50 @@
# ================
# Append a new option in an ini file without replacing the old value
-# iniadd config-file section option value1 value2 value3 ...
+# iniadd [-sudo] config-file section option value1 value2 value3 ...
function iniadd {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="-sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
shift 3
local values="$(iniget_multiline $file $section $option) $@"
- iniset_multiline $file $section $option $values
+ iniset_multiline $sudo $file $section $option $values
$xtrace
}
# Comment an option in an INI file
-# inicomment config-file section option
+# inicomment [-sudo] config-file section option
function inicomment {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
- sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+ $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
$xtrace
}
# Get an option from an INI file
# iniget config-file section option
function iniget {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -60,7 +73,8 @@
# Get a multiple line option from an INI file
# iniget_multiline config-file section option
function iniget_multiline {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -75,7 +89,8 @@
# Determine if the given option is present in the INI file
# ini_has_option config-file section option
function ini_has_option {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local file=$1
local section=$2
@@ -95,10 +110,16 @@
# in the argument list. Doing that will cause incorrect configuration
# if spaces are used in the config values.
#
-# iniadd_literal config-file section option value
+# iniadd_literal [-sudo] config-file section option value
function iniadd_literal {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
@@ -110,7 +131,7 @@
fi
# Add it
- sed -i -e "/^\[$section\]/ a\\
+ $sudo sed -i -e "/^\[$section\]/ a\\
$option = $value
" "$file"
@@ -118,10 +139,16 @@
}
# Remove an option from an INI file
-# inidelete config-file section option
+# inidelete [-sudo] config-file section option
function inidelete {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
@@ -132,16 +159,23 @@
fi
# Remove old values
- sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+ $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
$xtrace
}
# Set an option in an INI file
-# iniset config-file section option value
+# iniset [-sudo] config-file section option value
+# - if the file does not exist, it is created
function iniset {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
@@ -154,26 +188,33 @@
if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
# Add section at the end
- echo -e "\n[$section]" >>"$file"
+ echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
fi
if ! ini_has_option "$file" "$section" "$option"; then
# Add it
- sed -i -e "/^\[$section\]/ a\\
+ $sudo sed -i -e "/^\[$section\]/ a\\
$option = $value
" "$file"
else
- local sep=$(echo -ne "\x01")
+ local sep
+ sep=$(echo -ne "\x01")
# Replace it
- sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+ $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
$xtrace
}
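
The new -sudo flag lets callers edit root-owned configuration files
without chowning them first; the file path below is illustrative:

    iniset -sudo /etc/example/example.conf DEFAULT debug True
    # unchanged usage for files the stack user owns:
    iniset $NOVA_CONF DEFAULT debug True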
# Set a multiple line option in an INI file
-# iniset_multiline config-file section option value1 value2 valu3 ...
+# iniset_multiline [-sudo] config-file section option value1 value2 value3 ...
function iniset_multiline {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
@@ -188,14 +229,14 @@
done
if ! grep -q "^\[$section\]" "$file"; then
# Add section at the end
- echo -e "\n[$section]" >>"$file"
+ echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
else
# Remove old values
- sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+ $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
fi
# Add new ones
for v in $values; do
- sed -i -e "/^\[$section\]/ a\\
+ $sudo sed -i -e "/^\[$section\]/ a\\
$option = $v
" "$file"
done
@@ -205,12 +246,18 @@
# Uncomment an option in an INI file
# iniuncomment config-file section option
function iniuncomment {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
+ local sudo=""
+ if [ $1 == "-sudo" ]; then
+ sudo="sudo "
+ shift
+ fi
local file=$1
local section=$2
local option=$3
- sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+ $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
$xtrace
}
diff --git a/inc/meta-config b/inc/meta-config
index e5f902d..d74db59 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -89,7 +89,8 @@
# note, configfile might be a variable (note the iniset, etc
# created in the mega-awk below is "eval"ed too, so we just leave
# it alone.
- local real_configfile=$(eval echo $configfile)
+ local real_configfile
+ real_configfile=$(eval echo $configfile)
if [ ! -f $real_configfile ]; then
touch $real_configfile
fi
diff --git a/inc/python b/inc/python
index 5c9dc5c..91ceb44 100644
--- a/inc/python
+++ b/inc/python
@@ -38,7 +38,8 @@
# Get the path to the directory where python executables are installed.
# get_python_exec_prefix
function get_python_exec_prefix {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
@@ -60,7 +61,8 @@
# pip_install_gr packagename
function pip_install_gr {
local name=$1
- local clean_name=$(get_from_global_requirements $name)
+ local clean_name
+ clean_name=$(get_from_global_requirements $name)
pip_install $clean_name
}
@@ -69,7 +71,8 @@
# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
# pip_install package [package ...]
function pip_install {
- local xtrace=$(set +o | grep xtrace)
+ local xtrace
+ xtrace=$(set +o | grep xtrace)
set +o xtrace
local upgrade=""
local offline=${OFFLINE:-False}
@@ -78,6 +81,8 @@
return
fi
+ time_start "pip_install"
+
PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE)
if [[ "$PIP_UPGRADE" = "True" ]] ; then
upgrade="--upgrade"
@@ -98,7 +103,8 @@
local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
local sudo_pip="env"
else
- local cmd_pip=$(get_pip_command)
+ local cmd_pip
+ cmd_pip=$(get_pip_command)
local sudo_pip="sudo -H"
fi
fi
@@ -107,7 +113,8 @@
# Always apply constraints
cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
- local pip_version=$(python -c "import pip; \
+ local pip_version
+ pip_version=$(python -c "import pip; \
        print(pip.__version__.split('.')[0])")
if (( pip_version<6 )); then
die $LINENO "Currently installed pip version ${pip_version} does not" \
@@ -124,7 +131,7 @@
$@
# Also install test requirements
- local test_req="$@/test-requirements.txt"
+ local test_req="${!#}/test-requirements.txt"
if [[ -e "$test_req" ]]; then
echo "Installing test-requirements for $test_req"
$sudo_pip \
@@ -135,13 +142,16 @@
$cmd_pip $upgrade \
-r $test_req
fi
+
+ time_stop "pip_install"
}
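
The `${!#}` change above deserves a note: `$#` is the argument count
and `${!#}` expands it indirectly, yielding the last positional
parameter. The old form joined every argument into the path whenever
extra pip flags preceded the project directory. A sketch:

    set -- --upgrade -e /opt/stack/nova
    test_req="${!#}/test-requirements.txt"   # /opt/stack/nova/test-requirements.txt
    broken="$@/test-requirements.txt"        # --upgrade -e /opt/stack/nova/test-requirements.txt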
# get version of a package from global requirements file
# get_from_global_requirements <package>
function get_from_global_requirements {
local package=$1
- local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+ local required_pkg
+ required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
if [[ $required_pkg == "" ]]; then
die $LINENO "Can't find package $package in requirements"
fi
@@ -157,6 +167,28 @@
return $enabled
}
+# determine if a package was installed from git
+function lib_installed_from_git {
+ local name=$1
+ pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git'
+}
+
+# check that everything that's in LIBS_FROM_GIT was actually installed
+# correctly; this helps catch issues from fat-fingering library names.
+function check_libs_from_git {
+ local lib=""
+ local not_installed=""
+ for lib in $(echo ${LIBS_FROM_GIT} | tr "," " "); do
+ if ! lib_installed_from_git "$lib"; then
+ not_installed+=" $lib"
+ fi
+ done
+ # if anything is not installed, say what it is.
+ if [[ -n "$not_installed" ]]; then
+ die $LINENO "The following LIBS_FROM_GIT were not installed correct: $not_installed"
+ fi
+}
+
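
A sketch of the intended use; the library name is only an example:

    LIBS_FROM_GIT="oslo.messaging"
    lib_installed_from_git "oslo.messaging" && echo "installed from git"
    check_libs_from_git    # dies if any listed library is not an editable git install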
# setup a library by name. If we are trying to use the library from
# git, we'll do a git based install, otherwise we'll punt and the
# library should be installed by a requirements pull from another
@@ -198,7 +230,8 @@
# practical ways.
function is_in_projects_txt {
local project_dir=$1
- local project_name=$(basename $project_dir)
+ local project_name
+ project_name=$(basename $project_dir)
grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
}
@@ -217,7 +250,8 @@
if [ -n "$REQUIREMENTS_DIR" ]; then
# Constrain this package to this project directory from here on out.
- local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+ local name
+ name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
$REQUIREMENTS_DIR/.venv/bin/edit-constraints \
$REQUIREMENTS_DIR/upper-constraints.txt -- $name \
"$flags file://$project_dir#egg=$name"
diff --git a/inc/rootwrap b/inc/rootwrap
index f91e557..63ab59a 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -41,7 +41,8 @@
# configure_rootwrap project
function configure_rootwrap {
local project=$1
- local project_uc=$(echo $1|tr a-z A-Z)
+ local project_uc
+ project_uc=$(echo $1|tr a-z A-Z)
local bin_dir="${project_uc}_BIN_DIR"
bin_dir="${!bin_dir}"
local project_dir="${project_uc}_DIR"
@@ -60,7 +61,8 @@
sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
# Set up the rootwrap sudoers
- local tempfile=$(mktemp)
+ local tempfile
+ tempfile=$(mktemp)
# Specify rootwrap.conf as first parameter to rootwrap
rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *"
echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile
diff --git a/lib/apache b/lib/apache
index a8e9bc5..17526c7 100644
--- a/lib/apache
+++ b/lib/apache
@@ -72,11 +72,14 @@
# various differences between Apache 2.2 and 2.4 that warrant special handling.
function get_apache_version {
if is_ubuntu; then
- local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
+ local version_str
+ version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
elif is_fedora; then
- local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
+ local version_str
+ version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
elif is_suse; then
- local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
+ local version_str
+ version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
else
exit_distro_not_supported "cannot determine apache version"
fi
@@ -115,7 +118,8 @@
function apache_site_config_for {
local site=$@
if is_ubuntu; then
- local apache_version=$(get_apache_version)
+ local apache_version
+ apache_version=$(get_apache_version)
if [[ "$apache_version" == "2.2" ]]; then
# Ubuntu 12.04 - Apache 2.2
echo $APACHE_CONF_DIR/${site}
diff --git a/lib/ceilometer b/lib/ceilometer
deleted file mode 100644
index 3df75b7..0000000
--- a/lib/ceilometer
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/bin/bash
-#
-# lib/ceilometer
-# Install and start **Ceilometer** service
-
-# To enable a minimal set of Ceilometer services, add the following to the
-# ``localrc`` section of ``local.conf``:
-#
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
-#
-# To ensure Ceilometer alarming services are enabled also, further add to the
-# localrc section of local.conf:
-#
-# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
-#
-# To enable Ceilometer to collect the IPMI based meters, further add to the
-# localrc section of local.conf:
-#
-# enable_service ceilometer-aipmi
-#
-# NOTE: Currently, there are two ways to get the IPMI based meters in
-# OpenStack. One way is to configure Ironic conductor to report those meters
-# for the nodes managed by Ironic and to have Ceilometer notification
-# agent to collect them. Ironic by default does NOT enable that reporting
-# functionality. So in order to do so, users need to set the option of
-# conductor.send_sensor_data to true in the ironic.conf configuration file
-# for the Ironic conductor service, and also enable the
-# ceilometer-anotification service.
-#
-# The other way is to use Ceilometer ipmi agent only to get the IPMI based
-# meters. To avoid duplicated meters, users need to make sure to set the
-# option of conductor.send_sensor_data to false in the ironic.conf
-# configuration file if the node on which Ceilometer ipmi agent is running
-# is also managed by Ironic.
-#
-# Several variables set in the localrc section adjust common behaviors
-# of Ceilometer (see within for additional settings):
-#
-# CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi.
-# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600.
-# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es')
-# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
-# CEILOMETER_EVENTS: Enable event collection
-
-# Dependencies:
-#
-# - functions
-# - OS_AUTH_URL for auth in api
-# - DEST set to the destination directory
-# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
-# - STACK_USER service user
-
-# stack.sh
-# ---------
-# - install_ceilometer
-# - configure_ceilometer
-# - init_ceilometer
-# - start_ceilometer
-# - stop_ceilometer
-# - cleanup_ceilometer
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient
-GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
-
-CEILOMETER_DIR=$DEST/ceilometer
-CEILOMETER_CONF_DIR=/etc/ceilometer
-CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
-CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
-CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
-CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}
-
-# Support potential entry-points console scripts in VENV or not
-if [[ ${USE_VENV} = True ]]; then
- PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv
- CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin
-else
- CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Set up database backend
-CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}
-
-# Ceilometer connection info.
-CEILOMETER_SERVICE_PROTOCOL=http
-CEILOMETER_SERVICE_HOST=$SERVICE_HOST
-CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
-CEILOMETER_USE_MOD_WSGI=$(trueorfalse False CEILOMETER_USE_MOD_WSGI)
-
-# To enable OSprofiler change value of this variable to "notifications,profiler"
-CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
-CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}
-
-CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-}
-CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,ceilometer
-
-
-# Functions
-# ---------
-
-# Test if any Ceilometer services are enabled
-# is_ceilometer_enabled
-function is_ceilometer_enabled {
- [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0
- return 1
-}
-
-# create_ceilometer_accounts() - Set up common required Ceilometer accounts
-#
-# Project User Roles
-# ------------------------------------------------------------------
-# SERVICE_TENANT_NAME ceilometer admin
-# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled)
-function create_ceilometer_accounts {
-
- # Ceilometer
- if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
-
- create_service_user "ceilometer" "admin"
-
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service"
- get_or_create_endpoint "metering" \
- "$REGION_NAME" \
- "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
- "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
- "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
- fi
- if is_service_enabled swift; then
- # Ceilometer needs ResellerAdmin role to access Swift account stats.
- get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_TENANT_NAME
- fi
- fi
-}
-
-
-# _cleanup_keystone_apache_wsgi() - Remove WSGI files, disable and remove Apache vhost file
-function _cleanup_ceilometer_apache_wsgi {
- sudo rm -f $CEILOMETER_WSGI_DIR/*
- sudo rm -f $(apache_site_config_for ceilometer)
-}
-
-# cleanup_ceilometer() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_ceilometer {
- if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- mongo ceilometer --eval "db.dropDatabase();"
- elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
- curl -XDELETE "localhost:9200/events_*"
- fi
- if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- _cleanup_ceilometer_apache_wsgi
- fi
-}
-
-function _config_ceilometer_apache_wsgi {
- sudo mkdir -p $CEILOMETER_WSGI_DIR
-
- local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
- local apache_version=$(get_apache_version)
- local venv_path=""
-
- # Copy proxy vhost and wsgi file
- sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
-
- if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages"
- fi
-
- sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf
- sudo sed -e "
- s|%PORT%|$CEILOMETER_SERVICE_PORT|g;
- s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g;
- s|%USER%|$STACK_USER|g;
- s|%VIRTUALENV%|$venv_path|g
- " -i $ceilometer_apache_conf
-}
-
-# configure_ceilometer() - Set config files, create data dirs, etc
-function configure_ceilometer {
- sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR $CEILOMETER_API_LOG_DIR
-
- iniset_rpc_backend ceilometer $CEILOMETER_CONF
-
- iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS"
- iniset $CEILOMETER_CONF DEFAULT verbose True
- iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
-
- if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then
- iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL
- iniset $CEILOMETER_CONF compute workload_partitioning True
- fi
-
- # Install the policy file for the API server
- cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
- iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json
-
- cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR
- cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR
-
- if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then
- sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
- fi
-
- # The compute and central agents need these credentials in order to
- # call out to other services' public APIs.
- # The alarm evaluator needs these options to call ceilometer APIs
- iniset $CEILOMETER_CONF service_credentials os_username ceilometer
- iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD
- iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME
- iniset $CEILOMETER_CONF service_credentials os_region_name $REGION_NAME
- iniset $CEILOMETER_CONF service_credentials os_auth_url $KEYSTONE_SERVICE_URI/v2.0
-
- configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
-
- iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS
-
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
- iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
- iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer)
- iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
- iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
- elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then
- # es is only supported for events. we will use sql for alarming/metering.
- iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer)
- iniset $CEILOMETER_CONF database event_connection es://localhost:9200
- iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
- iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
- ${TOP_DIR}/pkg/elasticsearch.sh start
- cleanup_ceilometer
- elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer
- iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer
- iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer
- configure_mongodb
- cleanup_ceilometer
- else
- die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND"
- fi
-
- if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
- iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere
- iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP"
- iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER"
- iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
- fi
-
- if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- iniset $CEILOMETER_CONF api pecan_debug "False"
- _config_ceilometer_apache_wsgi
- fi
-
- if is_service_enabled ceilometer-aipmi; then
- # Configure rootwrap for the ipmi agent
- configure_rootwrap ceilometer
- fi
-}
-
-function configure_mongodb {
- # Server package is the same on all
- local packages=mongodb-server
-
- if is_fedora; then
- # mongodb client
- packages="${packages} mongodb"
- fi
-
- install_package ${packages}
-
- if is_fedora; then
- # Ensure smallfiles is selected to minimize freespace requirements
- sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
-
- restart_service mongod
- fi
-
- # Give mongodb time to start-up
- sleep 5
-}
-
-# init_ceilometer() - Initialize etc.
-function init_ceilometer {
- # Create cache dir
- sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
- rm -f $CEILOMETER_AUTH_CACHE_DIR/*
-
- if is_service_enabled mysql postgresql; then
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
- recreate_database ceilometer
- $CEILOMETER_BIN_DIR/ceilometer-dbsync
- fi
- fi
-}
-
-# install_redis() - Install the redis server.
-function install_redis {
- if is_ubuntu; then
- install_package redis-server
- restart_service redis-server
- else
- # This will fail (correctly) where a redis package is unavailable
- install_package redis
- restart_service redis
- fi
-}
-
-# install_ceilometer() - Collect source and prepare
-function install_ceilometer {
- git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
- setup_develop $CEILOMETER_DIR
-
- if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then
- install_package memcached
- elif echo $CEILOMETER_COORDINATION_URL | grep -q '^redis:'; then
- install_redis
- fi
-
- if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- pip_install_gr pymongo
- fi
-
- # Only install virt drivers if we're running nova compute
- if is_service_enabled n-cpu ; then
- if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
- pip_install_gr libvirt-python
- fi
-
- if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
- pip_install_gr oslo.vmware
- fi
- fi
-
- if [ "$CEILOMETER_BACKEND" = 'es' ] ; then
- ${TOP_DIR}/pkg/elasticsearch.sh download
- ${TOP_DIR}/pkg/elasticsearch.sh install
- fi
-}
-
-# install_ceilometerclient() - Collect source and prepare
-function install_ceilometerclient {
- if use_library_from_git "python-ceilometerclient"; then
- git_clone_by_name "python-ceilometerclient"
- setup_dev_lib "python-ceilometerclient"
- sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion
- fi
-}
-
-# start_ceilometer() - Start running processes, including screen
-function start_ceilometer {
- run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
- run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF"
- run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
- run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-agent-ipmi --config-file $CEILOMETER_CONF"
-
- if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
- run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
- else
- enable_apache_site ceilometer
- restart_apache_server
- tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log
- tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log
- fi
-
-
- # Start the compute agent last to allow time for the collector to
- # fully wake up and connect to the message bus. See bug #1355809
- if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
- run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP
- fi
- if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
- run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF"
- fi
-
- # Only die on API if it was actually intended to be turned on
- if is_service_enabled ceilometer-api; then
- echo "Waiting for ceilometer-api to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then
- die $LINENO "ceilometer-api did not start"
- fi
- fi
-
- run_process ceilometer-alarm-notifier "$CEILOMETER_BIN_DIR/ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
- run_process ceilometer-alarm-evaluator "$CEILOMETER_BIN_DIR/ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
-}
-
-# stop_ceilometer() - Stop running processes
-function stop_ceilometer {
- if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- disable_apache_site ceilometer
- restart_apache_server
- fi
- # Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
- stop_process $serv
- done
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/ceph b/lib/ceph
index 8e34aa4..29d2aca 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -83,7 +83,8 @@
# ------------
function get_ceph_version {
- local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
+ local ceph_version_str
+ ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
echo $ceph_version_str
}
@@ -106,7 +107,8 @@
# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
if is_service_enabled cinder || is_service_enabled nova; then
- local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+ local virsh_uuid
+ virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
fi
}
@@ -219,7 +221,8 @@
done
# Pools data and metadata were removed in the Giant release, so we apply different commands depending on the version
- local ceph_version=$(get_ceph_version)
+ local ceph_version
+ ceph_version=$(get_ceph_version)
# change pool replica size according to the CEPH_REPLICAS set by the user
if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
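A recurring pattern in this hunk, and in most of the hunks below, is splitting ``local foo=$(command)`` into a declaration followed by a separate assignment. ``local`` is itself a command that returns 0, so combining it with a command substitution masks the substitution's exit status. A minimal sketch of the difference, in plain bash:

    function masked {
        local out=$(false)   # $? is 0 here: 'local' succeeded, false's status is lost
        echo "masked: $?"
    }
    function visible {
        local out
        out=$(false)         # $? is 1 here: the assignment keeps the status
        echo "visible: $?"
    }
    masked; visible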
diff --git a/lib/cinder b/lib/cinder
index 2ed02e8..ed9a103 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -93,7 +93,7 @@
if [[ $CINDER_SECURE_DELETE == "False" ]]; then
CINDER_VOLUME_CLEAR_DEFAULT="none"
fi
- DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n"
+ deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE."
fi
CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
@@ -108,9 +108,8 @@
CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,cinder
-
+# Toggle for deploying Cinder under HTTPD + mod_wsgi
+CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False}
# Source the enabled backends
if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -140,13 +139,19 @@
return 1
}
+# _cinder_cleanup_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cinder_cleanup_apache_wsgi {
+ sudo rm -f $(apache_site_config_for osapi-volume)
+}
+
# cleanup_cinder() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_cinder {
# Ensure the volume group is cleaned up, because failures might
# leave dead volumes in the group
if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
- local targets=$(sudo tgtadm --op show --mode target)
+ local targets
+ targets=$(sudo tgtadm --op show --mode target)
if [ $? -ne 0 ]; then
# If the tgt driver isn't running this won't work, so check the
# response and restart it if need be
@@ -186,6 +191,44 @@
fi
done
fi
+
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ _cinder_cleanup_apache_wsgi
+ fi
+}
+
+# _cinder_config_apache_wsgi() - Set WSGI config files
+function _cinder_config_apache_wsgi {
+ local cinder_apache_conf
+ cinder_apache_conf=$(apache_site_config_for osapi-volume)
+ local cinder_ssl=""
+ local cinder_certfile=""
+ local cinder_keyfile=""
+ local cinder_api_port=$CINDER_SERVICE_PORT
+ local venv_path=""
+
+ if is_ssl_enabled_service c-api; then
+ cinder_ssl="SSLEngine On"
+ cinder_certfile="SSLCertificateFile $CINDER_SSL_CERT"
+ cinder_keyfile="SSLCertificateKeyFile $CINDER_SSL_KEY"
+ fi
+ if [[ ${USE_VENV} = True ]]; then
+ venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages"
+ fi
+
+ # copy proxy vhost file
+ sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$cinder_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%APIWORKERS%|$API_WORKERS|g
+ s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g;
+ s|%SSLENGINE%|$cinder_ssl|g;
+ s|%SSLCERTFILE%|$cinder_certfile|g;
+ s|%SSLKEYFILE%|$cinder_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $cinder_apache_conf
}
# configure_cinder() - Set config files, create data dirs, etc
@@ -279,13 +322,17 @@
fi
# Format logging
- if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id"
else
# Set req-id, project-name and resource in log format
iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s"
fi
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ _cinder_config_apache_wsgi
+ fi
+
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
configure_cinder_driver
fi
@@ -402,6 +449,13 @@
install_package tgt
fi
fi
+
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ install_apache_wsgi
+ if is_ssl_enabled_service "c-api"; then
+ enable_mod_ssl
+ fi
+ fi
}
# install_cinderclient() - Collect source and prepare
@@ -449,10 +503,16 @@
fi
fi
- run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
- echo "Waiting for Cinder API to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then
- die $LINENO "c-api did not start"
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ enable_apache_site osapi-volume
+ restart_apache_server
+ tail_log c-api /var/log/$APACHE_NAME/c-api.log
+ else
+ run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
+ echo "Waiting for Cinder API to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then
+ die $LINENO "c-api did not start"
+ fi
fi
run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
@@ -471,9 +531,16 @@
# stop_cinder() - Stop running processes
function stop_cinder {
+ if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then
+ disable_apache_site osapi-volume
+ restart_apache_server
+ else
+ stop_process c-api
+ fi
+
# Kill the cinder screen windows
local serv
- for serv in c-api c-bak c-sch c-vol; do
+ for serv in c-bak c-sch c-vol; do
stop_process $serv
done
}
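The new ``CINDER_USE_MOD_WSGI`` toggle deploys the Cinder API under HTTPD + mod_wsgi instead of as a standalone process; when it is True, ``start_cinder`` enables the ``osapi-volume`` Apache site and tails its log rather than launching ``cinder-api`` directly. A sketch of opting in, assuming the usual ``local.conf`` workflow:

    [[local|localrc]]
    CINDER_USE_MOD_WSGI=True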
diff --git a/lib/database b/lib/database
index 5bbbe31..13740b9 100644
--- a/lib/database
+++ b/lib/database
@@ -101,7 +101,7 @@
# a multi-node DevStack installation.
# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
- BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
+ BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
return 0
}
@@ -135,14 +135,6 @@
database_connection_url_$DATABASE_TYPE $db
}
-function get_database_type {
- if [[ -n "${SQLALCHEMY_DATABASE_DRIVER}" ]]; then
- echo "${DATABASE_TYPE}+${SQLALCHEMY_DATABASE_DRIVER}"
- else
- echo "${DATABASE_TYPE}"
- fi
-}
-
# Restore xtrace
$XTRACE
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 7ae9a93..c2ab32e 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -12,12 +12,6 @@
set +o xtrace
MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
-# Force over to pymysql driver by default if we are using it.
-if is_service_enabled mysql; then
- if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then
- SQLALCHEMY_DATABASE_DRIVER=${SQLALCHEMY_DATABASE_DRIVER:-"pymysql"}
- fi
-fi
register_database mysql
@@ -30,6 +24,14 @@
# Functions
# ---------
+function get_database_type_mysql {
+ if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then
+ echo mysql+pymysql
+ else
+ echo mysql
+ fi
+}
+
# Get rid of everything enough to cleanly change database backends
function cleanup_database_mysql {
stop_service $MYSQL
@@ -92,14 +94,12 @@
# Change bind-address from localhost (127.0.0.1) to any (::) and
# set default db type to InnoDB
- sudo bash -c "source $TOP_DIR/functions && \
- iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \
- iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \
- iniset $my_conf mysqld default-storage-engine InnoDB && \
- iniset $my_conf mysqld max_connections 1024 && \
- iniset $my_conf mysqld query_cache_type OFF && \
- iniset $my_conf mysqld query_cache_size 0"
-
+ iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS"
+ iniset -sudo $my_conf mysqld sql_mode STRICT_ALL_TABLES
+ iniset -sudo $my_conf mysqld default-storage-engine InnoDB
+ iniset -sudo $my_conf mysqld max_connections 1024
+ iniset -sudo $my_conf mysqld query_cache_type OFF
+ iniset -sudo $my_conf mysqld query_cache_size 0
if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
echo_summary "Enabling MySQL query logging"
@@ -115,12 +115,10 @@
# Turn on slow query log, log all queries (any query taking longer than
# 0 seconds) and log all non-indexed queries
- sudo bash -c "source $TOP_DIR/functions && \
- iniset $my_conf mysqld slow-query-log 1 && \
- iniset $my_conf mysqld slow-query-log-file $slow_log && \
- iniset $my_conf mysqld long-query-time 0 && \
- iniset $my_conf mysqld log-queries-not-using-indexes 1"
-
+ iniset -sudo $my_conf mysqld slow-query-log 1
+ iniset -sudo $my_conf mysqld slow-query-log-file $slow_log
+ iniset -sudo $my_conf mysqld long-query-time 0
+ iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
fi
restart_service $mysql
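These hunks replace the ``sudo bash -c "source $TOP_DIR/functions && iniset ..."`` chains with the ``-sudo`` flag of ``iniset``, which performs the privileged write without re-sourcing the function library in a subshell. A sketch of the calling convention implied by the hunk (the path is illustrative):

    # iniset [-sudo] <config-file> <section> <option> <value>
    iniset -sudo /etc/mysql/my.cnf mysqld max_connections 1024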
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index e087a1e..78c7bed 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -21,6 +21,10 @@
# Functions
# ---------
+function get_database_type_postgresql {
+ echo postgresql
+}
+
# Get rid of everything enough to cleanly change database backends
function cleanup_database_postgresql {
stop_service postgresql
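``lib/database`` now builds the helper name from ``$DATABASE_TYPE``, so each backend file supplies its own ``get_database_type_*`` implementation: ``mysql+pymysql`` when the PyMySQL driver is selected, plain ``postgresql`` here. A minimal sketch of the dispatch pattern in bash:

    DATABASE_TYPE=mysql
    function get_database_type_mysql {
        echo mysql+pymysql
    }
    # The function name is composed from the variable at call time
    get_database_type_$DATABASE_TYPE    # prints: mysql+pymysql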
diff --git a/lib/dstat b/lib/dstat
index f11bfa5..fe4790b 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -19,8 +19,7 @@
# start_dstat() - Start running processes, including screen
function start_dstat {
# A better kind of sysstat, with the top process per time slice
- DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv"
- run_process dstat "dstat $DSTAT_OPTS"
+ run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR"
# To enable peakmem_tracker add:
# enable_service peakmem_tracker
diff --git a/lib/glance b/lib/glance
index b1b0f32..2eb93a4 100644
--- a/lib/glance
+++ b/lib/glance
@@ -75,9 +75,6 @@
GLANCE_SEARCH_PORT_INT=${GLANCE_SEARCH_PORT_INT:-19393}
GLANCE_SEARCH_HOSTPORT=${GLANCE_SEARCH_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SEARCH_PORT}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,glance
-
# Functions
# ---------
@@ -109,7 +106,8 @@
iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
- local dburl=`database_connection_url glance`
+ local dburl
+ dburl=`database_connection_url glance`
iniset $GLANCE_REGISTRY_CONF database connection $dburl
iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
@@ -268,7 +266,8 @@
# required for swift access
if is_service_enabled s-proxy; then
- local glance_swift_user=$(get_or_create_user "glance-swift" \
+ local glance_swift_user
+ glance_swift_user=$(get_or_create_user "glance-swift" \
"$SERVICE_PASSWORD" "default" "glance-swift@example.com")
get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
fi
diff --git a/lib/heat b/lib/heat
index cedddd2..f3f0548 100644
--- a/lib/heat
+++ b/lib/heat
@@ -16,6 +16,7 @@
# - install_heat
# - configure_heatclient
# - configure_heat
+# - _config_heat_apache_wsgi
# - init_heat
# - start_heat
# - stop_heat
@@ -32,6 +33,9 @@
# set up default directories
GITDIR["python-heatclient"]=$DEST/python-heatclient
+# Toggle for deploying Heat-API under HTTPD + mod_wsgi
+HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False}
+
HEAT_DIR=$DEST/heat
HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
@@ -53,21 +57,19 @@
HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
HEAT_API_PORT=${HEAT_API_PORT:-8004}
+# Support entry-point installation of console scripts
+HEAT_BIN_DIR=$(get_python_exec_prefix)
# other default options
if [[ "$HEAT_STANDALONE" = "True" ]]; then
# for standalone, use defaults which require no service user
- HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN`
+ HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN)
HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password}
else
- HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+ HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
fi
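Note the changed calling convention: ``trueorfalse`` is now given the name of the variable rather than its expanded value, which lets the helper normalize unset or oddly spelled values against the supplied default. A sketch, assuming the helper's usual true/false spellings:

    HEAT_STACK_DOMAIN=yes
    HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)   # -> True
    unset HEAT_STACK_DOMAIN
    HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)   # -> True (the default)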
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,heat
-
-
# Functions
# ---------
@@ -119,13 +121,17 @@
# logging
iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG
- if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ] ; then
# Add color to logging output
setup_colorized_logging $HEAT_CONF DEFAULT tenant user
fi
iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH
+ if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+ _config_heat_apache_wsgi
+ fi
+
# NOTE(jamielennox): heat re-uses specific values from the
# keystone_authtoken middleware group and so currently fails when using the
# auth plugin setup. This should be fixed in heat. Heat is also the only
@@ -190,7 +196,7 @@
# (re)create heat database
recreate_database heat
- $HEAT_DIR/bin/heat-manage db_sync
+ $HEAT_BIN_DIR/heat-manage db_sync
create_heat_cache_dir
}
@@ -213,6 +219,9 @@
function install_heat {
git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
setup_develop $HEAT_DIR
+ if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+ install_apache_wsgi
+ fi
}
# install_heat_other() - Collect source and prepare
@@ -227,21 +236,107 @@
# start_heat() - Start running processes, including screen
function start_heat {
- run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF"
- run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF"
- run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF"
- run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
+ run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF"
+
+ # If the site is not enabled then we are in a grenade scenario
+ local enabled_site_file
+ enabled_site_file=$(apache_site_config_for heat-api)
+ if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+ enable_apache_site heat-api
+ enable_apache_site heat-api-cfn
+ enable_apache_site heat-api-cloudwatch
+ restart_apache_server
+ tail_log heat-api /var/log/$APACHE_NAME/heat-api.log
+ tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log
+ tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log
+ else
+ run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
+ run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
+ run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
+ fi
}
# stop_heat() - Stop running processes
function stop_heat {
# Kill the screen windows
- local serv
- for serv in h-eng h-api h-api-cfn h-api-cw; do
- stop_process $serv
- done
+ stop_process h-eng
+
+ if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+ disable_apache_site heat-api
+ disable_apache_site heat-api-cfn
+ disable_apache_site heat-api-cloudwatch
+ restart_apache_server
+ else
+ local serv
+ for serv in h-api h-api-cfn h-api-cw; do
+ stop_process $serv
+ done
+ fi
+
}
+# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_heat_apache_wsgi {
+ sudo rm -f $(apache_site_config_for heat-api)
+ sudo rm -f $(apache_site_config_for heat-api-cfn)
+ sudo rm -f $(apache_site_config_for heat-api-cloudwatch)
+}
+
+# _config_heat_apache_wsgi() - Set WSGI config files of Heat
+function _config_heat_apache_wsgi {
+
+ local heat_apache_conf
+ heat_apache_conf=$(apache_site_config_for heat-api)
+ local heat_cfn_apache_conf
+ heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn)
+ local heat_cloudwatch_apache_conf
+ heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch)
+ local heat_ssl=""
+ local heat_certfile=""
+ local heat_keyfile=""
+ local heat_api_port=$HEAT_API_PORT
+ local heat_cfn_api_port=$HEAT_API_CFN_PORT
+ local heat_cw_api_port=$HEAT_API_CW_PORT
+ local venv_path=""
+
+ sudo cp $FILES/apache-heat-api.template $heat_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$heat_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+ s|%SSLENGINE%|$heat_ssl|g;
+ s|%SSLCERTFILE%|$heat_certfile|g;
+ s|%SSLKEYFILE%|$heat_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $heat_apache_conf
+
+ sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$heat_cfn_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+ s|%SSLENGINE%|$heat_ssl|g;
+ s|%SSLCERTFILE%|$heat_certfile|g;
+ s|%SSLKEYFILE%|$heat_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $heat_cfn_apache_conf
+
+ sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$heat_cw_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+ s|%SSLENGINE%|$heat_ssl|g;
+ s|%SSLCERTFILE%|$heat_certfile|g;
+ s|%SSLKEYFILE%|$heat_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $heat_cloudwatch_apache_conf
+}
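As with Cinder, the Heat APIs can now be fronted by Apache. ``start_heat`` additionally checks that the ``heat-api`` site file exists before enabling it, so an upgrade (grenade) run coming from a release without these vhosts falls back to the plain ``heat-api`` processes. A sketch of opting in, assuming the usual ``local.conf`` workflow:

    [[local|localrc]]
    HEAT_USE_MOD_WSGI=True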
+
+
# create_heat_accounts() - Set up common required heat accounts
function create_heat_accounts {
if [[ "$HEAT_STANDALONE" != "True" ]]; then
@@ -323,7 +418,8 @@
echo "</body></html>" >> $HEAT_PIP_REPO/index.html
- local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+ local heat_pip_repo_apache_conf
+ heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
sudo sed -e "
diff --git a/lib/horizon b/lib/horizon
index 9fe0aa8..6ecd755 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -35,10 +35,6 @@
# The example file in Horizon repo is used by default.
HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,horizon
-
-
# Functions
# ---------
@@ -53,7 +49,8 @@
sed -e "/^$option/d" -i $local_settings
echo -e "\n$option=$value" >> $file
elif grep -q "^$section" $file; then
- local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+ local line
+ line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
if [ -n "$line" ]; then
sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
else
@@ -72,7 +69,8 @@
# cleanup_horizon() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_horizon {
- local horizon_conf=$(apache_site_config_for horizon)
+ local horizon_conf
+ horizon_conf=$(apache_site_config_for horizon)
sudo rm -f $horizon_conf
}
@@ -116,7 +114,8 @@
# Create an empty directory that apache uses as docroot
sudo mkdir -p $HORIZON_DIR/.blackhole
- local horizon_conf=$(apache_site_config_for horizon)
+ local horizon_conf
+ horizon_conf=$(apache_site_config_for horizon)
# Configure apache to run horizon
sudo sh -c "sed -e \"
diff --git a/lib/infra b/lib/infra
index 89397de..ab32efe 100644
--- a/lib/infra
+++ b/lib/infra
@@ -41,7 +41,7 @@
# Install pbr
if use_library_from_git "pbr"; then
git_clone_by_name "pbr"
- setup_lib "pbr"
+ setup_dev_lib "pbr"
else
# Always upgrade pbr to the latest version as we may have pulled it
# in via system packages.
diff --git a/lib/ironic b/lib/ironic
index b3ad586..74e2f93 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -31,6 +31,7 @@
# Set up default directories
GITDIR["python-ironicclient"]=$DEST/python-ironicclient
+GITDIR["ironic-lib"]=$DEST/ironic-lib
IRONIC_DIR=$DEST/ironic
IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent
@@ -114,9 +115,6 @@
IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385}
IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,ironic
-
# Enable iPXE
IRONIC_IPXE_ENABLED=$(trueorfalse False IRONIC_IPXE_ENABLED)
IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot}
@@ -191,6 +189,12 @@
die $LINENO "$srv should be enabled for Ironic."
fi
done
+
+ if use_library_from_git "ironic-lib"; then
+ git_clone_by_name "ironic-lib"
+ setup_dev_lib "ironic-lib"
+ fi
+
git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
setup_develop $IRONIC_DIR
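``ironic-lib`` joins the set of libraries that can be consumed from git rather than from PyPI. A sketch of opting in, assuming the standard ``LIBS_FROM_GIT`` mechanism behind ``use_library_from_git``:

    [[local|localrc]]
    LIBS_FROM_GIT=ironic-lib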
@@ -221,7 +225,8 @@
# _config_ironic_apache_wsgi() - Set WSGI config files of Ironic
function _config_ironic_apache_wsgi {
- local ironic_apache_conf=$(apache_site_config_for ironic)
+ local ironic_apache_conf
+ ironic_apache_conf=$(apache_site_config_for ironic)
sudo cp $FILES/apache-ironic.template $ironic_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
@@ -321,11 +326,13 @@
function configure_ironic_conductor {
cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
- local ironic_rootwrap=$(get_rootwrap_location ironic)
+ local ironic_rootwrap
+ ironic_rootwrap=$(get_rootwrap_location ironic)
local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"
# Set up the rootwrap sudoers for ironic
- local tempfile=`mktemp`
+ local tempfile
+ tempfile=`mktemp`
echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
chmod 0440 $tempfile
sudo chown root:root $tempfile
@@ -366,7 +373,8 @@
fi
iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
iniset $IRONIC_CONF_FILE glance swift_api_version v1
- local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
+ local tenant_id
+ tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
iniset $IRONIC_CONF_FILE glance swift_container glance
iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
@@ -375,7 +383,8 @@
fi
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
- local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
+ local pxebin
+ pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
iniset $IRONIC_CONF_FILE pxe ipxe_enabled True
iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template'
iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
@@ -441,7 +450,8 @@
# _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
function _ironic_bm_vm_names {
local idx
- local num_vms=$(($IRONIC_VM_COUNT - 1))
+ local num_vms
+ num_vms=$(($IRONIC_VM_COUNT - 1))
for idx in $(seq 0 $num_vms); do
echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}"
done
@@ -494,22 +504,27 @@
}
function create_ovs_taps {
- local ironic_net_id=$(neutron net-list | grep private | get_field 1)
+ local ironic_net_id
+ ironic_net_id=$(neutron net-list | grep private | get_field 1)
# Workaround: no netns exists on the host until a Neutron port is created. We
# need to create one in Neutron to know what netns to tap into prior to the
# first node booting.
- local port_id=$(neutron port-create private | grep " id " | get_field 2)
+ local port_id
+ port_id=$(neutron port-create private | grep " id " | get_field 2)
# intentional sleep to make sure the tag has been set on the port
sleep 10
if [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -b2-)
+ local tapdev
+ tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
else
- local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -b2-)
+ local tapdev
+ tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
fi
- local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+ local tag_id
+ tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
# make sure the veth pair does not already exist; otherwise delete its links
sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
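The added ``cut -d'@' -f1`` copes with newer iproute2 output, where a tap attached to a veth peer is listed as ``tapXXXX@ifN``; without it the captured device name would keep the ``@ifN`` suffix. A quick illustration with a canned line:

    echo "27: tap1234abcd@if2: <BROADCAST,MULTICAST> mtu 1500" \
        | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-
    # prints: tap1234abcd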
@@ -555,6 +570,7 @@
# timing out.
local resource=$1
local expected_count=$2
+ local i
echo_summary "Waiting 2 minutes for Nova resource tracker to pick up $resource >= $expected_count"
for i in $(seq 1 120); do
if [ $(nova hypervisor-stats | grep " $resource " | get_field 2) -ge $expected_count ]; then
@@ -566,7 +582,8 @@
}
function enroll_nodes {
- local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
+ local chassis_id
+ chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
if ! is_ironic_hardware; then
local ironic_node_cpu=$IRONIC_VM_SPECS_CPU
@@ -598,10 +615,14 @@
if ! is_ironic_hardware; then
local mac_address=$hardware_info
elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then
- local ipmi_address=$(echo $hardware_info |awk '{print $1}')
- local mac_address=$(echo $hardware_info |awk '{print $2}')
- local ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
- local ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
+ local ipmi_address
+ ipmi_address=$(echo $hardware_info |awk '{print $1}')
+ local mac_address
+ mac_address=$(echo $hardware_info |awk '{print $2}')
+ local ironic_ipmi_username
+ ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
+ local ironic_ipmi_passwd
+ ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
# Currently we require all hardware platforms to have the same CPU/RAM/DISK
# info; in the future this can be enhanced to support different types, and
# then we can create the bare metal flavor with the minimum values
@@ -613,9 +634,13 @@
# First node created will be used for testing in ironic w/o glance
# scenario, so we need to know its UUID.
- local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")
+ local standalone_node_uuid=""
+ if [ $total_nodes -eq 0 ]; then
+ standalone_node_uuid="--uuid $IRONIC_NODE_UUID"
+ fi
- local node_id=$(ironic node-create $standalone_node_uuid\
+ local node_id
+ node_id=$(ironic node-create $standalone_node_uuid\
--chassis_uuid $chassis_id \
--driver $IRONIC_DEPLOY_DRIVER \
--name node-$total_nodes \
@@ -636,7 +661,8 @@
# NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered
# bug (LP: #1333852) in Trove. This can be changed to use an auto flavor id when the
# bug is fixed in Juno.
- local adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
+ local adjusted_disk
+ adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu
nova flavor-key baremetal set "cpu_arch"="x86_64"
@@ -767,7 +793,8 @@
fi
fi
- local token=$(openstack token issue -c id -f value)
+ local token
+ token=$(openstack token issue -c id -f value)
die_if_not_set $LINENO token "Keystone failed to get token"
# load them into glance
@@ -805,7 +832,8 @@
function cleanup_baremetal_basic_ops {
rm -f $IRONIC_VM_MACS_CSV_FILE
if [ -f $IRONIC_KEY_FILE ]; then
- local key=$(cat $IRONIC_KEY_FILE.pub)
+ local key
+ key=$(cat $IRONIC_KEY_FILE.pub)
# remove public key from authorized_keys
grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
diff --git a/lib/keystone b/lib/keystone
index 31d5448..5a2afbf 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -51,11 +51,6 @@
KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
-if is_suse; then
- KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/srv/www/htdocs/keystone}
-else
- KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone}
-fi
# Set up additional extensions, such as oauth1, federation
# Example of KEYSTONE_EXTENSIONS=oauth1,federation
@@ -132,15 +127,13 @@
# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function _cleanup_keystone_apache_wsgi {
- sudo rm -f $KEYSTONE_WSGI_DIR/*
sudo rm -f $(apache_site_config_for keystone)
}
# _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
function _config_keystone_apache_wsgi {
- sudo mkdir -p $KEYSTONE_WSGI_DIR
-
- local keystone_apache_conf=$(apache_site_config_for keystone)
+ local keystone_apache_conf
+ keystone_apache_conf=$(apache_site_config_for keystone)
local keystone_ssl=""
local keystone_certfile=""
local keystone_keyfile=""
@@ -161,22 +154,17 @@
venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
fi
- # copy proxy vhost and wsgi file
- sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main
- sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/admin
-
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
s|%PUBLICPORT%|$keystone_service_port|g;
s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
- s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g;
- s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g;
s|%SSLENGINE%|$keystone_ssl|g;
s|%SSLCERTFILE%|$keystone_certfile|g;
s|%SSLKEYFILE%|$keystone_keyfile|g;
s|%USER%|$STACK_USER|g;
s|%VIRTUALENV%|$venv_path|g
+ s|%KEYSTONE_BIN%|$KEYSTONE_BIN_DIR|g
" -i $keystone_apache_conf
}
@@ -353,16 +341,19 @@
# Group Users Roles Tenant
# ------------------------------------------------------------------
# admins admin admin admin
-# nonadmin demo Member, anotherrole demo
+# nonadmins demo Member, anotherrole demo
# Migrated from keystone_data.sh
function create_keystone_accounts {
# admin
- local admin_tenant=$(get_or_create_project "admin" default)
- local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
- local admin_role=$(get_or_create_role "admin")
+ local admin_tenant
+ admin_tenant=$(get_or_create_project "admin" default)
+ local admin_user
+ admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
+ local admin_role
+ admin_role=$(get_or_create_role "admin")
get_or_add_user_project_role $admin_role $admin_user $admin_tenant
# Create service project/role
@@ -378,18 +369,23 @@
get_or_create_role ResellerAdmin
# The Member role is used by Horizon and Swift so we need to keep it:
- local member_role=$(get_or_create_role "Member")
+ local member_role
+ member_role=$(get_or_create_role "Member")
# another_role demonstrates that an arbitrary role may be created and used
# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
- local another_role=$(get_or_create_role "anotherrole")
+ local another_role
+ another_role=$(get_or_create_role "anotherrole")
# invisible tenant - admin can't see this one
- local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
+ local invis_tenant
+ invis_tenant=$(get_or_create_project "invisible_to_admin" default)
# demo
- local demo_tenant=$(get_or_create_project "demo" default)
- local demo_user=$(get_or_create_user "demo" \
+ local demo_tenant
+ demo_tenant=$(get_or_create_project "demo" default)
+ local demo_user
+ demo_user=$(get_or_create_user "demo" \
"$ADMIN_PASSWORD" "default" "demo@example.com")
get_or_add_user_project_role $member_role $demo_user $demo_tenant
@@ -397,9 +393,11 @@
get_or_add_user_project_role $another_role $demo_user $demo_tenant
get_or_add_user_project_role $member_role $demo_user $invis_tenant
- local admin_group=$(get_or_create_group "admins" \
+ local admin_group
+ admin_group=$(get_or_create_group "admins" \
"default" "openstack admin group")
- local non_admin_group=$(get_or_create_group "nonadmins" \
+ local non_admin_group
+ non_admin_group=$(get_or_create_group "nonadmins" \
"default" "non-admin group")
get_or_add_group_project_role $member_role $non_admin_group $demo_tenant
@@ -428,7 +426,8 @@
function create_service_user {
local role=${2:-service}
- local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
+ local user
+ user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
}
diff --git a/lib/ldap b/lib/ldap
index d2dbc3b..0414fea 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -82,7 +82,8 @@
function init_ldap {
local keystone_ldif
- local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+ local tmp_ldap_dir
+ tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
# Remove data but not schemas
clear_ldap_state
@@ -113,7 +114,8 @@
echo "Installing LDAP inside function"
echo "os_VENDOR is $os_VENDOR"
- local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+ local tmp_ldap_dir
+ tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
printf "installing OpenLDAP"
if is_ubuntu; then
@@ -129,7 +131,8 @@
fi
echo "LDAP_PASSWORD is $LDAP_PASSWORD"
- local slappass=$(slappasswd -s $LDAP_PASSWORD)
+ local slappass
+ slappass=$(slappasswd -s $LDAP_PASSWORD)
printf "LDAP secret is $slappass\n"
# Create manager.ldif and add to olcdb
diff --git a/lib/lvm b/lib/lvm
index 8afd543..468a99a 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -56,7 +56,8 @@
# If the backing physical device is a loop device, it was probably setup by DevStack
if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
- local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
+ local vg_dev
+ vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
sudo losetup -d $vg_dev
rm -f $backing_file
fi
@@ -89,7 +90,8 @@
if ! sudo vgs $vg; then
# Only create if the file doesn't already exists
[[ -f $backing_file ]] || truncate -s $size $backing_file
- local vg_dev=`sudo losetup -f --show $backing_file`
+ local vg_dev
+ vg_dev=`sudo losetup -f --show $backing_file`
# Only create volume group if it doesn't already exist
if ! sudo vgs $vg; then
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index d0eb0c0..4e51425 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -158,8 +158,6 @@
Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
# The name of the default q-l3 router
Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
-# nova vif driver that all plugins should use
-NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
@@ -328,7 +326,9 @@
# ---------------------------------
# Please refer to ``lib/neutron_plugins/README.md`` for details.
-source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
+ source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+fi
# Agent loadbalancer service plugin functions
# -------------------------------------------
@@ -358,10 +358,6 @@
Q_USE_SECGROUP=False
fi
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,neutron
-
-
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
@@ -472,19 +468,13 @@
function create_nova_conf_neutron {
iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API"
-
- if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
- iniset $NOVA_CONF neutron auth_plugin "v3password"
- iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
- iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
- iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
- iniset $NOVA_CONF neutron user_domain_name "default"
- else
- iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME"
- iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD"
- iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME"
- fi
+ iniset $NOVA_CONF neutron auth_plugin "v3password"
+ iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+ iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
+ iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
+ iniset $NOVA_CONF neutron user_domain_name "Default"
+ iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME"
+ iniset $NOVA_CONF neutron project_domain_name "Default"
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $NOVA_CONF neutron region_name "$REGION_NAME"
iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
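The Identity v2 branch is gone: Nova is now always handed Keystone v3 credentials for its Neutron client. For illustration only, with placeholder host and password values, the resulting ``nova.conf`` section would look roughly like:

    [neutron]
    auth_plugin = v3password
    auth_url = http://192.0.2.10:35357/v3
    username = neutron
    password = secret
    user_domain_name = Default
    project_name = service
    project_domain_name = Default
    region_name = RegionOne
    url = http://192.0.2.10:9696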
@@ -495,10 +485,9 @@
iniset $NOVA_CONF DEFAULT security_group_api neutron
fi
- # set NOVA_VIF_DRIVER and optionally set options in nova_conf
+ # optionally set options in nova_conf
neutron_plugin_create_nova_conf
- iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER"
iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
if is_service_enabled q-meta; then
iniset $NOVA_CONF neutron service_metadata_proxy "True"
@@ -619,16 +608,6 @@
recreate_database $Q_DB_NAME
# Run Neutron db migrations
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
- for svc in fwaas lbaas vpnaas; do
- if [ "$svc" = "vpnaas" ]; then
- q_svc="q-vpn"
- else
- q_svc="q-$svc"
- fi
- if is_service_enabled $q_svc; then
- $NEUTRON_BIN_DIR/neutron-db-manage --service $svc --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
- fi
- done
}
# install_neutron() - Collect source and prepare
@@ -702,7 +681,7 @@
service_protocol="http"
fi
# Start the Neutron service
- run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+ run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
echo "Waiting for Neutron to start..."
if is_ssl_enabled_service "neutron"; then
ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
@@ -720,7 +699,7 @@
# Control of the l2 agent is separated out to make it easier to test partial
# upgrades (everything upgraded except the L2 agent)
function start_neutron_l2_agent {
- run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
if is_provider_network; then
sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
@@ -738,23 +717,23 @@
}
function start_neutron_other_agents {
- run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
+ run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
if is_service_enabled neutron-vpnaas; then
: # Started by plugin
elif is_service_enabled q-vpn; then
run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
else
- run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
+ run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
fi
- run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
- run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
- run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+ run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
+ run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
# For XenServer, start an agent for the domU openvswitch
- run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+ run_process q-domua "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
}
@@ -821,15 +800,16 @@
local IP_ADD=""
local IP_DEL=""
- local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
+ local DEFAULT_ROUTE_GW
+ DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
local ADD_OVS_PORT=""
if [[ $af == "inet" ]]; then
- IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }')
+ IP_BRD=$(ip -f $af a s dev $from_intf | grep inet | awk '{ print $2, $3, $4; exit }')
fi
if [[ $af == "inet6" ]]; then
- IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }')
+ IP_BRD=$(ip -f $af a s dev $from_intf | grep inet6 | awk '{ print $2, $3, $4; exit }')
fi
if [ "$DEFAULT_ROUTE_GW" != "" ]; then
@@ -841,11 +821,11 @@
fi
if [[ "$IP_BRD" != "" ]]; then
- IP_ADD="sudo ip addr del $IP_BRD dev $from_intf"
- IP_DEL="sudo ip addr add $IP_BRD dev $to_intf"
+ IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+ IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
fi
- $IP_ADD; $IP_DEL; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
+ $IP_DEL; $IP_ADD; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
fi
}
@@ -853,18 +833,20 @@
# runs that a clean run would need to clean up
function cleanup_neutron {
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
+ if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
- if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
- _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
- fi
+ if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+ _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
+ fi
- if is_provider_network && is_ironic_hardware; then
- for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
- sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
- sudo ip addr add $IP dev $PUBLIC_INTERFACE
- done
- sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ if is_provider_network && is_ironic_hardware; then
+ for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+ sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+ sudo ip addr add $IP dev $PUBLIC_INTERFACE
+ done
+ sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+ fi
fi
if is_neutron_ovs_base_plugin; then
@@ -916,7 +898,11 @@
# If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
mkdir -p /$Q_PLUGIN_CONF_PATH
Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
- cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+ # NOTE(hichihara): Some neutron vendor plugins have already been decomposed and
+ # have no config file in the Neutron tree; each such plugin should provide the file itself.
+ if [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
+ cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+ fi
iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
@@ -1176,6 +1162,9 @@
# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
function _neutron_deploy_rootwrap_filters {
+ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+ return
+ fi
local srcdir=$1
sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
@@ -1255,7 +1244,8 @@
subnet_params+="--gateway $NETWORK_GATEWAY "
subnet_params+="--name $PRIVATE_SUBNET_NAME "
subnet_params+="$NET_ID $FIXED_RANGE"
- local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+ local subnet_id
+ subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID"
echo $subnet_id
}
@@ -1270,7 +1260,8 @@
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
- local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+ local ipv6_subnet_id
+ ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID"
echo $ipv6_subnet_id
}
@@ -1283,7 +1274,8 @@
subnet_params+="--name $PUBLIC_SUBNET_NAME "
subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
subnet_params+="-- --enable_dhcp=False"
- local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ local id_and_ext_gw_ip
+ id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
echo $id_and_ext_gw_ip
}
@@ -1295,7 +1287,8 @@
subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
subnet_params+="-- --enable_dhcp=False"
- local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+ local ipv6_id_and_ext_gw_ip
+ ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
echo $ipv6_id_and_ext_gw_ip
}
@@ -1304,8 +1297,10 @@
function _neutron_configure_router_v4 {
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create a public subnet on the external network
- local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
- local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
+ local id_and_ext_gw_ip
+ id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
+ local ext_gw_ip
+ ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
# Configure the external network as the default router gateway
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
@@ -1342,9 +1337,12 @@
function _neutron_configure_router_v6 {
neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
# Create a public subnet on the external network
- local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
- local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
- local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
+ local ipv6_id_and_ext_gw_ip
+ ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
+ local ipv6_ext_gw_ip
+ ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
+ local ipv6_pub_subnet_id
+ ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
# If the external network has not already been set as the default router
# gateway when configuring an IPv4 public subnet, do so now
@@ -1362,7 +1360,8 @@
die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
- local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+ local ext_gw_interface
+ ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
# Configure interface for public bridge
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index 4b220d3..f03000e 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -16,9 +16,7 @@
``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled
* ``neutron_plugin_create_nova_conf`` :
- set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf
- e.g.
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ optionally set options in nova_conf
* ``neutron_plugin_install_agent_packages`` :
install packages that are specific to the plugin agent
e.g.
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index b8166d9..557b94d 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -12,7 +12,7 @@
}
function neutron_plugin_create_nova_conf {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ :
}
function neutron_plugin_install_agent_packages {
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 6b4819e..2028496 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -10,7 +10,8 @@
source $TOP_DIR/lib/neutron_plugins/openvswitch
function save_function {
- local ORIG_FUNC=$(declare -f $1)
+ local ORIG_FUNC
+ ORIG_FUNC=$(declare -f $1)
local NEW_FUNC="$2${ORIG_FUNC#$1}"
eval "$NEW_FUNC"
}
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
index 3660a9f..dd5cfa6 100644
--- a/lib/neutron_plugins/ibm
+++ b/lib/neutron_plugins/ibm
@@ -42,7 +42,6 @@
}
function neutron_plugin_create_nova_conf {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
# if n-cpu is enabled, then setup integration bridge
if is_service_enabled n-cpu; then
neutron_setup_integration_bridge
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
old mode 100755
new mode 100644
index fefc1c3..bd4438d
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -69,6 +69,18 @@
fi
AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent"
iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
+
+ # Configure vxlan tunneling
+ if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
+ if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True"
+ iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP
+ else
+ iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False"
+ fi
+ else
+ iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False"
+ fi
}
function neutron_plugin_setup_interface_driver {
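The linuxbridge agent now wires up VXLAN from the standard tunnel variables, enabling it only when both ``ENABLE_TENANT_TUNNELS`` is True and the ML2 tenant network type is vxlan. A sketch of a ``local.conf`` fragment that exercises the new branch, assuming ``TUNNEL_ENDPOINT_IP`` is populated elsewhere as the hunk expects:

    [[local|localrc]]
    Q_AGENT=linuxbridge
    ENABLE_TENANT_TUNNELS=True
    Q_ML2_TENANT_NETWORK_TYPE=vxlan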
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
old mode 100755
new mode 100644
index 13ffee9..ace5335
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -19,7 +19,9 @@
# Default openvswitch L2 agent
Q_AGENT=${Q_AGENT:-openvswitch}
-source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
+if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then
+ source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
+fi
# List of MechanismDrivers to load
Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge}
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 7bce233..9e5307b 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -10,7 +10,6 @@
function neutron_plugin_create_nova_conf {
NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
iniset $NOVA_CONF neutron ovs_bridge $NOVA_OVS_BRIDGE
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
}
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
deleted file mode 100644
index 0bc9bff..0000000
--- a/lib/neutron_plugins/ofagent_agent
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# REVISIT(yamamoto): This file is intentionally left empty
-# in order to keep Q_AGENT=ofagent_agent work.
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
deleted file mode 100644
index 48a368a..0000000
--- a/lib/neutron_plugins/oneconvergence
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-#
-# Neutron One Convergence plugin
-# ------------------------------
-
-# Save trace setting
-OC_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-Q_L3_ENABLED=true
-Q_L3_ROUTER_PER_TENANT=true
-Q_USE_NAMESPACE=true
-
-function neutron_plugin_install_agent_packages {
- _neutron_ovs_base_install_agent_packages
-}
-# Configure common parameters
-function neutron_plugin_configure_common {
-
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
- Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
- Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
-}
-
-# Configure plugin specific information
-function neutron_plugin_configure_service {
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
- iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
-}
-
-function neutron_plugin_configure_debug_command {
- _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_setup_interface_driver {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
-function has_neutron_plugin_security_group {
- # 1 means False here
- return 0
-}
-
-function setup_integration_bridge {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-}
-
-function neutron_plugin_configure_dhcp_agent {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function neutron_plugin_configure_l3_agent {
- _neutron_ovs_base_configure_l3_agent
- iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function neutron_plugin_configure_plugin_agent {
-
- AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
-
- _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_create_nova_conf {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
- if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
- setup_integration_bridge
- fi
-}
-
-# Restore xtrace
-$OC_XTRACE
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
old mode 100755
new mode 100644
index 1ff3a40..48e47b3
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -105,6 +105,7 @@
iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND"
fi
iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
+ iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE
}
function neutron_plugin_setup_interface_driver {
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
old mode 100755
new mode 100644
index f1f7f85..d3fd198
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -8,7 +8,8 @@
set +o xtrace
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""}
+# OVS recognizes the default 'system' datapath or 'netdev' for the userspace datapath
+OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system}
OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun}
function is_neutron_ovs_base_plugin {
@@ -20,7 +21,7 @@
local bridge=$1
local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge"
- if [ "$OVS_DATAPATH_TYPE" != "" ] ; then
+ if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
fi
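With `system` as the default, `_neutron_ovs_base_setup_bridge` only appends the `datapath_type` setting for non-default values such as `netdev` (the userspace datapath). For `OVS_DATAPATH_TYPE=netdev` the assembled command would be roughly:

    sudo ovs-vsctl --no-wait -- --may-exist add-br br-int \
        -- set Bridge br-int datapath_type=netdev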
@@ -48,8 +49,10 @@
function _neutron_ovs_base_install_ubuntu_dkms {
# install Dynamic Kernel Module Support packages if needed
- local kernel_version=$(uname -r)
- local kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
+ local kernel_version
+ kernel_version=$(uname -r)
+ local kernel_major_minor
+ kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
# From kernel 3.13 on, openvswitch-datapath-dkms is not needed
if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then
install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version"
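Per its use here, `vercmp_numbers` prints a negative, zero, or positive number as the first version sorts before, equal to, or after the second. On a hypothetical 3.10 kernel:

    vercmp_numbers "3.10" "3.13"   # prints a value < 0, so the DKMS packages are installed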
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
deleted file mode 100644
index 0d711fe..0000000
--- a/lib/neutron_plugins/plumgrid
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-#
-# PLUMgrid Neutron Plugin
-# Edgar Magana emagana@plumgrid.com
-# ------------------------------------
-
-# Save trace settings
-PG_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function neutron_plugin_create_nova_conf {
- :
-}
-
-function neutron_plugin_setup_interface_driver {
- :
-}
-
-function neutron_plugin_configure_common {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
- Q_PLUGIN_CONF_FILENAME=plumgrid.ini
- Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
- PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
- PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
- PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
- PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
- PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
- PLUMGRID_DRIVER=${PLUMGRID_DRIVER:-neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib}
-}
-
-function neutron_plugin_configure_service {
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server $PLUMGRID_DIRECTOR_IP
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server_port $PLUMGRID_DIRECTOR_PORT
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector username $PLUMGRID_ADMIN
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector password $PLUMGRID_PASSWORD
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector servertimeout $PLUMGRID_TIMEOUT
- iniset /$Q_PLUGIN_CONF_FILE plumgriddirector driver $PLUMGRID_DRIVER
-}
-
-function neutron_plugin_configure_debug_command {
- :
-}
-
-function is_neutron_ovs_base_plugin {
- # False
- return 1
-}
-
-function has_neutron_plugin_security_group {
- # return 0 means enabled
- return 0
-}
-
-function neutron_plugin_check_adv_test_requirements {
- is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-# Restore xtrace
-$PG_XTRACE
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 61a148e..3496da8 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -1,3 +1,5 @@
+#!/bin/bash
+
# Neutron firewall plugin
# ---------------------------
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 34190f9..7865f6f 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -1,3 +1,5 @@
+#!/bin/bash
+
# Neutron loadbalancer plugin
# ---------------------------
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 37ba019..c75ab19 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -1,3 +1,5 @@
+#!/bin/bash
+
# Neutron metering plugin
# ---------------------------
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 4d6a2bf..c0e7457 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -1,3 +1,5 @@
+#!/bin/bash
+
# Neutron VPN plugin
# ---------------------------
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 03853a9..e182fca 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,2 +1,4 @@
+#!/bin/bash
+
# REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx'
# continues to work.
diff --git a/lib/nova b/lib/nova
index 6441a89..6c41403 100644
--- a/lib/nova
+++ b/lib/nova
@@ -56,13 +56,11 @@
NOVA_API_DB=${NOVA_API_DB:-nova_api}
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
-# NOVA_API_VERSION valid options
-# - default - setup API end points as nova does out of the box
-# - v21default - make v21 the default on /v2
-#
-# NOTE(sdague): this is for transitional testing of the Nova v21 API.
-# Expect to remove in L or M.
-NOVA_API_VERSION=${NOVA_API_VERSION-default}
+
+# NOVA_V2_LEGACY defines whether we force the Nova v2.0 endpoint onto
+# the Nova v2.0 legacy code base. Remove this option once the Nova
+# v2.0 legacy codebase is removed.
+NOVA_V2_LEGACY=$(trueorfalse False NOVA_V2_LEGACY)
if is_suse; then
NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova}
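`trueorfalse` normalizes the usual boolean spellings (1/yes/true, 0/no/false) to a canonical `True`/`False`, falling back to the supplied default when the variable is unset or unrecognized. Roughly:

    NOVA_V2_LEGACY=yes
    NOVA_V2_LEGACY=$(trueorfalse False NOVA_V2_LEGACY)   # -> True
    unset NOVA_V2_LEGACY
    NOVA_V2_LEGACY=$(trueorfalse False NOVA_V2_LEGACY)   # -> False (the default)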
@@ -167,10 +165,6 @@
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,nova
-
-
# Functions
# ---------
@@ -208,14 +202,16 @@
clean_iptables
# Destroy old instances
- local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+ local instances
+ instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 sudo virsh destroy || true
echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
fi
# Logout and delete iscsi sessions
- local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+ local tgts
+ tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
local target
for target in $tgts; do
sudo iscsiadm --mode node -T $target --logout || true
@@ -251,8 +247,10 @@
function _config_nova_apache_wsgi {
sudo mkdir -p $NOVA_WSGI_DIR
- local nova_apache_conf=$(apache_site_config_for nova-api)
- local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+ local nova_apache_conf
+ nova_apache_conf=$(apache_site_config_for nova-api)
+ local nova_ec2_apache_conf
+ nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
local nova_ssl=""
local nova_certfile=""
local nova_keyfile=""
@@ -313,9 +311,11 @@
# Get the sample configuration file in place
cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR
- # For testing v21 is equivalent to v2
- if [[ "$NOVA_API_VERSION" == "v21default" ]]; then
- sed -i s/": openstack_compute_api_v2$"/": openstack_compute_api_v21"/ "$NOVA_API_PASTE_INI"
+ # For setting up an environment where v2.0 is running on the
+ # v2.0 legacy code base.
+ if [[ "$NOVA_V2_LEGACY" == "True" ]]; then
+ sed -i s@"^/v2: openstack_compute_api_v21_legacy_v2_compatible$"@"/v2: openstack_compute_api_legacy_v2"@ \
+ "$NOVA_API_PASTE_INI"
fi
fi
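The sed expression rewrites a single route in the copied `api-paste.ini`; when `NOVA_V2_LEGACY=True` the effect is:

    # before:
    /v2: openstack_compute_api_v21_legacy_v2_compatible
    # after:
    /v2: openstack_compute_api_legacy_v2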
@@ -332,7 +332,7 @@
if [ ! -e /dev/kvm ]; then
echo "WARNING: Switching to QEMU"
LIBVIRT_TYPE=qemu
- if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
+ if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
# https://bugzilla.redhat.com/show_bug.cgi?id=753589
sudo setsebool virt_use_execmem on
fi
@@ -354,6 +354,12 @@
sudo mount /cgroup
fi
fi
+
+ # enable nbd for lxc unless you're using an lvm backend;
+ # otherwise you can't boot instances
+ if [[ "$NOVA_BACKEND" != "LVM" ]]; then
+ sudo modprobe nbd
+ fi
fi
fi
fi
@@ -411,15 +417,16 @@
nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute"
fi
- get_or_create_service "nova" "compute" "Nova Compute Service"
- get_or_create_endpoint "compute" \
+ get_or_create_service "nova_legacy" "compute_legacy" \
+ "Nova Compute Service (Legacy 2.0)"
+ get_or_create_endpoint "compute_legacy" \
"$REGION_NAME" \
"$nova_api_url/v2/\$(tenant_id)s" \
"$nova_api_url/v2/\$(tenant_id)s" \
"$nova_api_url/v2/\$(tenant_id)s"
- get_or_create_service "novav21" "computev21" "Nova Compute Service V2.1"
- get_or_create_endpoint "computev21" \
+ get_or_create_service "nova" "compute" "Nova Compute Service"
+ get_or_create_endpoint "compute" \
"$REGION_NAME" \
"$nova_api_url/v2.1/\$(tenant_id)s" \
"$nova_api_url/v2.1/\$(tenant_id)s" \
@@ -437,13 +444,18 @@
# EC2
if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-
+ local nova_ec2_api_url
+ if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
+ nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:$EC2_SERVICE_PORT/"
+ else
+ nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST/ec2"
+ fi
get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer"
get_or_create_endpoint "ec2" \
"$REGION_NAME" \
- "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
- "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
- "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
+ "$nova_ec2_api_url" \
+ "$nova_ec2_api_url" \
+ "$nova_ec2_api_url"
fi
fi
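The EC2 endpoint now follows the deployment style instead of hard-coding port 8773. With a hypothetical `SERVICE_HOST=203.0.113.5` (and assuming `EC2_SERVICE_PORT` defaults to 8773):

    # NOVA_USE_MOD_WSGI=False -> http://203.0.113.5:8773/
    # NOVA_USE_MOD_WSGI=True  -> http://203.0.113.5/ec2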
@@ -605,6 +617,8 @@
iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
+ # don't let the conductor get out of control now that we're using a pure python db driver
+ iniset $NOVA_CONF conductor workers "$API_WORKERS"
iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
@@ -779,7 +793,8 @@
export PATH=$NOVA_BIN_DIR:$PATH
# If the site is not enabled then we are in a grenade scenario
- local enabled_site_file=$(apache_site_config_for nova-api)
+ local enabled_site_file
+ enabled_site_file=$(apache_site_config_for nova-api)
if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
enable_apache_site nova-api
enable_apache_site nova-ec2-api
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
old mode 100755
new mode 100644
diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere
index c406e09..698f836 100644
--- a/lib/nova_plugins/hypervisor-vsphere
+++ b/lib/nova_plugins/hypervisor-vsphere
@@ -42,9 +42,6 @@
iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER"
iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD"
iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER"
- if is_service_enabled neutron; then
- iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE
- fi
}
# install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index efce383..e097990 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -79,7 +79,7 @@
# Create a cron job that will rotate guest logs
$ssh_dom0 crontab - << CRONTAB
-* * * * * /root/rotate_xen_guest_logs.sh
+* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1
CRONTAB
# Create directories for kernels and images
diff --git a/lib/oslo b/lib/oslo
index 123572c..56615fa 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -36,6 +36,7 @@
GITDIR["oslo.messaging"]=$DEST/oslo.messaging
GITDIR["oslo.middleware"]=$DEST/oslo.middleware
GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.privsep"]=$DEST/oslo.privsep
GITDIR["oslo.reports"]=$DEST/oslo.reports
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
GITDIR["oslo.serialization"]=$DEST/oslo.serialization
@@ -59,7 +60,7 @@
local name=$1
if use_library_from_git "$name"; then
git_clone_by_name "$name"
- setup_lib "$name"
+ setup_dev_lib "$name"
fi
}
@@ -79,6 +80,7 @@
_do_install_oslo_lib "oslo.messaging"
_do_install_oslo_lib "oslo.middleware"
_do_install_oslo_lib "oslo.policy"
+ _do_install_oslo_lib "oslo.privsep"
_do_install_oslo_lib "oslo.reports"
_do_install_oslo_lib "oslo.rootwrap"
_do_install_oslo_lib "oslo.serialization"
diff --git a/lib/swift b/lib/swift
index 4c2b292..3a8e80d 100644
--- a/lib/swift
+++ b/lib/swift
@@ -131,9 +131,9 @@
# Port bases used in port number calculation for the service "nodes"
# The specified port number will be used, and the additional ports are
# calculated as base_port + node_num * 10
-OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013}
-CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
-ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
+OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613}
+CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611}
+ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612}
# Enable tempurl feature
SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
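With the new bases, each additional node's ports are offset by 10; for the object servers, for example:

    # node 1: 6613, node 2: 6623, node 3: 6633
    object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))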
@@ -142,10 +142,6 @@
# Toggle for deploying Swift under HTTPD + mod_wsgi
SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False}
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,swift
-
-
# Functions
# ---------
@@ -210,9 +206,12 @@
# copy apache vhost file and set name and port
local node_number
for node_number in ${SWIFT_REPLICAS_SEQ}; do
- local object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
- local container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
- local account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
+ local object_port
+ object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
+ local container_port
+ container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
+ local account_port
+ account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
sudo sed -e "
@@ -456,6 +455,7 @@
[filter:swift3]
use = egg:swift3#swift3
+location = ${REGION_NAME}
EOF
fi
@@ -508,7 +508,8 @@
if is_service_enabled keystone; then
iniuncomment ${testfile} func_test auth_version
- local auth_vers=$(iniget ${testfile} func_test auth_version)
+ local auth_vers
+ auth_vers=$(iniget ${testfile} func_test auth_version)
iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT}
if [[ $auth_vers == "3" ]]; then
@@ -518,7 +519,8 @@
fi
fi
- local user_group=$(id -g ${STACK_USER})
+ local user_group
+ user_group=$(id -g ${STACK_USER})
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}
local swift_log_dir=${SWIFT_DATA_DIR}/logs
@@ -544,7 +546,8 @@
# First do a bit of setup by creating the directories and
# changing the permissions so we can run it as our user.
- local user_group=$(id -g ${STACK_USER})
+ local user_group
+ user_group=$(id -g ${STACK_USER})
sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
# Create a loopback disk and format it to XFS.
@@ -611,7 +614,8 @@
KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
- local another_role=$(get_or_create_role "anotherrole")
+ local another_role
+ another_role=$(get_or_create_role "anotherrole")
# NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
# temp urls, which break when uploaded by a non-admin role
@@ -627,33 +631,40 @@
"$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s"
fi
- local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
+ local swift_tenant_test1
+ swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
"default" "test@example.com")
die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
- local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+ local swift_user_test3
+ swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
"default" "test3@example.com")
die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
- local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
+ local swift_tenant_test2
+ swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
- local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+ local swift_user_test2
+ swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
"default" "test2@example.com")
die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
- local swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
+ local swift_domain
+ swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
die_if_not_set $LINENO swift_domain "Failure creating swift_test domain"
- local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
+ local swift_tenant_test4
+ swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
- local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+ local swift_user_test4
+ swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
$swift_domain "test4@example.com")
die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
@@ -803,9 +814,10 @@
function swift_configure_tempurls {
OS_USERNAME=swift \
- OS_TENANT_NAME=$SERVICE_TENANT_NAME \
+ OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
OS_PASSWORD=$SERVICE_PASSWORD \
- swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
+ OS_AUTH_URL=$SERVICE_ENDPOINT \
+ swift post --auth-version 3 -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
}
# Restore xtrace
diff --git a/lib/tempest b/lib/tempest
index be24da6..10dd652 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -119,10 +119,6 @@
pip_install_gr testrepository
fi
- # Used during configuration so make sure we have the correct
- # version installed
- pip_install_gr python-openstackclient
-
local image_lines
local images
local num_images
@@ -205,21 +201,12 @@
if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
available_flavors=$(nova flavor-list)
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
- if is_arch "ppc64"; then
- # Qemu needs at least 128MB of memory to boot on ppc64
- nova flavor-create m1.nano 42 128 0 1
- else
- nova flavor-create m1.nano 42 64 0 1
- fi
+ nova flavor-create m1.nano 42 64 0 1
fi
flavor_ref=42
boto_instance_type=m1.nano
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
- if is_arch "ppc64"; then
- nova flavor-create m1.micro 84 256 0 1
- else
- nova flavor-create m1.micro 84 128 0 1
- fi
+ nova flavor-create m1.micro 84 128 0 1
fi
flavor_ref_alt=84
else
@@ -340,7 +327,6 @@
if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
fi
- iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image true
# Image Features
iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
@@ -361,10 +347,19 @@
iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
fi
+ # Set the service catalog entry for Tempest to run on. Typically
+ # used to try different compute API version targets. The tempest
+ # default is 'compute', which is typically valid, so only set this
+ # if you want to change it.
+ if [[ -n "$TEMPEST_COMPUTE_TYPE" ]]; then
+ iniset $TEMPEST_CONFIG compute catalog_type $TEMPEST_COMPUTE_TYPE
+ fi
+
# Compute Features
# Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
# NOTE(mtreinish): This must be done after auth settings are added to the tempest config
- local tmp_cfg_file=$(mktemp)
+ local tmp_cfg_file
+ tmp_cfg_file=$(mktemp)
cd $TEMPEST_DIR
tox -revenv -- verify-tempest-config -uro $tmp_cfg_file
@@ -390,6 +385,12 @@
# neutron.allow_duplicate_networks option was removed from nova in Liberty
# and is now the default behavior.
iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True}
+ if is_service_enabled n-cell; then
+ # Cells doesn't support shelving/unshelving
+ iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+ # Cells doesn't support hot-plugging virtual interfaces.
+ iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
+ fi
# Network
iniset $TEMPEST_CONFIG network api_version 2.0
@@ -418,7 +419,6 @@
iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml
iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
iniset $TEMPEST_CONFIG boto http_socket_timeout 30
- iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
# Orchestration Tests
if is_service_enabled heat; then
@@ -447,9 +447,6 @@
iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
# Telemetry
- # Ceilometer API optimization happened in Juno that allows to run more tests in tempest.
- # Once Tempest retires support for icehouse this flag can be removed.
- iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False"
iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True"
# Object Store
@@ -468,6 +465,8 @@
# Volume
# TODO(dkranz): Remove the bootable flag when Juno is end of life.
iniset $TEMPEST_CONFIG volume-feature-enabled bootable True
+ # TODO(jordanP): Remove the extend_with_snapshot flag when Juno is end of life.
+ iniset $TEMPEST_CONFIG volume-feature-enabled extend_with_snapshot True
local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"}
if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
@@ -536,6 +535,11 @@
fi
# ``service_available``
+ #
+ # this tempest service list needs to be all the services that
+ # tempest supports, otherwise we can have an erroneous set of
+ # defaults (something defaulting to true in Tempest, but not listed here).
+ TEMPEST_SERVICES="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove"
for service in ${TEMPEST_SERVICES//,/ }; do
if is_service_enabled $service ; then
iniset $TEMPEST_CONFIG service_available $service "True"
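The loop then produces a fully populated `[service_available]` section in `tempest.conf`, e.g. for a hypothetical run with nova and neutron enabled but trove disabled (the disabled case is presumably handled by the loop's else branch, not shown here):

    [service_available]
    nova = True
    neutron = True
    trove = False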
@@ -546,7 +550,7 @@
if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
# Use the ``BOTO_CONFIG`` environment variable to point to this file
- iniset $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
+ iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
sudo chown $STACK_USER $BOTO_CONF
fi
diff --git a/lib/tls b/lib/tls
index 8ff2027..f4740b8 100644
--- a/lib/tls
+++ b/lib/tls
@@ -346,7 +346,8 @@
# we need to change it.
function fix_system_ca_bundle_path {
if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
- local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
+ local capath
+ capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
if is_fedora; then
diff --git a/lib/zaqar b/lib/zaqar
deleted file mode 100644
index fdab3a2..0000000
--- a/lib/zaqar
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/bin/bash
-#
-# lib/zaqar
-# Install and start **Zaqar** service
-
-# To enable a minimal set of Zaqar services, add the following to localrc:
-#
-# enable_service zaqar-server
-#
-# Dependencies:
-# - functions
-# - OS_AUTH_URL for auth in api
-# - DEST set to the destination directory
-# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
-# - STACK_USER service user
-
-# stack.sh
-# ---------
-# install_zaqar
-# configure_zaqar
-# init_zaqar
-# start_zaqar
-# stop_zaqar
-# cleanup_zaqar
-# cleanup_zaqar_mongodb
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-ZAQAR_DIR=$DEST/zaqar
-ZAQARCLIENT_DIR=$DEST/python-zaqarclient
-ZAQAR_CONF_DIR=/etc/zaqar
-ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf
-ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar}
-
-# Support potential entry-points console scripts
-ZAQAR_BIN_DIR=$(get_python_exec_prefix)
-
-# Set up database backend
-ZAQAR_BACKEND=${ZAQAR_BACKEND:-mongodb}
-
-
-# Set Zaqar repository
-ZAQAR_REPO=${ZAQAR_REPO:-${GIT_BASE}/openstack/zaqar.git}
-ZAQAR_BRANCH=${ZAQAR_BRANCH:-master}
-
-# Set client library repository
-ZAQARCLIENT_REPO=${ZAQARCLIENT_REPO:-${GIT_BASE}/openstack/python-zaqarclient.git}
-ZAQARCLIENT_BRANCH=${ZAQARCLIENT_BRANCH:-master}
-
-# Set Zaqar Connection Info
-ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST}
-ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888}
-ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,zaqar
-
-
-# Functions
-# ---------
-
-# Test if any Zaqar services are enabled
-# is_zaqar_enabled
-function is_zaqar_enabled {
- [[ ,${ENABLED_SERVICES} =~ ,"zaqar-" ]] && return 0
- return 1
-}
-
-# cleanup_zaqar() - Cleans up general things from previous
-# runs and storage specific left overs.
-function cleanup_zaqar {
- if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then
- cleanup_zaqar_mongodb
- fi
-}
-
-# cleanup_zaqar_mongodb() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_zaqar_mongodb {
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then
- die $LINENO "Mongo DB did not start"
- else
- full_version=$(mongo zaqar --eval 'db.dropDatabase();')
- mongo_version=`echo $full_version | cut -d' ' -f4`
- required_mongo_version='2.2'
- if [[ $mongo_version < $required_mongo_version ]]; then
- die $LINENO "Zaqar needs Mongo DB version >= 2.2 to run."
- fi
- fi
-}
-
-# configure_zaqarclient() - Set config files, create data dirs, etc
-function configure_zaqarclient {
- setup_develop $ZAQARCLIENT_DIR
-}
-
-# configure_zaqar() - Set config files, create data dirs, etc
-function configure_zaqar {
- setup_develop $ZAQAR_DIR
-
- sudo install -d -o $STACK_USER -m 755 $ZAQAR_CONF_DIR
-
- iniset $ZAQAR_CONF DEFAULT debug True
- iniset $ZAQAR_CONF DEFAULT verbose True
- iniset $ZAQAR_CONF DEFAULT admin_mode True
- iniset $ZAQAR_CONF DEFAULT use_syslog $SYSLOG
- iniset $ZAQAR_CONF 'drivers:transport:wsgi' bind $ZAQAR_SERVICE_HOST
-
- configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR
-
- if [ "$ZAQAR_BACKEND" = 'mysql' ] || [ "$ZAQAR_BACKEND" = 'postgresql' ] ; then
- iniset $ZAQAR_CONF drivers storage sqlalchemy
- iniset $ZAQAR_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url zaqar`
- elif [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then
- iniset $ZAQAR_CONF drivers storage mongodb
- iniset $ZAQAR_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/zaqar
- configure_mongodb
- elif [ "$ZAQAR_BACKEND" = 'redis' ] ; then
- iniset $ZAQAR_CONF drivers storage redis
- iniset $ZAQAR_CONF 'drivers:storage:redis' uri redis://localhost:6379
- configure_redis
- fi
-
- iniset $ZAQAR_CONF DEFAULT notification_driver messaging
- iniset $ZAQAR_CONF DEFAULT control_exchange zaqar
-
- iniset_rpc_backend zaqar $ZAQAR_CONF
-
- cleanup_zaqar
-}
-
-function configure_redis {
- if is_ubuntu; then
- install_package redis-server
- pip_install_gr redis
- elif is_fedora; then
- install_package redis
- pip_install_gr redis
- else
- exit_distro_not_supported "redis installation"
- fi
-}
-
-function configure_mongodb {
- # Set nssize to 2GB. This increases the number of namespaces supported
- # # per database.
- if is_ubuntu; then
- sudo sed -i -e "
- s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1|
- s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047|
- " /etc/mongodb.conf
- restart_service mongodb
- elif is_fedora; then
- sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
- restart_service mongod
- fi
-}
-
-# init_zaqar() - Initialize etc.
-function init_zaqar {
- # Create cache dir
- sudo install -d -o $STACK_USER $ZAQAR_AUTH_CACHE_DIR
- rm -f $ZAQAR_AUTH_CACHE_DIR/*
-}
-
-# install_zaqar() - Collect source and prepare
-function install_zaqar {
- git_clone $ZAQAR_REPO $ZAQAR_DIR $ZAQAR_BRANCH
- setup_develop $ZAQAR_DIR
-}
-
-# install_zaqarclient() - Collect source and prepare
-function install_zaqarclient {
- git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH
- setup_develop $ZAQARCLIENT_DIR
-}
-
-# start_zaqar() - Start running processes, including screen
-function start_zaqar {
- if [[ "$USE_SCREEN" = "False" ]]; then
- run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon"
- else
- run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF"
- fi
-
- echo "Waiting for Zaqar to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT/v1/health; do sleep 1; done"; then
- die $LINENO "Zaqar did not start"
- fi
-}
-
-# stop_zaqar() - Stop running processes
-function stop_zaqar {
- local serv
- # Kill the zaqar screen windows
- for serv in zaqar-server; do
- screen -S $SCREEN_NAME -p $serv -X kill
- done
-}
-
-function create_zaqar_accounts {
- create_service_user "zaqar"
-
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
- get_or_create_service "zaqar" "messaging" "Zaqar Service"
- get_or_create_endpoint "messaging" \
- "$REGION_NAME" \
- "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
- "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
- "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT"
- fi
-
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/samples/local.conf b/samples/local.conf
index ce70073..cb293b6 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -28,7 +28,7 @@
# and they will be added to ``local.conf``.
SERVICE_TOKEN=azertytoken
ADMIN_PASSWORD=nomoresecrete
-MYSQL_PASSWORD=stackdb
+DATABASE_PASSWORD=stackdb
RABBIT_PASSWORD=stackqueue
SERVICE_PASSWORD=$ADMIN_PASSWORD
diff --git a/setup.cfg b/setup.cfg
index 5887134..e4b2888 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@
README.md
author = OpenStack
author-email = openstack-dev@lists.openstack.org
-home-page = http://devstack.org
+home-page = http://docs.openstack.org/developer/devstack
classifier =
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
diff --git a/stack.sh b/stack.sh
index 639f72b..8024731 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,9 +1,8 @@
#!/usr/bin/env bash
# ``stack.sh`` is an opinionated OpenStack developer installation. It
-# installs and configures various combinations of **Ceilometer**, **Cinder**,
-# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**,
-# and **Swift**
+# installs and configures various combinations of **Cinder**, **Glance**,
+# **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift**
# This script's options can be changed by setting appropriate environment
# variables. You can configure things like which git repositories to use,
@@ -21,16 +20,13 @@
# Learn more and get the most recent version at http://devstack.org
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
# Make sure custom grep options don't get in the way
unset GREP_OPTIONS
-# Sanitize language settings to avoid commands bailing out
-# with "unsupported locale setting" errors.
-unset LANG
-unset LANGUAGE
-LC_ALL=C
-export LC_ALL
-
# Make sure umask is sane
umask 022
@@ -97,6 +93,15 @@
exit 1
fi
+# Provide a safety switch for devstack. If you run devstack on a lot
+# of different environments, you sometimes run it on the wrong box.
+# This provides a way to prevent that.
+if [[ -e $HOME/.no-devstack ]]; then
+ echo "You've marked this host as a no-devstack host, to save yourself from"
+ echo "running devstack accidentally. If this is in error, please remove the"
+ echo "~/.no-devstack file"
+ exit 1
+fi
# Prepare the environment
# -----------------------
@@ -282,14 +287,7 @@
# ... and also optional to be enabled
sudo yum-config-manager --enable rhel-7-server-optional-rpms
- RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"}
- RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"}
-
- if ! sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then
- echo "RDO repo not detected; installing"
- yum_install $RHEL_RDO_REPO_RPM || \
- die $LINENO "Error installing RDO repo, cannot continue"
- fi
+ sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm
if is_oraclelinux; then
sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
@@ -310,9 +308,6 @@
safe_chown -R $STACK_USER $DEST
safe_chmod 0755 $DEST
-# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
-check_path_perm_sanity ${DEST}
-
# Destination path for service data
DATA_DIR=${DATA_DIR:-${DEST}/data}
sudo mkdir -p $DATA_DIR
@@ -431,7 +426,7 @@
# Set up logging of screen windows
# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``, we will log to the the file
+# directory specified in ``SCREEN_LOGDIR``, we will log to the file
# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
# ``screen-$SERVICE_NAME.log`` to the latest log file.
# Logs are kept for as long specified in ``LOGDAYS``.
@@ -447,6 +442,8 @@
fi
fi
+# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
+check_path_perm_sanity ${DEST}
# Configure Error Traps
# ---------------------
@@ -494,10 +491,6 @@
# Begin trapping error exit codes
set -o errexit
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following along as the install occurs.
-set -o xtrace
-
# Print the kernel version
uname -a
@@ -522,7 +515,7 @@
# Clone all external plugins
fetch_plugins
-# Plugin Phase 0: override_defaults - allow pluggins to override
+# Plugin Phase 0: override_defaults - allow plugins to override
# defaults before other services are run
run_phase override_defaults
@@ -542,7 +535,6 @@
source $TOP_DIR/lib/nova
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/neutron-legacy
source $TOP_DIR/lib/ldap
@@ -554,6 +546,7 @@
# Phase: source
run_phase source
+
# Interactive Configuration
# -------------------------
@@ -569,7 +562,7 @@
if [[ -f $RC_DIR/localrc ]]; then
localrc=$TOP_DIR/localrc
else
- localrc=$TOP_DIR/.localrc.auto
+ localrc=$TOP_DIR/.localrc.password
fi
# If the password is not defined yet, proceed to prompt user for a password.
@@ -579,13 +572,15 @@
touch $localrc
fi
- # Presumably if we got this far it can only be that our localrc is missing
- # the required password. Prompt user for a password and write to localrc.
+ # Presumably if we got this far it can only be that our
+ # localrc is missing the required password. Prompt user for a
+ # password and write to localrc.
+
echo ''
echo '################################################################################'
echo $msg
echo '################################################################################'
- echo "This value will be written to your localrc file so you don't have to enter it "
+ echo "This value will be written to ${localrc} file so you don't have to enter it "
echo "again. Use only alphanumeric characters."
echo "If you leave this blank, a random default value will be used."
pw=" "
@@ -736,6 +731,8 @@
if is_service_enabled $DATABASE_BACKENDS; then
install_database
+fi
+if [ -n "$DATABASE_TYPE" ]; then
install_database_python
fi
@@ -826,13 +823,6 @@
configure_horizon
fi
-if is_service_enabled ceilometer; then
- install_ceilometerclient
- stack_install_service ceilometer
- echo_summary "Configuring Ceilometer"
- configure_ceilometer
-fi
-
if is_service_enabled heat; then
stack_install_service heat
install_heat_other
@@ -989,13 +979,15 @@
start_keystone
fi
+ export OS_IDENTITY_API_VERSION=3
+
# Set up a temporary admin URI for Keystone
- SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v2.0
+ SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3
if is_service_enabled tls-proxy; then
export OS_CACERT=$INT_CA_DIR/ca-chain.pem
# Until the client support is fixed, just use the internal endpoint
- SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
+ SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3
fi
# Setup OpenStackClient token-endpoint auth
@@ -1008,10 +1000,6 @@
create_cinder_accounts
create_neutron_accounts
- if is_service_enabled ceilometer; then
- create_ceilometer_accounts
- fi
-
if is_service_enabled swift; then
create_swift_accounts
fi
@@ -1023,17 +1011,32 @@
# Begone token auth
unset OS_TOKEN OS_URL
- # force set to use v2 identity authentication even with v3 commands
- export OS_AUTH_TYPE=v2password
+ # Rather than just export these, we write them out to an
+ # intermediate userrc file that can also be used to debug if
+ # something goes wrong between here and running
+ # tools/create_userrc.sh (this script relies on services other
+ # than keystone being available, so we can't call it right now)
+ cat > $TOP_DIR/userrc_early <<EOF
+# Use this for debugging issues before files in accrc are created
- # Set up password auth credentials now that Keystone is bootstrapped
- export OS_AUTH_URL=$SERVICE_ENDPOINT
- export OS_TENANT_NAME=admin
- export OS_USERNAME=admin
- export OS_PASSWORD=$ADMIN_PASSWORD
- export OS_REGION_NAME=$REGION_NAME
+# Set up password auth credentials now that Keystone is bootstrapped
+export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_USERNAME=admin
+export OS_USER_DOMAIN_ID=default
+export OS_PASSWORD=$ADMIN_PASSWORD
+export OS_PROJECT_NAME=admin
+export OS_PROJECT_DOMAIN_ID=default
+export OS_REGION_NAME=$REGION_NAME
+
+EOF
+
+ source $TOP_DIR/userrc_early
+
fi
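Besides being sourced immediately, the file doubles as a debugging aid: if something breaks between the Keystone bootstrap and `tools/create_userrc.sh`, the same admin credentials can be loaded by hand, e.g.:

    source userrc_early
    openstack project list   # sanity-check the freshly bootstrapped Keystone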
+# Write a clouds.yaml file
+write_clouds_yaml
+
# Horizon
# -------
@@ -1183,8 +1186,6 @@
# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
if is_service_enabled g-reg; then
- TOKEN=$(openstack token issue -c id -f value)
- die_if_not_set $LINENO TOKEN "Keystone fail to get token"
echo_summary "Uploading images"
@@ -1194,7 +1195,7 @@
fi
for image_url in ${IMAGE_URLS//,/ }; do
- upload_image $image_url $TOKEN
+ upload_image $image_url
done
fi
@@ -1255,11 +1256,6 @@
start_cinder
create_volume_types
fi
-if is_service_enabled ceilometer; then
- echo_summary "Starting Ceilometer"
- init_ceilometer
- start_ceilometer
-fi
# Configure and launch Heat engine, api and metadata
if is_service_enabled heat; then
@@ -1300,43 +1296,6 @@
# Save some values we generated for later use
save_stackenv
-# Update/create user clouds.yaml file.
-# clouds.yaml will have
-# - A `devstack` entry for the `demo` user for the `demo` project.
-# - A `devstack-admin` entry for the `admin` user for the `admin` project.
-
-# The location is a variable to allow for easier refactoring later to make it
-# overridable. There is currently no usecase where doing so makes sense, so
-# it's not currently configurable.
-CLOUDS_YAML=~/.config/openstack/clouds.yaml
-
-mkdir -p $(dirname $CLOUDS_YAML)
-
-CA_CERT_ARG=''
-if [ -f "$SSL_BUNDLE_FILE" ]; then
- CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
-fi
-$TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack \
- --os-region-name $REGION_NAME \
- --os-identity-api-version $IDENTITY_API_VERSION \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
- --os-username demo \
- --os-password $ADMIN_PASSWORD \
- --os-project-name demo
-$TOP_DIR/tools/update_clouds_yaml.py \
- --file $CLOUDS_YAML \
- --os-cloud devstack-admin \
- --os-region-name $REGION_NAME \
- --os-identity-api-version $IDENTITY_API_VERSION \
- $CA_CERT_ARG \
- --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
- --os-username admin \
- --os-password $ADMIN_PASSWORD \
- --os-project-name admin
-
# Wrapup configuration
# ====================
@@ -1373,9 +1332,16 @@
$TOP_DIR/local.sh
fi
+# Sanity checks
+# =============
+
# Check the status of running services
service_check
+# ensure that all the libraries we think we installed from git,
+# actually were.
+check_libs_from_git
+
# Bash completion
# ===============
@@ -1408,6 +1374,8 @@
exec 1>&3
fi
+# Dump out the time totals
+time_totals
# Using the cloud
# ===============
diff --git a/stackrc b/stackrc
index 156cb1f..819aa01 100644
--- a/stackrc
+++ b/stackrc
@@ -2,6 +2,18 @@
#
# stackrc
#
+
+# ensure we don't re-source this in the same environment
+[[ -z "$_DEVSTACK_STACKRC" ]] || return 0
+declare -r _DEVSTACK_STACKRC=1
+
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+
# Find the other rc files
RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
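The guard makes `stackrc` idempotent within a shell: the first `source` defines the read-only marker and every later `source` returns immediately. The same pattern in miniature:

    # myrc
    [[ -z "$_MYRC" ]] || return 0
    declare -r _MYRC=1
    echo "configured"   # printed only on the first `source myrc`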
@@ -78,12 +90,6 @@
# services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``)
ENABLE_HTTPD_MOD_WSGI_SERVICES=True
-# Tell Tempest which services are available. The default is set here as
-# Tempest falls late in the configuration sequence. This differs from
-# ``ENABLED_SERVICES`` in that the project names are used here rather than
-# the service names, i.e.: ``TEMPEST_SERVICES="key,glance,nova"``
-TEMPEST_SERVICES=""
-
# Set the default Nova APIs to enable
NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
@@ -97,6 +103,11 @@
# be disabled for automated testing by setting this value to False.
USE_SCREEN=True
+# Passwords generated by interactive devstack runs
+if [[ -r $RC_DIR/.localrc.password ]]; then
+ source $RC_DIR/.localrc.password
+fi
+
# allow local overrides of env variables, including repo config
if [[ -f $RC_DIR/localrc ]]; then
# Old-style user-supplied config
@@ -182,10 +193,6 @@
#
##############
-# telemetry service
-CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
-CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master}
-
# block storage service
CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
CINDER_BRANCH=${CINDER_BRANCH:-master}
@@ -255,14 +262,10 @@
##############
#
-# OpenStack Client Library Componets
+# OpenStack Client Library Components
#
##############
-# ceilometer client library
-GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
-GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master}
-
# volume client
GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master}
@@ -368,6 +371,10 @@
GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
+# oslo.privsep
+GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
+GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master}
+
# oslo.reports
GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
@@ -444,17 +451,22 @@
GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master}
# s3 support for swift
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
# ceilometer middleware
GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master}
+GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
# os-brick library to manage local volume attaches
GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master}
+# ironic common lib
+GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
+GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master}
+
##################
#
@@ -560,40 +572,47 @@
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and
# ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``.
-case "$VIRT_DRIVER" in
- openvz)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
- libvirt)
- case "$LIBVIRT_TYPE" in
- lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz"};;
- *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
- esac
- ;;
- vsphere)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
- IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};;
- xenserver)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk}
- IMAGE_URLS=${IMAGE_URLS:-"http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"}
- IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
- ironic)
- # Ironic can do both partition and full disk images, depending on the driver
- if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk}
- else
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
- fi
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"}
- IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";;
- *) # Default to Cirros with kernel, ramdisk and disk image
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
-esac
+DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES)
+if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then
+ if [[ -n "$IMAGE_URLS" ]]; then
+ IMAGE_URLS+=","
+ fi
+ case "$VIRT_DRIVER" in
+ openvz)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
+ IMAGE_URLS+="http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz";;
+ libvirt)
+ case "$LIBVIRT_TYPE" in
+ lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
+ IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz";;
+ *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+ IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";;
+ esac
+ ;;
+ vsphere)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk}
+ IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk";;
+ xenserver)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk}
+ IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"
+ IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
+ ironic)
+ # Ironic can do both partition and full disk images, depending on the driver
+ if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk}
+ else
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
+ fi
+ IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"
+ IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";;
+ *) # Default to Cirros with kernel, ramdisk and disk image
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+ IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";;
+ esac
+ DOWNLOAD_DEFAULT_IMAGES=False
+fi
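Because the case branches now append (`+=`) instead of assigning, a user-supplied list survives, with the leading block adding the comma separator. With a hypothetical pre-set value under the libvirt/uec default:

    IMAGE_URLS="http://example.com/custom.qcow2"
    # after this block:
    # IMAGE_URLS="http://example.com/custom.qcow2,http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"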
# Staging Area for New Images, have them here for at least 24hrs for nodepool
# to cache them otherwise the failure rates in the gate are too high
@@ -606,6 +625,13 @@
fi
fi
+# Detect duplicate values in IMAGE_URLS
+for image_url in ${IMAGE_URLS//,/ }; do
+ if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then
+ die $LINENO "$image_url is a duplicate, please remove it from IMAGE_URLS."
+ fi
+done
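The check counts fixed-string occurrences of each URL in the whole list and aborts on any count above one. Note the match is a plain substring count, so a URL that is a prefix of another would count toward both:

    IMAGE_URLS="http://host/img.qcow2,http://host/img.qcow2"
    # grep -o -F finds the URL twice -> stack.sh dies with the message above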
+
# 10Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
@@ -633,7 +659,12 @@
# the memory used where there are a large number of CPUs present
# (the default number of workers for many services is the number of CPUs)
# Also sets the minimum number of workers to 2.
-API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))}
+if [[ "$VIRT_DRIVER" = 'fake' ]]; then
+ # we need more workers for the large ops job
+ API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))}
+else
+ API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))}
+fi
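A worked example on an 8-CPU box:

    cpus=8
    echo $(( (cpus/4)<2 ? 2 : (cpus/4) ))   # -> 2 workers for normal drivers
    echo $(( (cpus/2)<2 ? 2 : (cpus/2) ))   # -> 4 workers for the fake driver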
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index f555de8..be8dc5e 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -245,4 +245,33 @@
passed "OK"
fi
+function test_export_proxy_variables {
+ echo "Testing export_proxy_variables()"
+
+ local expected results
+
+ http_proxy=http_proxy_test
+ https_proxy=https_proxy_test
+ no_proxy=no_proxy_test
+
+ export_proxy_variables
+ expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
+ results=$(env | egrep '(http(s)?|no)_proxy=')
+ if [[ $expected = $results ]]; then
+ passed "OK: Proxy variables are exported when proxy variables are set"
+ else
+ failed "Expected: $expected, Failed: $results"
+ fi
+
+ unset http_proxy https_proxy no_proxy
+ export_proxy_variables
+ results=$(env | egrep '(http(s)?|no)_proxy=')
+ if [[ "" = $results ]]; then
+ passed "OK: Proxy variables aren't exported when proxy variables aren't set"
+ else
+ failed "Expected: '', Failed: $results"
+ fi
+}
+test_export_proxy_variables
+
report_results
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index 3aef6f3..d9cb8d8 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -71,15 +71,23 @@
EOF
-# Test with missing arguments
+# set TEST_SUDO to test writing to root-owned files
+SUDO_ARG=""
+SUDO=""
+if [ -n "$TEST_SUDO" ]; then
+ SUDO="sudo "
+ SUDO_ARG="-sudo "
+ sudo chown -R root:root ${INI_TMP_ETC_DIR}
+fi
+# Test with missing arguments
BEFORE=$(cat ${TEST_INI})
-iniset ${TEST_INI} aaa
+iniset ${SUDO_ARG} ${TEST_INI} aaa
NO_ATTRIBUTE=$(cat ${TEST_INI})
assert_equal "$BEFORE" "$NO_ATTRIBUTE" "test missing attribute argument"
-iniset ${TEST_INI}
+iniset ${SUDO_ARG} ${TEST_INI}
NO_SECTION=$(cat ${TEST_INI})
assert_equal "$BEFORE" "$NO_SECTION" "missing section argument"
@@ -87,7 +95,7 @@
VAL=$(iniget ${TEST_INI} aaa handlers)
assert_equal "$VAL" "aa, bb" "iniget spaces in option"
-iniset ${TEST_INI} aaa handlers "11, 22"
+iniset ${SUDO_ARG} ${TEST_INI} aaa handlers "11, 22"
VAL=$(iniget ${TEST_INI} aaa handlers)
assert_equal "$VAL" "11, 22" "iniset spaces in option"
@@ -95,7 +103,7 @@
VAL=$(iniget ${TEST_INI} " ccc " spaces)
assert_equal "$VAL" "yes" "iniget with section header space"
-iniset ${TEST_INI} "b b" opt_ion 42
+iniset ${SUDO_ARG} ${TEST_INI} "b b" opt_ion 42
VAL=$(iniget ${TEST_INI} "b b" opt_ion)
assert_equal "$VAL" "42" "iniset with section header space"
@@ -103,7 +111,7 @@
VAL=$(iniget ${TEST_INI} bbb handlers)
assert_equal "$VAL" "ee,ff" "iniget at EOF"
-iniset ${TEST_INI} bbb handlers "33,44"
+iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44"
VAL=$(iniget ${TEST_INI} bbb handlers)
assert_equal "$VAL" "33,44" "inset at EOF"
@@ -122,12 +130,12 @@
fi
# test changing empty option
-iniset ${TEST_INI} ddd empty "42"
+iniset ${SUDO_ARG} ${TEST_INI} ddd empty "42"
VAL=$(iniget ${TEST_INI} ddd empty)
assert_equal "$VAL" "42" "change empty option"
# test pipe in option
-iniset ${TEST_INI} aaa handlers "a|b"
+iniset ${SUDO_ARG} ${TEST_INI} aaa handlers "a|b"
VAL=$(iniget ${TEST_INI} aaa handlers)
assert_equal "$VAL" "a|b" "pipe in option"
@@ -146,23 +154,23 @@
fi
# Test comments
-inicomment ${TEST_INI} aaa handlers
+inicomment ${SUDO_ARG} ${TEST_INI} aaa handlers
VAL=$(iniget ${TEST_INI} aaa handlers)
assert_empty VAL "test inicomment"
# Test multiple line iniset/iniget
-iniset_multiline ${TEST_INI} eee multi bar1 bar2
+iniset_multiline ${SUDO_ARG} ${TEST_INI} eee multi bar1 bar2
VAL=$(iniget_multiline ${TEST_INI} eee multi)
assert_equal "$VAL" "bar1 bar2" "iniget_multiline"
# Test iniadd with existing values
-iniadd ${TEST_INI} eee multi bar3
+iniadd ${SUDO_ARG} ${TEST_INI} eee multi bar3
VAL=$(iniget_multiline ${TEST_INI} eee multi)
assert_equal "$VAL" "bar1 bar2 bar3" "iniadd with existing values"
# Test iniadd with non-existing values
-iniadd ${TEST_INI} eee non-multi foobar1 foobar2
+iniadd ${SUDO_ARG} ${TEST_INI} eee non-multi foobar1 foobar2
VAL=$(iniget_multiline ${TEST_INI} eee non-multi)
assert_equal "$VAL" "foobar1 foobar2" "iniadd non-existing values"
@@ -176,7 +184,7 @@
del_no_section"
for x in $del_cases; do
- inidelete ${TEST_INI} $x a
+ inidelete ${SUDO_ARG} ${TEST_INI} $x a
VAL=$(iniget_multiline ${TEST_INI} $x a)
assert_empty VAL "inidelete $x"
if [ "$x" = "del_separate_options" -o \
@@ -191,6 +199,11 @@
fi
done
-rm -rf ${INI_TMP_DIR}
+# test file-creation
+iniset $SUDO_ARG ${INI_TMP_ETC_DIR}/test.new.ini test foo bar
+VAL=$(iniget ${INI_TMP_ETC_DIR}/test.new.ini test foo)
+assert_equal "$VAL" "bar" "iniset created file"
+
+$SUDO rm -rf ${INI_TMP_DIR}
report_results
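
To exercise the new root-owned-file path, the suite can be run with
``TEST_SUDO`` set (a sketch, assuming passwordless sudo):

    TEST_SUDO=1 ./tests/test_ini_config.sh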
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index d10cd0e..f31560a 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -37,11 +37,11 @@
ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
ALL_LIBS+=" oslo.serialization django_openstack_auth"
ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
-ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
+ALL_LIBS+=" oslo.utils python-swiftclient"
ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
ALL_LIBS+=" oslo.cache oslo.reports"
-ALL_LIBS+=" keystoneauth"
+ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh
index 2689589..03996ce 100755
--- a/tests/test_truefalse.sh
+++ b/tests/test_truefalse.sh
@@ -8,6 +8,14 @@
source $TOP/functions
source $TOP/tests/unittest.sh
+# A common mistake is to use $FOO instead of "FOO"; in that case we
+# should die
+bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null
+assert_equal 1 $? "missing test-value"
+
+VAL=$(trueorfalse False MISSING_VARIABLE)
+assert_equal "False" $VAL "blank test-value"
+
function test_trueorfalse {
local one=1
local captrue=True
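
The behaviour under test: ``trueorfalse`` takes the variable *name*, not its
expansion. A sketch, assuming it is run from the devstack tree:

    source functions-common    # provides trueorfalse
    FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD)     # correct: False when unset
    # FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)  # wrong: dies when unset

This mirrors the ``fixup_stuff.sh`` change later in this diff.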
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 603652a..df7a8b4 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -20,8 +20,10 @@
# pass a test, printing out MSG
# usage: passed message
function passed {
- local lineno=$(caller 0 | awk '{print $1}')
- local function=$(caller 0 | awk '{print $2}')
+ local lineno
+ lineno=$(caller 0 | awk '{print $1}')
+ local function
+ function=$(caller 0 | awk '{print $2}')
local msg="$1"
if [ -z "$msg" ]; then
msg="OK"
@@ -33,8 +35,10 @@
# fail a test, printing out MSG
# usage: failed message
function failed {
- local lineno=$(caller 0 | awk '{print $1}')
- local function=$(caller 0 | awk '{print $2}')
+ local lineno
+ lineno=$(caller 0 | awk '{print $1}')
+ local function
+ function=$(caller 0 | awk '{print $2}')
local msg="$1"
FAILED_FUNCS+="$function:L$lineno\n"
echo "ERROR: $function:L$lineno!"
@@ -45,8 +49,10 @@
# assert that val1 equals val2 (string comparison), printing out msg
# usage: assert_equal val1 val2 msg
function assert_equal {
- local lineno=`caller 0 | awk '{print $1}'`
- local function=`caller 0 | awk '{print $2}'`
+ local lineno
+ lineno=`caller 0 | awk '{print $1}'`
+ local function
+ function=`caller 0 | awk '{print $2}'`
local msg=$3
if [ -z "$msg" ]; then
@@ -66,8 +72,10 @@
# assert variable is empty/blank, printing out msg
# usage: assert_empty VAR msg
function assert_empty {
- local lineno=`caller 0 | awk '{print $1}'`
- local function=`caller 0 | awk '{print $2}'`
+ local lineno
+ lineno=`caller 0 | awk '{print $1}'`
+ local function
+ function=`caller 0 | awk '{print $2}'`
local msg=$2
if [ -z "$msg" ]; then
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
index fa84343..7dc492e 100755
--- a/tools/build_docs.sh
+++ b/tools/build_docs.sh
@@ -81,7 +81,7 @@
mkdir -p $FQ_HTML_BUILD/`dirname $f`;
$SHOCCO $f > $FQ_HTML_BUILD/$f.html
done
-for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do
+for f in $(find functions functions-common inc lib pkg samples -type f -name \* ! -name \*.md ! -name \*.conf); do
echo $f
FILES+="$f "
mkdir -p $FQ_HTML_BUILD/`dirname $f`;
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index c2dbe1a..25f713c 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -158,12 +158,12 @@
export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
-EC2_URL=$(openstack endpoint show -f value -c publicurl ec2 || true)
+EC2_URL=$(openstack endpoint list --service ec2 --interface public --os-identity-api-version=3 -c URL -f value || true)
if [[ -z $EC2_URL ]]; then
EC2_URL=http://localhost:8773/
fi
-S3_URL=$(openstack endpoint show -f value -c publicurl s3 || true)
+S3_URL=$(openstack endpoint list --service s3 --interface public --os-identity-api-version=3 -c URL -f value || true)
if [[ -z $S3_URL ]]; then
S3_URL=http://localhost:3333
fi
@@ -190,7 +190,8 @@
local user_passwd=$5
# The admin user can see all users' secret AWS keys, which does not look good
- local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
+ local line
+ line=$(openstack ec2 credentials list --user $user_id | grep " $project_id " || true)
if [ -z "$line" ]; then
openstack ec2 credentials create --user $user_id --project $project_id 1>&2
line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
diff --git a/tools/dstat.sh b/tools/dstat.sh
new file mode 100755
index 0000000..6ba4515
--- /dev/null
+++ b/tools/dstat.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# **tools/dstat.sh** - Execute instances of DStat to log system load info
+#
+# Multiple instances of DStat are executed because some desired features
+# are mutually incompatible, particularly CSV output and the
+# "top-cpu-adv" and "top-io-adv" flags.
+#
+# Assumes:
+# - dstat command is installed
+
+# Retrieve the log directory as an argument from the calling script.
+LOGDIR=$1
+
+# Command-line arguments for the primary DStat process.
+DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv"
+
+# Command-line arguments for secondary background DStat process.
+DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log"
+
+# Execute and background the secondary dstat process and discard its output.
+dstat $DSTAT_CSV_OPTS >& /dev/null &
+
+# Execute and background the primary dstat process, but keep its output in this
+# TTY.
+dstat $DSTAT_OPTS &
+
+# Catch any exit signals, making sure to also terminate any child processes.
+trap "kill -- -$$" EXIT
+
+# Keep this script running as long as child dstat processes are alive.
+wait
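
Typical invocation from a calling script; the log directory here is an
assumed example:

    tools/dstat.sh /opt/stack/logs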
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 4fff57f..9ae2ae7 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -108,7 +108,7 @@
sudo setenforce 0
fi
- FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
+ FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD)
if [[ $FORCE_FIREWALLD == "False" ]]; then
# On Fedora 20 firewalld interacts badly with libvirt and
# slows things down significantly (this issue was fixed in
@@ -134,6 +134,31 @@
sudo systemctl start iptables
fi
fi
+
+ if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "21" ]]; then
+ # requests ships vendored version of chardet/urllib3, but on
+ # fedora these are symlinked back to the primary versions to
+ # avoid duplication of code on disk. This is fine when
+ # maintainers keep things in sync, but since devstack takes
+ # over and installs later versions via pip we can end up with
+ # incompatible versions.
+ #
+ # The rpm package is not removed to preserve the dependent
+ # packages like cloud-init; rather we remove the symlinks and
+ # force a re-install of requests so the vendored versions it
+ # wants are present.
+ #
+    # Related issues:
+ # https://bugs.launchpad.net/glance/+bug/1476770
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1253823
+
+ base_path=$(get_package_path requests)/packages
+ if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
+ sudo rm -f $base_path/{chardet,urllib3}
+ # install requests with the bundled urllib3 to avoid conflicts
+ pip_install --upgrade --force-reinstall requests
+ fi
+ fi
fi
# The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 7b42c8c..13c1786 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -42,6 +42,15 @@
function install_get_pip {
+    # If get-pip.py isn't Python, delete it; this probably indicates
+    # an outage on the server.
+ if [[ -r $LOCAL_PIP ]]; then
+ if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then
+ echo "WARNING: Corrupt $LOCAL_PIP found removing"
+ rm $LOCAL_PIP
+ fi
+ fi
+
# The OpenStack gate and others put a cached version of get-pip.py
# for this to find, explicitly to avoid download issues.
#
@@ -53,8 +62,15 @@
# since and only download if a new version is out -- but only if
# it seems we downloaded the file originally.
if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
- curl --retry 6 --retry-delay 5 \
- -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+ # only test freshness if LOCAL_PIP is actually there,
+ # otherwise we generate a scary warning.
+ local timecond=""
+ if [[ -r $LOCAL_PIP ]]; then
+ timecond="-z $LOCAL_PIP"
+ fi
+
+ curl -f --retry 6 --retry-delay 5 \
+ $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \
die $LINENO "Download of get-pip.py failed"
touch $LOCAL_PIP.downloaded
fi
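
When the ``-z`` reference file is missing, curl warns and downloads
unconditionally, which is why ``timecond`` is now built only when the file
exists. A sketch of the conditional download, with an assumed URL standing
in for ``PIP_GET_PIP_URL``:

    # Download only if the server copy is newer than the local file.
    curl -f --retry 6 --retry-delay 5 \
        -z ./get-pip.py -o ./get-pip.py https://bootstrap.pypa.io/get-pip.py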
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index cb93e57..2628b40 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -5,7 +5,7 @@
# Create a CA hierarchy (if necessary) and server certificate
#
# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled
-# but in the curent directory unless ``DATA_DIR`` is set
+# but in the current directory unless ``DATA_DIR`` is set
ENABLE_TLS=True
DATA_DIR=${DATA_DIR:-`pwd`/ca-data}
diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh
index 0d5728a..ecbd79a 100755
--- a/tools/peakmem_tracker.sh
+++ b/tools/peakmem_tracker.sh
@@ -41,10 +41,12 @@
# snapshot of current usage; i.e. checking the latest entry in the
# file will give the peak-memory usage
function tracker {
- local low_point=$(get_mem_available)
+ local low_point
+ low_point=$(get_mem_available)
while [ 1 ]; do
- local mem_available=$(get_mem_available)
+ local mem_available
+ mem_available=$(get_mem_available)
if [[ $mem_available -lt $low_point ]]; then
low_point=$mem_available
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 61694e9..a1adf59 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -77,7 +77,7 @@
# NOTE: these need to be specified, otherwise devstack will try
# to prompt for these passwords, blocking the install process.
- MYSQL_PASSWORD=my_super_secret
+ DATABASE_PASSWORD=my_super_secret
SERVICE_TOKEN=my_super_secret
ADMIN_PASSWORD=my_super_secret
SERVICE_PASSWORD=my_super_secret
@@ -94,11 +94,6 @@
XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
- # Download a vhd and a uec image
- IMAGE_URLS="\
- https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\
- http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz"
-
# Explicitly set virt driver
VIRT_DRIVER=xenserver
diff --git a/tools/xen/functions b/tools/xen/functions
index 4e9fede..8c674dc 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -179,7 +179,8 @@
local bridge_or_net_name
bridge_or_net_name=$1
- ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"
+ ip -4 addr show $(bridge_for "$bridge_or_net_name") |\
+ awk '/inet/{split($2, ip, "/"); print ip[1];}'
}
function xenapi_is_listening_on {
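
What the new awk pipeline extracts from typical ``ip -4 addr show`` output;
the address and bridge name are assumed sample values:

    echo "    inet 192.168.1.10/24 brd 192.168.1.255 scope global xenbr0" | \
        awk '/inet/{split($2, ip, "/"); print ip[1];}'   # prints 192.168.1.10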
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index b49347e..e24d9ed 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -193,7 +193,10 @@
TMP_DIR=/tmp/temp.$RANDOM
mkdir -p $TMP_DIR
mount -o loop $TOOLS_ISO $TMP_DIR
- DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb)
+    # The target deb package may be named *amd64.deb or *all.deb,
+    # so try *amd64.deb first and fall back to *all.deb if it
+    # doesn't exist.
+ DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb)
cp $DEB_FILE $HTTP_SERVER_LOCATION
umount $TMP_DIR
rmdir $TMP_DIR
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 1ebbeaf..66f7ef4 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -100,7 +100,8 @@
{
local v="$1"
echo "Installing VM interface on [$BRIDGE]"
- local out_network_uuid=$(find_network "$BRIDGE")
+ local out_network_uuid
+ out_network_uuid=$(find_network "$BRIDGE")
xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
}
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
index 1ed2494..96dad7e 100755
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ b/tools/xen/scripts/uninstall-os-vpx.sh
@@ -35,9 +35,12 @@
destroy_vdi()
{
local vbd_uuid="$1"
- local type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
- local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
- local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
+ local type
+ type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
+ local dev
+ dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
+ local vdi_uuid
+ vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
xe vdi-destroy uuid=$vdi_uuid
@@ -47,7 +50,8 @@
uninstall()
{
local vm_uuid="$1"
- local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
+ local power_state
+ power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
if [ "$power_state" != "halted" ]; then
xe vm-shutdown vm=$vm_uuid force=true
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 924e773..324e6a1 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -165,7 +165,8 @@
function test_get_local_sr {
setup_xe_response "uuid123"
- local RESULT=$(. mocks && get_local_sr)
+ local RESULT
+ RESULT=$(. mocks && get_local_sr)
[ "$RESULT" == "uuid123" ]
@@ -173,7 +174,8 @@
}
function test_get_local_sr_path {
- local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
+ local RESULT
+ RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
[ "/var/run/sr-mount/uuid1" == "$RESULT" ]
}
diff --git a/tox.ini b/tox.ini
index 788fea9..9279455 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,7 +8,8 @@
install_command = pip install {opts} {packages}
[testenv:bashate]
-deps = bashate
+deps =
+ {env:BASHATE_INSTALL_PATH:bashate==0.3.2}
whitelist_externals = bash
commands = bash -c "find {toxinidir} \
-not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs
@@ -19,12 +20,13 @@
-not -name \*.md \
\( \
-name \*.sh -or \
- -name \*rc -or \
+ -name \*.orig -or \
+ -name \*rc -or \ # openrc files, etc
-name functions\* -or \
-wholename \*/inc/\* -or \ # /inc files and
-wholename \*/lib/\* \ # /lib files are shell, but
\) \ # have no extension
- -print0 | xargs -0 bashate -v"
+ -print0 | xargs -0 bashate -v -iE006 -eE005,E042"
[testenv:docs]
deps =
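
With bashate pinned, the checks run reproducibly through the existing
environment (per bashate's published codes: E006 long lines is ignored,
while E005 missing shebang and E042, the ``local`` masking issue
demonstrated above, are treated as errors):

    tox -e bashate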
diff --git a/unstack.sh b/unstack.sh
index 10e5958..30447a7 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -45,7 +45,7 @@
# Configure Projects
# ==================
-# Plugin Phase 0: override_defaults - allow pluggins to override
+# Plugin Phase 0: override_defaults - allow plugins to override
# defaults before other services are run
run_phase override_defaults
@@ -65,7 +65,6 @@
source $TOP_DIR/lib/nova
source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/ceilometer
source $TOP_DIR/lib/heat
source $TOP_DIR/lib/neutron-legacy
source $TOP_DIR/lib/ldap
@@ -104,10 +103,6 @@
stop_heat
fi
-if is_service_enabled ceilometer; then
- stop_ceilometer
-fi
-
if is_service_enabled nova; then
stop_nova
fi