Merge "Prevent setting tempest fixed_network_name for cells"
diff --git a/.gitignore b/.gitignore
index c6900c8..2778a65 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,9 +12,11 @@
doc/build
files/*.gz
files/*.qcow2
+files/*.img
files/images
files/pip-*
files/get-pip.py*
+files/ir-deploy*
local.conf
local.sh
localrc
diff --git a/README.md b/README.md
index 53de970..455e1c6 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,10 @@
KEYSTONE_USE_MOD_WSGI="True"
+Example (Nova):
+
+ NOVA_USE_MOD_WSGI="True"
+
Example (Swift):
SWIFT_USE_MOD_WSGI="True"
@@ -264,10 +268,10 @@
# Heat
-Heat is enabled by default (see `stackrc` file). To disable it explicitly
+Heat is disabled by default (see `stackrc` file). To enable it explicitly
you'll need the following settings in your `localrc` section:
- disable_service heat h-api h-api-cfn h-api-cw h-eng
+ enable_service heat h-api h-api-cfn h-api-cw h-eng
Heat can also run in standalone mode, and be configured to orchestrate
on an external OpenStack cloud. To launch only Heat in standalone mode
@@ -328,12 +332,12 @@
You likely want to change your `localrc` section to run a scheduler that
will balance VMs across hosts:
- SCHEDULER=nova.scheduler.simple.SimpleScheduler
+ SCHEDULER=nova.scheduler.filter_scheduler.FilterScheduler
You can then run many compute nodes, each of which should have a `stackrc`
which includes the following, with the IP address of the above controller node:
- ENABLED_SERVICES=n-cpu,rabbit,g-api,neutron,q-agt
+ ENABLED_SERVICES=n-cpu,rabbit,neutron,q-agt
SERVICE_HOST=[IP of controller node]
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
diff --git a/clean.sh b/clean.sh
index 035489c..7db519b 100755
--- a/clean.sh
+++ b/clean.sh
@@ -76,6 +76,7 @@
# ==========
# Phase: clean
+load_plugin_settings
run_phase clean
if [[ -d $TOP_DIR/extras.d ]]; then
@@ -114,9 +115,16 @@
cleanup_rpc_backend
cleanup_database
-# Clean out data, logs and status
-LOGDIR=$(dirname "$LOGFILE")
-sudo rm -rf $DATA_DIR $LOGDIR $DEST/status
+# Clean out data and status
+sudo rm -rf $DATA_DIR $DEST/status
+
+# Clean out the log file and log directories
+if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then
+ sudo rm -f $LOGFILE
+fi
+if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then
+ sudo rm -rf $LOGDIR
+fi
if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then
sudo rm -rf $SCREEN_LOGDIR
fi
diff --git a/doc/source/eucarc.rst b/doc/source/eucarc.rst
index 1284b88..c2ecbc6 100644
--- a/doc/source/eucarc.rst
+++ b/doc/source/eucarc.rst
@@ -13,7 +13,7 @@
::
- EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
+ EC2_URL=$(openstack catalog show ec2 | awk '/ publicURL: / { print $4 }')
S3\_URL
Set the S3 endpoint for euca2ools. The endpoint is extracted from
@@ -21,14 +21,14 @@
::
- export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }')
+ export S3_URL=$(openstack catalog show s3 | awk '/ publicURL: / { print $4 }')
EC2\_ACCESS\_KEY, EC2\_SECRET\_KEY
Create EC2 credentials for the current tenant:user in Keystone.
::
- CREDS=$(keystone ec2-credentials-create)
+ CREDS=$(openstack ec2 credentials create)
export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 610300b..b35492e 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -54,7 +54,7 @@
Configure Nested KVM for AMD-based Machines
---------------------------------------------
+-------------------------------------------
Procedure to enable nested KVM virtualization on AMD-based machines.
@@ -121,7 +121,7 @@
back to `virt_type=qemu`, i.e. plain QEMU emulation.
Optionally, to explicitly set the type of virtualization, to KVM, by the
-libvirt driver in Nova, the below config attribute can be used in
+libvirt driver in nova, the below config attribute can be used in
DevStack's ``local.conf``:
::
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index ff81c93..b2617c9 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -229,10 +229,10 @@
----------------
DevStack creates two OpenStack users (``admin`` and ``demo``) and two
-tenants (also ``admin`` and ``demo``). ``admin`` is exactly what it
+projects (also ``admin`` and ``demo``). ``admin`` is exactly what it
sounds like, a privileged administrative account that is a member of
-both the ``admin`` and ``demo`` tenants. ``demo`` is a normal user
-account that is only a member of the ``demo`` tenant. Creating
+both the ``admin`` and ``demo`` projects. ``demo`` is a normal user
+account that is only a member of the ``demo`` project. Creating
additional OpenStack users can be done through the dashboard, sometimes
it is easier to do them in bulk from a script, especially since they get
blown away every time ``stack.sh`` runs. The following steps are ripe
@@ -243,36 +243,36 @@
# Get admin creds
. openrc admin admin
- # List existing tenants
- keystone tenant-list
+ # List existing projects
+ openstack project list
# List existing users
- keystone user-list
+ openstack user list
- # Add a user and tenant
+ # Add a user and project
NAME=bob
PASSWORD=BigSecrete
- TENANT=$NAME
- keystone tenant-create --name=$NAME
- keystone user-create --name=$NAME --pass=$PASSWORD
- keystone user-role-add --user-id=<bob-user-id> --tenant-id=<bob-tenant-id> --role-id=<member-role-id>
- # member-role-id comes from the existing member role created by stack.sh
- # keystone role-list
+ PROJECT=$NAME
+ openstack project create $PROJECT
+ openstack user create $NAME --password=$PASSWORD --project $PROJECT
+ openstack role add Member --user $NAME --project $PROJECT
+ # The Member role is created by stack.sh
+ # openstack role list
Swift
-----
-Swift requires a significant amount of resources and is disabled by
-default in DevStack. The support in DevStack is geared toward a minimal
-installation but can be used for testing. To implement a true multi-node
-test of Swift required more than DevStack provides. Enabling it is as
+Swift, OpenStack Object Storage, requires a significant amount of resources
+and is disabled by default in DevStack. The support in DevStack is geared
+toward a minimal installation but can be used for testing. To implement a
+true multi-node test of swift, additional steps will be required. Enabling it is as
simple as enabling the ``swift`` service in ``local.conf``:
::
enable_service s-proxy s-object s-container s-account
-Swift will put its data files in ``SWIFT_DATA_DIR`` (default
+Swift, OpenStack Object Storage, will put its data files in ``SWIFT_DATA_DIR`` (default
``/opt/stack/data/swift``). The size of the data 'partition' created
(really a loop-mounted file) is set by ``SWIFT_LOOPBACK_DISK_SIZE``. The
Swift config files are located in ``SWIFT_CONF_DIR`` (default
@@ -334,14 +334,14 @@
set in ``localrc`` it may be necessary to remove the corresponding
directory from ``/opt/stack`` to force git to re-clone the repository.
-For example, to pull Nova from a proposed release candidate in the
-primary Nova repository:
+For example, to pull nova, OpenStack Compute, from a proposed release candidate
+in the primary nova repository:
::
NOVA_BRANCH=rc-proposed
-To pull Glance from an experimental fork:
+To pull glance, OpenStack Image service, from an experimental fork:
::
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 95cde96..3030c7b 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -1,14 +1,14 @@
======================================
-Using DevStack with Neutron Networking
+Using DevStack with neutron Networking
======================================
-This guide will walk you through using OpenStack Neutron with the ML2
+This guide will walk you through using OpenStack neutron with the ML2
plugin and the Open vSwitch mechanism driver.
Network Interface Configuration
===============================
-To use Neutron, it is suggested that two network interfaces be present
+To use neutron, it is suggested that two network interfaces be present
in the host operating system.
The first interface, eth0 is used for the OpenStack management (API,
@@ -62,7 +62,7 @@
Disabling Next Generation Firewall Tools
========================================
-Devstack does not properly operate with modern firewall tools. Specifically
+DevStack does not properly operate with modern firewall tools. Specifically
it will appear as if the guest VM can access the external network via ICMP,
but UDP and TCP packets will not be delivered to the guest VM. The root cause
of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's
@@ -96,13 +96,13 @@
Neutron Networking with Open vSwitch
====================================
-Configuring Neutron networking in DevStack is very similar to
+Configuring neutron, OpenStack Networking, in DevStack is very similar to
configuring `nova-network` - many of the same configuration variables
(like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are
-used by Neutron, which is intentional.
+used by neutron, which is intentional.
The only difference is the disabling of `nova-network` in your
-local.conf, and the enabling of the Neutron components.
+local.conf, and the enabling of the neutron components.
Configuration
@@ -134,16 +134,16 @@
Neutron Networking with Open vSwitch and Provider Networks
==========================================================
-In some instances, it is desirable to use Neutron's provider
+In some instances, it is desirable to use neutron's provider
networking extension, so that networks that are configured on an
-external router can be utilized by Neutron, and instances created via
+external router can be utilized by neutron, and instances created via
Nova can attach to the network managed by the external router.
For example, in some lab environments, a hardware router has been
pre-configured by another party, and an OpenStack developer has been
given a VLAN tag and IP address range, so that instances created via
DevStack will use the external router for L3 connectivity, as opposed
-to the Neutron L3 service.
+to the neutron L3 service.
Service Configuration
@@ -152,8 +152,8 @@
**Control Node**
In this example, the control node will run the majority of the
-OpenStack API and management services (Keystone, Glance,
-Nova, Neutron, etc..)
+OpenStack API and management services (keystone, glance,
+nova, neutron)
**Compute Nodes**
@@ -226,4 +226,4 @@
For example, with the above configuration, a bridge is
created, named `br-ex` which is managed by Open vSwitch, and the
second interface on the compute node, `eth1` is attached to the
-bridge, to forward traffic sent by guest vms.
+bridge, to forward traffic sent by guest VMs.
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 0d98f4a..a91e0d1 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -1,15 +1,15 @@
=================
-Nova and devstack
+Nova and DevStack
=================
This is a rough guide to various configuration parameters for nova
-running with devstack.
+running with DevStack.
nova-serialproxy
================
-In Juno nova implemented a `spec
+In Juno, nova implemented a `spec
<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
to allow read/write access to the serial console of an instance via
`nova-serialproxy
@@ -60,7 +60,7 @@
#proxyclient_address=127.0.0.1
-Enabling the service is enough to be functional for a single machine devstack.
+Enabling the service is enough to be functional for a single machine DevStack.
These config options are defined in `nova.console.serial
<https://github.com/openstack/nova/blob/master/nova/console/serial.py#L33-L52>`_
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index ab46d91..c2ce1a3 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -3,10 +3,10 @@
====================
Use the cloud to build the cloud! Use your cloud to launch new versions
-of OpenStack in about 5 minutes. When you break it, start over! The VMs
+of OpenStack in about 5 minutes. If you break it, start over! The VMs
launched in the cloud will be slow as they are running in QEMU
(emulation), but their primary use is testing OpenStack development and
-operation. Speed not required.
+operation.
Prerequisites Cloud & Image
===========================
@@ -15,7 +15,7 @@
---------------
DevStack should run in any virtual machine running a supported Linux
-release. It will perform best with 4Gb or more of RAM.
+release. It will perform best with 4GB or more of RAM.
OpenStack Deployment & cloud-init
---------------------------------
@@ -88,7 +88,7 @@
---------------
At this point you should be able to access the dashboard. Launch VMs and
-if you give them floating IPs access those VMs from other machines on
+if you give them floating IPs, access those VMs from other machines on
your network.
One interesting use case is for developers working on a VM on their
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 23ccf27..d245035 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -7,7 +7,7 @@
well beyond what was originally intended and the majority of
configuration combinations are rarely, if ever, tested. DevStack is not
a general OpenStack installer and was never meant to be everything to
-everyone..
+everyone.
Below is a list of what is specifically is supported (read that as
"tested") going forward.
@@ -58,7 +58,7 @@
OpenStack Network
-----------------
-*Default to Nova Network, optionally use Neutron*
+*Defaults to nova network, optionally use neutron*
- Nova Network: FlatDHCP
- Neutron: A basic configuration approximating the original FlatDHCP
@@ -67,10 +67,10 @@
Services
--------
-The default services configured by DevStack are Identity (Keystone),
-Object Storage (Swift), Image Storage (Glance), Block Storage (Cinder),
-Compute (Nova), Network (Nova), Dashboard (Horizon), Orchestration
-(Heat)
+The default services configured by DevStack are Identity (keystone),
+Object Storage (swift), Image Service (glance), Block Storage (cinder),
+Compute (nova), Networking (nova), Dashboard (horizon), Orchestration
+(heat)
Additional services not included directly in DevStack can be tied in to
``stack.sh`` using the :doc:`plugin mechanism <plugins>` to call
diff --git a/eucarc b/eucarc
index 343f4cc..1e672bd 100644
--- a/eucarc
+++ b/eucarc
@@ -19,7 +19,7 @@
source $RC_DIR/openrc
# Set the ec2 url so euca2ools works
-export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
+export EC2_URL=$(openstack catalog show ec2 | awk '/ publicURL: / { print $4 }')
# Create EC2 credentials for the current user
CREDS=$(openstack ec2 credentials create)
@@ -29,7 +29,7 @@
# Euca2ools Certificate stuff for uploading bundles
# See exercises/bundle.sh to see how to get certs using nova cli
NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR}
-export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }')
+export S3_URL=$(openstack catalog show s3 | awk '/ publicURL: / { print $4 }')
export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index aa34830..d520b9b 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -182,7 +182,7 @@
die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
-ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
+ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
# Clean up
# --------
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 2f85d98..c33ef44 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -69,7 +69,7 @@
STATUS_KEYSTONE="Skipped"
else
echo -e "\nTest Keystone"
- if keystone $TENANT_ARG $ARGS catalog --service identity; then
+ if openstack $TENANT_ARG $ARGS catalog show identity; then
STATUS_KEYSTONE="Succeeded"
else
STATUS_KEYSTONE="Failed"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index df5e233..c2957e2 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -142,7 +142,7 @@
die $LINENO "Failure authorizing rule in $SECGROUP"
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
- ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+ ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME"
# Revoke pinging
euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 59444e1..4b72a00 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -139,7 +139,7 @@
die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
-ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
+ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
# Floating IPs
# ------------
@@ -158,7 +158,7 @@
die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
-ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME"
if ! is_service_enabled neutron; then
# Allocate an IP from second floating pool
@@ -182,7 +182,7 @@
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
# Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
- ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
+ ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail
fi
# Clean up
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 9230587..04892b0 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -281,7 +281,7 @@
local VM_NAME=$1
local NET_NAME=$2
IP=$(get_instance_ip $VM_NAME $NET_NAME)
- ping_check $NET_NAME $IP $BOOT_TIMEOUT
+ ping_check $IP $BOOT_TIMEOUT $NET_NAME
}
function check_vm {
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 3ac2016..f95c81f 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -143,7 +143,7 @@
die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
-ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
+ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME"
# Volumes
# -------
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1d20af7..0b914e2 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -1,5 +1,6 @@
Listen %PUBLICPORT%
Listen %ADMINPORT%
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
<VirtualHost *:%PUBLICPORT%>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
@@ -11,7 +12,7 @@
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+ CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
%SSLENGINE%
%SSLCERTFILE%
%SSLKEYFILE%
@@ -27,7 +28,7 @@
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/%APACHE_NAME%/keystone.log
- CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+ CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined
%SSLENGINE%
%SSLCERTFILE%
%SSLKEYFILE%
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
new file mode 100644
index 0000000..70ccedd
--- /dev/null
+++ b/files/apache-nova-api.template
@@ -0,0 +1,16 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess nova-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup nova-api
+ WSGIScriptAlias / %PUBLICWSGI%
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/nova-api.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+</VirtualHost>
\ No newline at end of file
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
new file mode 100644
index 0000000..ae4cf94
--- /dev/null
+++ b/files/apache-nova-ec2-api.template
@@ -0,0 +1,16 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+ WSGIDaemonProcess nova-ec2-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+ WSGIProcessGroup nova-ec2-api
+ WSGIScriptAlias / %PUBLICWSGI%
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
+</VirtualHost>
\ No newline at end of file
diff --git a/files/debs/general b/files/debs/general
index c27b77d..1460526 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -17,6 +17,7 @@
tar
python-dev
python2.7
+python-gdbm # needed for testr
bc
libyaml-dev
libffi-dev
diff --git a/files/debs/swift b/files/debs/swift
index 0089d27..726786e 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -1,8 +1,5 @@
curl
make
memcached
-# NOTE python-nose only exists because of swift functional job, we should probably
-# figure out a more consistent way of installing this from test-requirements.txt instead
-python-nose
sqlite3
xfsprogs
diff --git a/files/debs/tempest b/files/debs/tempest
index f244e4e..bb09529 100644
--- a/files/debs/tempest
+++ b/files/debs/tempest
@@ -1 +1,2 @@
-libxslt1-dev
\ No newline at end of file
+libxml2-dev
+libxslt1-dev
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 2219426..42756d8 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -15,7 +15,6 @@
openssl
psmisc
python-cmd2 # dist:opensuse-12.3
-python-pylint
screen
tar
tcpdump
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index d1f378a..c45eae6 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -12,7 +12,5 @@
python-dateutil
python-eventlet
python-mox
-python-nose
-python-pylint
python-sqlalchemy-migrate
python-xattr
diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift
index 4b14098..9c0d188 100644
--- a/files/rpms-suse/swift
+++ b/files/rpms-suse/swift
@@ -8,7 +8,6 @@
python-eventlet
python-greenlet
python-netifaces
-python-nose
python-simplejson
python-xattr
sqlite3
diff --git a/files/rpms/general b/files/rpms/general
index e17d6d6..7b2c00a 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -14,7 +14,6 @@
libxslt-devel
pkgconfig
psmisc
-pylint
python-devel
screen
tar
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 8d7f037..b2cf0de 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,6 +1,5 @@
Django
httpd # NOPRIME
mod_wsgi # NOPRIME
-pylint
pyxattr
pcre-devel # pyScss
diff --git a/functions b/functions
index 4dc20e7..1668e16 100644
--- a/functions
+++ b/functions
@@ -287,6 +287,10 @@
img_property="--property hw_cdrom_bus=scsi"
fi
+ if is_arch "aarch64"; then
+ img_property="--property hw_machine_type=virt --property hw_cdrom_bus=virtio --property os_command_line='console=ttyAMA0'"
+ fi
+
if [ "$container_format" = "bare" ]; then
if [ "$unpack" = "zcat" ]; then
openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
@@ -340,40 +344,43 @@
# ping check
-# Uses globals ``ENABLED_SERVICES``
-# ping_check from-net ip boot-timeout expected
+# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK_NAME``
+# ping_check <ip> [boot-timeout] [from_net] [expected]
function ping_check {
- if is_service_enabled neutron; then
- _ping_check_neutron "$1" $2 $3 $4
- return
- fi
- _ping_check_novanet "$1" $2 $3 $4
-}
+ local ip=$1
+ local timeout=${2:-30}
+ local from_net=${3:-""}
+ local expected=${4:-True}
+ local op="!"
+ local failmsg="[Fail] Couldn't ping server"
+ local ping_cmd="ping"
-# ping check for nova
-# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK``
-function _ping_check_novanet {
- local from_net=$1
- local ip=$2
- local boot_timeout=$3
- local expected=${4:-"True"}
- local check_command=""
- MULTI_HOST=$(trueorfalse False MULTI_HOST)
- if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
- return
- fi
- if [[ "$expected" = "True" ]]; then
- check_command="while ! ping -c1 -w1 $ip; do sleep 1; done"
- else
- check_command="while ping -c1 -w1 $ip; do sleep 1; done"
- fi
- if ! timeout $boot_timeout sh -c "$check_command"; then
- if [[ "$expected" = "True" ]]; then
- die $LINENO "[Fail] Couldn't ping server"
- else
- die $LINENO "[Fail] Could ping server"
+ # if we don't specify a from_net we're expecting things to work
+ # fine from our local box.
+ if [[ -n "$from_net" ]]; then
+ if is_service_enabled neutron; then
+ ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
+ elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
+ # there is no way to address the multihost / private case, bail here for compatibility.
+ # TODO: remove this cruft and redo code to handle this at the caller level.
+ return
fi
fi
+
+ # inverse the logic if we're testing no connectivity
+ if [[ "$expected" != "True" ]]; then
+ op=""
+ failmsg="[Fail] Could ping server"
+ fi
+
+ # Because we've transformed this command so many times, print it
+ # out at the end.
+ local check_command="while $op $ping_cmd -c1 -w1 $ip; do sleep 1; done"
+ echo "Checking connectivity with $check_command"
+
+ if ! timeout $timeout sh -c "$check_command"; then
+        die $LINENO "$failmsg"
+ fi
}
# Get ip of instance
diff --git a/functions-common b/functions-common
index f8543c1..52d80fb 100644
--- a/functions-common
+++ b/functions-common
@@ -51,14 +51,16 @@
function trueorfalse {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
- local default=$1
- local literal=$2
- local testval=${!literal:-}
- [[ -z "$testval" ]] && { echo "$default"; return; }
- [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
- [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
- echo "$default"
+ local default=$1
+ local testval=${!2:-}
+
+ case "$testval" in
+ "1" | [yY]es | "YES" | [tT]rue | "TRUE" ) echo "True" ;;
+ "0" | [nN]o | "NO" | [fF]alse | "FALSE" ) echo "False" ;;
+ * ) echo "$default" ;;
+ esac
+
$xtrace
}
@@ -174,10 +176,7 @@
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
- echo $msg 1>&2;
- if [[ -n ${LOGDIR} ]]; then
- echo $msg >> "${LOGDIR}/error.log"
- fi
+ echo $msg
$xtrace
return $exitcode
}
@@ -509,7 +508,7 @@
fi
count=$(($count + 1))
- warn "timeout ${count} for git call: [git $@]"
+ warn $LINENO "timeout ${count} for git call: [git $@]"
if [ $count -eq 3 ]; then
die $LINENO "Maximum of 3 git retries reached"
fi
@@ -1140,6 +1139,10 @@
# the command.
# _run_process service "command-line" [group]
function _run_process {
+ # disable tracing through the exec redirects, it's just confusing in the logs.
+ xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+
local service=$1
local command="$2"
local group=$3
@@ -1163,6 +1166,9 @@
export PYTHONUNBUFFERED=1
fi
+ # reenable xtrace before we do *real* work
+ $xtrace
+
# Run under ``setsid`` to force the process to become a session and group leader.
# The pid saved can be used with pkill -g to get the entire process group.
if [[ -n "$group" ]]; then
@@ -1235,9 +1241,6 @@
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
- # Append the process to the screen rc file
- screen_rc "$name" "$command"
-
screen -S $SCREEN_NAME -X screen -t $name
local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}"
@@ -1277,6 +1280,10 @@
if [[ -n "$group" ]]; then
command="sg $group '$command'"
fi
+
+ # Append the process to the screen rc file
+ screen_rc "$name" "$command"
+
screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL"
}
@@ -1620,14 +1627,38 @@
# Uses global ``ENABLED_SERVICES``
# disable_negated_services
function disable_negated_services {
- local tmpsvcs="${ENABLED_SERVICES}"
+ local to_remove=""
+ local remaining=""
+ local enabled=""
local service
- for service in ${tmpsvcs//,/ }; do
+
+ # build up list of services that should be removed; i.e. they
+ # begin with "-"
+ for service in ${ENABLED_SERVICES//,/ }; do
if [[ ${service} == -* ]]; then
- tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+ to_remove+=",${service#-}"
+ else
+ remaining+=",${service}"
fi
done
- ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+
+ # go through the service list. if this service appears in the "to
+ # be removed" list, drop it
+ for service in ${remaining//,/ }; do
+ local remove
+ local add=1
+ for remove in ${to_remove//,/ }; do
+ if [[ ${remove} == ${service} ]]; then
+ add=0
+ break
+ fi
+ done
+ if [[ $add == 1 ]]; then
+ enabled="${enabled},$service"
+ fi
+ done
+
+ ENABLED_SERVICES=$(_cleanup_service_list "$enabled")
}
# disable_service() removes the services passed as argument to the
@@ -1868,6 +1899,12 @@
echo $subnet
}
+# Return the current python as "python<major>.<minor>"
+function python_version {
+ local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+ echo "python${python_version}"
+}
+
# Service wrapper to restart services
# restart_service service-name
function restart_service {
diff --git a/inc/meta-config b/inc/meta-config
index c8789bf..e5f902d 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -86,6 +86,14 @@
local matchgroup=$2
local configfile=$3
+    # NOTE: $configfile may itself be a variable reference. The iniset
+    # calls generated by the awk script below are eval'ed too, so they
+    # expand it themselves; we only eval it here to pre-create the file.
+ local real_configfile=$(eval echo $configfile)
+ if [ ! -f $real_configfile ]; then
+ touch $real_configfile
+ fi
+
get_meta_section $file $matchgroup $configfile | \
$CONFIG_AWK_CMD -v configfile=$configfile '
BEGIN {
diff --git a/lib/ceilometer b/lib/ceilometer
index dba92ba..9abdbfe 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -13,6 +13,26 @@
#
# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
#
+# To enable Ceilometer to collect the IPMI based meters, further add to the
+# localrc section of local.conf:
+#
+# enable_service ceilometer-aipmi
+#
+# NOTE: Currently, there are two ways to get the IPMI based meters in
+# OpenStack. One way is to configure the Ironic conductor to report those
+# meters for the nodes managed by Ironic and to have the Ceilometer
+# notification agent collect them. Ironic by default does NOT enable that reporting
+# functionality. So in order to do so, users need to set the option of
+# conductor.send_sensor_data to true in the ironic.conf configuration file
+# for the Ironic conductor service, and also enable the
+# ceilometer-anotification service.
+#
+# The other way is to use Ceilometer ipmi agent only to get the IPMI based
+# meters. To avoid duplicated meters, users need to make sure to set the
+# option of conductor.send_sensor_data to false in the ironic.conf
+# configuration file if the node on which Ceilometer ipmi agent is running
+# is also managed by Ironic.
+#
# Several variables set in the localrc section adjust common behaviors
# of Ceilometer (see within for additional settings):
#
@@ -175,7 +195,7 @@
# Install the policy file for the API server
cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
- iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json
+ iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json
cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR
@@ -231,6 +251,11 @@
iniset $CEILOMETER_CONF api pecan_debug "False"
_config_ceilometer_apache_wsgi
fi
+
+ if is_service_enabled ceilometer-aipmi; then
+ # Configure rootwrap for the ipmi agent
+ configure_rootwrap ceilometer $CEILOMETER_BIN_DIR/ceilometer-rootwrap $CEILOMETER_DIR/etc/ceilometer
+ fi
}
function configure_mongodb {
@@ -327,6 +352,7 @@
run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
+ run_process ceilometer-aipmi "ceilometer-agent-ipmi --config-file $CEILOMETER_CONF"
if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
@@ -366,7 +392,7 @@
restart_apache_server
fi
# Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
+ for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
stop_process $serv
done
}
diff --git a/lib/ceph b/lib/ceph
index 76747cc..4068e26 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -279,7 +279,7 @@
# configure Nova service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
diff --git a/lib/cinder b/lib/cinder
index de41bc5..7ad7ef9 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -77,9 +77,20 @@
# Should cinder perform secure deletion of volumes?
-# Defaults to true, can be set to False to avoid this bug when testing:
+# Defaults to zero. Can also be set to none or shred.
+# This was previously CINDER_SECURE_DELETE (True or False).
+# Equivalents using CINDER_VOLUME_CLEAR are zero and none, respectively.
+# Set to none to avoid this bug when testing:
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
-CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE)
+if [[ -n $CINDER_SECURE_DELETE ]]; then
+ CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE)
+ if [[ $CINDER_SECURE_DELETE == "False" ]]; then
+ CINDER_VOLUME_CLEAR_DEFAULT="none"
+ fi
+ DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n"
+fi
+CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
+CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
# Cinder reports allocations back to the scheduler on periodic intervals
# it turns out we can get an "out of space" issue when we run tests too
@@ -256,14 +267,16 @@
iniset_rpc_backend cinder $CINDER_CONF
- if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
- iniset $CINDER_CONF DEFAULT secure_delete False
- iniset $CINDER_CONF DEFAULT volume_clear none
+ if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then
+ iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
fi
# Format logging
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id"
+ else
+ # Set req-id, project-name and resource in log format
+ iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s"
fi
if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
@@ -286,6 +299,11 @@
iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY"
fi
+ # Set os_privileged_user credentials (used for os-assisted-snapshots)
+ iniset $CINDER_CONF DEFAULT os_privileged_user_name nova
+ iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
+ iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_TENANT_NAME"
+
}
# create_cinder_accounts() - Set up common required cinder accounts
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index d369c0c..35ad209 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -39,6 +39,7 @@
# Campsite rule: leave behind a volume group at least as clean as we found it
clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name
+ clean_lvm_filter
}
# configure_cinder_backend_lvm - Set config files, create data dirs, etc
diff --git a/lib/dstat b/lib/dstat
index c8faa65..f11bfa5 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -16,34 +16,22 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
-
-# Defaults
-# --------
-# for DSTAT logging
-DSTAT_FILE=${DSTAT_FILE:-"dstat.log"}
-
-
# start_dstat() - Start running processes, including screen
function start_dstat {
# A better kind of sysstat, with the top process per time slice
DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv"
- if [[ -n ${LOGDIR} ]]; then
- screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $LOGDIR/$DSTAT_FILE"
- if [[ -n ${SCREEN_LOGDIR} && ${SCREEN_LOGDIR} != ${LOGDIR} ]]; then
- # Drop the backward-compat symlink
- ln -sf $LOGDIR/$DSTAT_FILE ${SCREEN_LOGDIR}/$DSTAT_FILE
- fi
- else
- screen_it dstat "dstat $DSTAT_OPTS"
- fi
+ run_process dstat "dstat $DSTAT_OPTS"
+
+ # To enable peakmem_tracker add:
+ # enable_service peakmem_tracker
+ # to your localrc
+ run_process peakmem_tracker "$TOP_DIR/tools/peakmem_tracker.sh"
}
# stop_dstat() stop dstat process
function stop_dstat {
- # dstat runs as a console, not as a service, and isn't trackable
- # via the normal mechanisms for DevStack. So lets just do a
- # killall and move on.
- killall dstat || /bin/true
+ stop_process dstat
+ stop_process peakmem_tracker
}
# Restore xtrace
diff --git a/lib/glance b/lib/glance
index 578c88a..4e1bd24 100644
--- a/lib/glance
+++ b/lib/glance
@@ -49,8 +49,10 @@
GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
+GLANCE_SEARCH_CONF=$GLANCE_CONF_DIR/glance-search.conf
GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
+GLANCE_SEARCH_PASTE_INI=$GLANCE_CONF_DIR/glance-search-paste.ini
GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
@@ -67,6 +69,9 @@
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
+GLANCE_SEARCH_PORT=${GLANCE_SEARCH_PORT:-9393}
+GLANCE_SEARCH_PORT_INT=${GLANCE_SEARCH_PORT_INT:-19393}
+GLANCE_SEARCH_HOSTPORT=${GLANCE_SEARCH_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SEARCH_PORT}
# Tell Tempest this project is present
TEMPEST_SERVICES+=,glance
@@ -87,6 +92,10 @@
# kill instances (nova)
# delete image files (glance)
sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
+
+ if is_service_enabled g-search; then
+ ${TOP_DIR}/pkg/elasticsearch.sh stop
+ fi
}
# configure_glance() - Set config files, create data dirs, etc
@@ -100,6 +109,7 @@
local dburl=`database_connection_url glance`
iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl
iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
+ iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
@@ -128,26 +138,12 @@
fi
# Store specific configs
- iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
-
- # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
- # sections.
iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
# Store the images in swift if enabled.
if is_service_enabled s-proxy; then
- iniset $GLANCE_API_CONF DEFAULT default_store swift
- iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
- iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance-swift
- iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
- iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
-
- iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
-
- # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
- # sections.
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift
@@ -201,9 +197,6 @@
iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
# Store specific confs
- # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
- # sections.
- iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
@@ -218,14 +211,38 @@
iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
fi
+
+ # Configure search
+ if is_service_enabled g-search; then
+ cp $GLANCE_DIR/etc/glance-search.conf $GLANCE_SEARCH_CONF
+ iniset $GLANCE_SEARCH_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ inicomment $GLANCE_SEARCH_CONF DEFAULT log_file
+ iniset $GLANCE_SEARCH_CONF DEFAULT use_syslog $SYSLOG
+ iniset $GLANCE_SEARCH_CONF DEFAULT sql_connection $dburl
+ iniset $GLANCE_SEARCH_CONF paste_deploy flavor keystone
+ configure_auth_token_middleware $GLANCE_SEARCH_CONF glance $GLANCE_AUTH_CACHE_DIR/search
+
+ if is_service_enabled tls-proxy; then
+ iniset $GLANCE_SEARCH_CONF DEFAULT bind_port $GLANCE_SEARCH_PORT_INT
+ fi
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service glance; then
+ ensure_certificates GLANCE
+ iniset $GLANCE_SEARCH_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
+ iniset $GLANCE_SEARCH_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
+ fi
+
+ cp $GLANCE_DIR/etc/glance-search-paste.ini $GLANCE_SEARCH_PASTE_INI
+ fi
}
# create_glance_accounts() - Set up common required glance accounts
-# Project User Roles
-# ------------------------------------------------------------------
-# SERVICE_TENANT_NAME glance service
-# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled)
+# Project User Roles
+# ---------------------------------------------------------------------
+# SERVICE_TENANT_NAME glance service
+# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled)
+# SERVICE_TENANT_NAME glance-search search (if Search is enabled)
function create_glance_accounts {
if is_service_enabled g-api; then
@@ -251,13 +268,27 @@
"$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
fi
fi
+
+ # Add glance-search service and endpoints
+ if is_service_enabled g-search; then
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ local glance_search_service=$(get_or_create_service "glance-search" \
+ "search" "EXPERIMENTAL - Glance Graffiti Search Service")
+
+ get_or_create_endpoint $glance_search_service \
+ "$REGION_NAME" \
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT"
+ fi
+ fi
}
# create_glance_cache_dir() - Part of the init_glance() process
function create_glance_cache_dir {
# Create cache dir
- sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry
- rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/*
+ sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search
+ rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/*
}
# init_glance() - Initialize databases, etc.
@@ -280,6 +311,12 @@
$GLANCE_BIN_DIR/glance-manage db_load_metadefs
create_glance_cache_dir
+
+ # Init glance search by exporting found metadefs/images to elasticsearch
+ if is_service_enabled g-search; then
+ ${TOP_DIR}/pkg/elasticsearch.sh start
+ $GLANCE_BIN_DIR/glance-index
+ fi
}
# install_glanceclient() - Collect source and prepare
@@ -301,11 +338,13 @@
fi
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
- setup_develop $GLANCE_DIR
- if is_service_enabled g-graffiti; then
+
+ if is_service_enabled g-search; then
${TOP_DIR}/pkg/elasticsearch.sh download
${TOP_DIR}/pkg/elasticsearch.sh install
fi
+
+ setup_develop $GLANCE_DIR
}
# start_glance() - Start running processes, including screen
@@ -314,18 +353,29 @@
if is_service_enabled tls-proxy; then
start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT &
start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT &
+
+ # Handle g-search
+ if is_service_enabled g-search; then
+ start_tls_proxy '*' $GLANCE_SEARCH_PORT $GLANCE_SERVICE_HOST $GLANCE_SEARCH_PORT_INT &
+ fi
fi
run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
- if is_service_enabled g-graffiti; then
- ${TOP_DIR}/pkg/elasticsearch.sh start
- fi
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then
die $LINENO "g-api did not start"
fi
+
+ # Start g-search after g-reg/g-api
+ if is_service_enabled g-search; then
+ run_process g-search "$GLANCE_BIN_DIR/glance-search --config-file=$GLANCE_CONF_DIR/glance-search.conf"
+ echo "Waiting for g-search ($GLANCE_SEARCH_HOSTPORT) to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT; then
+ die $LINENO "g-search did not start"
+ fi
+ fi
}
# stop_glance() - Stop running processes
@@ -333,6 +383,10 @@
# Kill the Glance screen windows
stop_process g-api
stop_process g-reg
+
+ if is_service_enabled g-search; then
+ stop_process g-search
+ fi
}
# Restore xtrace
diff --git a/lib/ironic b/lib/ironic
index 7afed05..4a37f0a 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -296,7 +296,7 @@
# API specific configuration.
function configure_ironic_api {
iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
- iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
+ iniset $IRONIC_CONF_FILE oslo_policy policy_file $IRONIC_POLICY_JSON
# TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed(bug#1422632).
# These codes need to be changed to use the function of configure_auth_token_middleware
@@ -765,7 +765,7 @@
fi
fi
- local token=$(keystone token-get | grep ' id ' | get_field 2)
+ local token=$(openstack token issue -c id -f value)
die_if_not_set $LINENO token "Keystone fail to get token"
# load them into glance
diff --git a/lib/keystone b/lib/keystone
index 31659f4..0f369af 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -164,7 +164,7 @@
keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
fi
if [[ ${USE_VENV} = True ]]; then
- venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/python2.7/site-packages"
+ venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
fi
# copy proxy vhost and wsgi file
@@ -237,9 +237,6 @@
iniset_rpc_backend keystone $KEYSTONE_CONF
- # Set the URL advertised in the ``versions`` structure returned by the '/' route
- iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/"
- iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/"
iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
# Register SSL certificates if provided
diff --git a/lib/lvm b/lib/lvm
index 54976a3..1fe2683 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -145,6 +145,13 @@
fi
}
+# clean_lvm_filter() Remove the filter rule set in set_lvm_filter()
+#
+# Usage: clean_lvm_filter()
+function clean_lvm_filter {
+ sudo sed -i "s/^.*# from devstack$//" /etc/lvm/lvm.conf
+}
+
# set_lvm_filter() Gather all devices configured for LVM and
# use them to build a global device filter
# set_lvm_filter() Create a device filter
@@ -154,7 +161,7 @@
#
# Usage: set_lvm_filter()
function set_lvm_filter {
- local filter_suffix='"r|.*|" ]'
+ local filter_suffix='"r|.*|" ] # from devstack'
local filter_string="global_filter = [ "
local pv
local vg
@@ -167,6 +174,7 @@
done
filter_string=$filter_string$filter_suffix
+ clean_lvm_filter
sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf
echo_summary "set lvm.conf device global_filter to: $filter_string"
}
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index c6d9296..18b0942 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -57,15 +57,12 @@
# Settings
# --------
-# Timeout value in seconds to wait for IPv6 gateway configuration
-GATEWAY_TIMEOUT=30
-
# Neutron Network Configuration
# -----------------------------
# Subnet IP version
-IP_VERSION=${IP_VERSION:-4}
+IP_VERSION=${IP_VERSION:-"4+6"}
# Validate IP_VERSION
if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then
die $LINENO "IP_VERSION must be either 4, 6, or 4+6"
@@ -90,12 +87,9 @@
IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64}
IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1}
-IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-fe80:cafe:cafe::/64}
-IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-fe80:cafe:cafe::2}
-# IPV6_ROUTER_GW_IP must be defined when IP_VERSION=4+6 as it cannot be
-# obtained conventionally until the l3-agent has support for dual-stack
-# TODO (john-davidge) Remove once l3-agent supports dual-stack
-IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-fe80:cafe:cafe::1}
+IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64}
+IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2}
+IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1}
# Set up default directories
GITDIR["python-neutronclient"]=$DEST/python-neutronclient
@@ -1087,7 +1081,7 @@
iniset $NEUTRON_CONF DEFAULT verbose True
iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
- iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE
+ iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
@@ -1151,6 +1145,8 @@
sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
fi
sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
+ sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
+
# Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
@@ -1291,20 +1287,12 @@
# This logic is specific to using the l3-agent for layer 3
if is_service_enabled q-l3; then
- local ipv6_router_gw_port
# Ensure IPv6 forwarding is enabled on the host
sudo sysctl -w net.ipv6.conf.all.forwarding=1
# Configure and enable public bridge
- if [[ "$IP_VERSION" = "6" ]]; then
- # Override global IPV6_ROUTER_GW_IP with the true value from neutron
- IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'`
- die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
- ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'`
- die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port"
- else
- ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'`
- die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port"
- fi
+ # Override global IPV6_ROUTER_GW_IP with the true value from neutron
+ IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'`
+ die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
# The ovs_base_configure_l3_agent function flushes the public
# bridge's ip addresses, so turn IPv6 support in the host off
@@ -1321,28 +1309,8 @@
local ext_gw_interface=$(_neutron_get_ext_gw_interface)
local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
- # Define router_ns based on whether DVR is enabled
- local router_ns=qrouter
- if [[ "$Q_DVR_MODE" == "dvr_snat" ]]; then
- router_ns=snat
- fi
-
# Configure interface for public bridge
sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-
- # Wait until layer 3 agent has configured the gateway port on
- # the public bridge, then add gateway address to the interface
- # TODO (john-davidge) Remove once l3-agent supports dual-stack
- if [[ "$IP_VERSION" == "4+6" ]]; then
- if ! timeout $GATEWAY_TIMEOUT sh -c "until sudo ip netns exec $router_ns-$ROUTER_ID ip addr show qg-${ipv6_router_gw_port:0:11} | grep $ROUTER_GW_IP; do sleep 1; done"; then
- die $LINENO "Timeout retrieving ROUTER_GW_IP"
- fi
- # Configure the gateway port with the public IPv6 adress
- sudo ip netns exec $router_ns-$ROUTER_ID ip -6 addr add $IPV6_ROUTER_GW_IP/$ipv6_cidr_len dev qg-${ipv6_router_gw_port:0:11}
- # Add a default IPv6 route to the neutron router as the
- # l3-agent does not add one in the dual-stack case
- sudo ip netns exec $router_ns-$ROUTER_ID ip -6 route replace default via $ipv6_ext_gw_ip dev qg-${ipv6_router_gw_port:0:11}
- fi
sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
fi
_neutron_set_router_id
@@ -1404,27 +1372,6 @@
echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
}
-function _ping_check_neutron {
- local from_net=$1
- local ip=$2
- local timeout_sec=$3
- local expected=${4:-"True"}
- local check_command=""
- probe_cmd=`_get_probe_cmd_prefix $from_net`
- if [[ "$expected" = "True" ]]; then
- check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
- else
- check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
- fi
- if ! timeout $timeout_sec sh -c "$check_command"; then
- if [[ "$expected" = "True" ]]; then
- die $LINENO "[Fail] Couldn't ping server"
- else
- die $LINENO "[Fail] Could ping server"
- fi
- fi
-}
-
# ssh check
function _ssh_check_neutron {
local from_net=$1
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index e3b2c4d..8853777 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -89,7 +89,7 @@
# Allow for setup the flat type network
if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" && -n "$PHYSICAL_NETWORK" ]]; then
- Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$Q_ML2_FLAT_PHYSNET_OPTIONS"
+ Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$PHYSICAL_NETWORK"
fi
# REVISIT(rkukura): Setting firewall_driver here for
# neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
@@ -104,8 +104,10 @@
iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
fi
- # Since we enable the tunnel TypeDrivers, also enable a local_ip
- iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
+ if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
+ # Set local_ip if TENANT_TUNNELS are enabled.
+ iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
+ fi
populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 2997c6c..51999c6 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -93,11 +93,8 @@
sudo ip link set $Q_PUBLIC_VETH_EX up
sudo ip addr flush dev $Q_PUBLIC_VETH_EX
else
- # --no-wait causes a race condition if $PUBLIC_BRIDGE is not up when ip addr flush is called
sudo ovs-vsctl -- --may-exist add-br $PUBLIC_BRIDGE
sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
- # ensure no IP is configured on the public bridge
- sudo ip addr flush dev $PUBLIC_BRIDGE
fi
}
diff --git a/lib/nova b/lib/nova
index 807dfce..7d2145b 100644
--- a/lib/nova
+++ b/lib/nova
@@ -16,6 +16,7 @@
#
# - install_nova
# - configure_nova
+# - _config_nova_apache_wsgi
# - create_nova_conf
# - init_nova
# - start_nova
@@ -62,6 +63,15 @@
# Expect to remove in L or M.
NOVA_API_VERSION=${NOVA_API_VERSION-default}
+if is_suse; then
+ NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova}
+else
+ NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova}
+fi
+
+# Toggle for deploying Nova-API under HTTPD + mod_wsgi
+NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False}
+
if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
NOVA_SERVICE_PROTOCOL="https"
EC2_SERVICE_PROTOCOL="https"
@@ -223,6 +233,64 @@
#fi
}
+# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_nova_apache_wsgi {
+ sudo rm -f $NOVA_WSGI_DIR/*
+ sudo rm -f $(apache_site_config_for nova-api)
+ sudo rm -f $(apache_site_config_for nova-ec2-api)
+}
+
+# _config_nova_apache_wsgi() - Set WSGI config files of Nova
+function _config_nova_apache_wsgi {
+ sudo mkdir -p $NOVA_WSGI_DIR
+
+ local nova_apache_conf=$(apache_site_config_for nova-api)
+ local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+ local nova_ssl=""
+ local nova_certfile=""
+ local nova_keyfile=""
+ local nova_api_port=$NOVA_SERVICE_PORT
+ local nova_ec2_api_port=$EC2_SERVICE_PORT
+ local venv_path=""
+
+ if is_ssl_enabled_service nova-api; then
+ nova_ssl="SSLEngine On"
+ nova_certfile="SSLCertificateFile $NOVA_SSL_CERT"
+ nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY"
+ fi
+ if [[ ${USE_VENV} = True ]]; then
+ venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
+ fi
+
+ # copy proxy vhost and wsgi helper files
+ sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api
+ sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api
+
+ sudo cp $FILES/apache-nova-api.template $nova_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$nova_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g;
+ s|%SSLENGINE%|$nova_ssl|g;
+ s|%SSLCERTFILE%|$nova_certfile|g;
+ s|%SSLKEYFILE%|$nova_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $nova_apache_conf
+
+ sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf
+ sudo sed -e "
+ s|%PUBLICPORT%|$nova_ec2_api_port|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-ec2-api|g;
+ s|%SSLENGINE%|$nova_ssl|g;
+ s|%SSLCERTFILE%|$nova_certfile|g;
+ s|%SSLKEYFILE%|$nova_keyfile|g;
+ s|%USER%|$STACK_USER|g;
+ s|%VIRTUALENV%|$venv_path|g
+ " -i $nova_ec2_apache_conf
+}
+
# configure_nova() - Set config files, create data dirs, etc
function configure_nova {
# Put config files in ``/etc/nova`` for everyone to find
@@ -392,7 +460,6 @@
iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
- iniset $NOVA_CONF DEFAULT allow_migrate_to_same_host "True"
fi
iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
@@ -453,12 +520,16 @@
iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
fi
# Format logging
- if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$NOVA_USE_MOD_WSGI" == "False" ] ; then
setup_colorized_logging $NOVA_CONF DEFAULT
else
# Show user_name and project_name instead of user_id and project_id
iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
fi
+ if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+ _config_nova_apache_wsgi
+ fi
+
if is_service_enabled ceilometer; then
iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
@@ -655,6 +726,13 @@
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
setup_develop $NOVA_DIR
sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
+
+ if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+ install_apache_wsgi
+ if is_ssl_enabled_service "nova-api"; then
+ enable_mod_ssl
+ fi
+ fi
}
# start_nova_api() - Start the API process ahead of other things
@@ -671,7 +749,18 @@
local old_path=$PATH
export PATH=$NOVA_BIN_DIR:$PATH
- run_process n-api "$NOVA_BIN_DIR/nova-api"
+ # If the site is not enabled then we are in a grenade scenario
+ local enabled_site_file=$(apache_site_config_for nova-api)
+ if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+ enable_apache_site nova-api
+ enable_apache_site nova-ec2-api
+ restart_apache_server
+ tail_log nova /var/log/$APACHE_NAME/nova-api.log
+ tail_log nova /var/log/$APACHE_NAME/nova-ec2-api.log
+ else
+ run_process n-api "$NOVA_BIN_DIR/nova-api"
+ fi
+
echo "Waiting for nova-api to start..."
if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
die $LINENO "nova-api did not start"
@@ -780,6 +869,13 @@
}
function stop_nova_rest {
+ if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+ disable_apache_site nova-api
+ disable_apache_site nova-ec2-api
+ restart_apache_server
+ else
+ stop_process n-api
+ fi
# Kill the nova screen windows
# Some services are listed here twice since more than one instance
# of a service may be running in certain configs.
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 4d1eb6c..a6a87f9 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -54,6 +54,12 @@
iniset $NOVA_CONF DEFAULT vnc_enabled "false"
fi
+ # arm64-specific configuration
+ if is_arch "aarch64"; then
+ # arm64 architecture currently does not support graphical consoles.
+ iniset $NOVA_CONF DEFAULT vnc_enabled "false"
+ fi
+
ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION)
if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then
# When libguestfs is available for file injection, enable using
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 2b7c6cb..297ebac 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -200,10 +200,7 @@
[[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password"
- if is_fedora || is_suse; then
- # service is not started by default
- restart_service rabbitmq-server
- fi
+ restart_service rabbitmq-server
rabbit_setuser "$RABBIT_USERID" "$RABBIT_PASSWORD" || rc=$?
if [ $rc -ne 0 ]; then
@@ -273,6 +270,12 @@
iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST
iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
iniset $file oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
+ if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then
+ iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD
+ fi
+ if [ -n "$RABBIT_HEARTBEAT_RATE" ]; then
+ iniset $file oslo_messaging_rabbit heartbeat_rate $RABBIT_HEARTBEAT_RATE
+ fi
fi
}
diff --git a/lib/sahara b/lib/sahara
index 0651b0a..6d4e864 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -33,8 +33,12 @@
SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
+if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then
+ SAHARA_SERVICE_PROTOCOL="https"
+fi
SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
+SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386}
SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
@@ -165,6 +169,14 @@
iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE
fi
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service sahara; then
+ ensure_certificates SAHARA
+
+ iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT"
+ iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY"
+ fi
+
iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
# Format logging
@@ -172,6 +184,11 @@
setup_colorized_logging $SAHARA_CONF_FILE DEFAULT
fi
+ if is_service_enabled tls-proxy; then
+ # Set the service port for a proxy to take the original
+ iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT
+ fi
+
recreate_database sahara
$SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
}
@@ -203,9 +220,26 @@
# start_sahara() - Start running processes, including screen
function start_sahara {
+ local service_port=$SAHARA_SERVICE_PORT
+ local service_protocol=$SAHARA_SERVICE_PROTOCOL
+ if is_service_enabled tls-proxy; then
+ service_port=$SAHARA_SERVICE_PORT_INT
+ service_protocol="http"
+ fi
+
run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE"
+
+ echo "Waiting for Sahara to start..."
+ if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then
+ die $LINENO "Sahara did not start"
+ fi
+
+ # Start proxies if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT &
+ fi
}
# stop_sahara() - Stop running processes
diff --git a/lib/swift b/lib/swift
index 456dde4..820042d 100644
--- a/lib/swift
+++ b/lib/swift
@@ -439,7 +439,7 @@
if is_service_enabled swift3; then
cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
[filter:s3token]
-paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
+paste.filter_factory = keystonemiddleware.s3_token:filter_factory
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
diff --git a/lib/tempest b/lib/tempest
index dc5fb51..9c22716 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -106,6 +106,10 @@
pip_install_gr testrepository
fi
+ # Used during configuration so make sure we have the correct
+ # version installed
+ pip_install_gr python-openstackclient
+
local image_lines
local images
local num_images
@@ -366,6 +370,8 @@
iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions
# TODO(mriedem): Remove the preserve_ports flag when Juno is end of life.
iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True
+ # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
+ iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
# Network
iniset $TEMPEST_CONFIG network api_version 2.0
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 29dc22f..79f67a0 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -7,6 +7,8 @@
TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
FILES=$TOP_DIR/files
source $TOP_DIR/functions
+DEST=${DEST:-/opt/stack}
+source $TOP_DIR/lib/infra
# Package source and version, all pkg files are expected to have
# something like this, as well as a way to override them.
diff --git a/run_tests.sh b/run_tests.sh
index c6b7da6..a9a3d0b 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -17,8 +17,6 @@
PASSES=""
FAILURES=""
-# Test that no one is trying to land crazy refs as branches
-
for testfile in tests/test_*.sh; do
$testfile
if [[ $? -eq 0 ]]; then
diff --git a/stack.sh b/stack.sh
index 7a5cbcc..f20af21 100755
--- a/stack.sh
+++ b/stack.sh
@@ -505,7 +505,7 @@
check_rpc_backend
# Service to enable with SSL if ``USE_SSL`` is True
-SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara"
if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
die $LINENO "tls-proxy and SSL are mutually exclusive"
@@ -514,6 +514,9 @@
# Configure Projects
# ==================
+# Clone all external plugins
+fetch_plugins
+
# Plugin Phase 0: override_defaults - allow pluggins to override
# defaults before other services are run
run_phase override_defaults
@@ -540,9 +543,6 @@
source $TOP_DIR/lib/ldap
source $TOP_DIR/lib/dstat
-# Clone all external plugins
-fetch_plugins
-
# Extras Source
# --------------
@@ -705,6 +705,9 @@
# Virtual Environment
# -------------------
+# Install required infra support libraries
+install_infra
+
# Pre-build some problematic wheels
if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
source $TOP_DIR/tools/build_wheels.sh
@@ -713,10 +716,6 @@
# Extras Pre-install
# ------------------
-
-# Install required infra support libraries
-install_infra
-
# Phase: pre-install
run_phase stack pre-install
@@ -1173,7 +1172,7 @@
# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
if is_service_enabled g-reg; then
- TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+ TOKEN=$(openstack token issue -c id -f value)
die_if_not_set $LINENO TOKEN "Keystone fail to get token"
echo_summary "Uploading images"
diff --git a/stackrc b/stackrc
index abedb00..3c08b15 100644
--- a/stackrc
+++ b/stackrc
@@ -49,7 +49,7 @@
# Keystone - nothing works without keystone
ENABLED_SERVICES=key
# Nova - services to support libvirt based openstack clouds
- ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc
+ ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-crt,n-cauth
# Glance services needed for Nova
ENABLED_SERVICES+=,g-api,g-reg
# Cinder
@@ -536,7 +536,7 @@
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
-CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.3.4"}
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
diff --git a/tests/functions.sh b/tests/functions.sh
deleted file mode 100755
index 126080f..0000000
--- a/tests/functions.sh
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/env bash
-
-# Tests for DevStack functions
-
-TOP=$(cd $(dirname "$0")/.. && pwd)
-
-# Import common functions
-source $TOP/functions
-
-# Import configuration
-source $TOP/openrc
-
-
-echo "Testing die_if_not_set()"
-
-bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'"
-if [[ $? != 0 ]]; then
- echo "die_if_not_set [X='Y' true] Failed"
-else
- echo 'OK'
-fi
-
-bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'"
-if [[ $? = 0 ]]; then
- echo "die_if_not_set [X='' true] Failed"
-fi
-
-bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'"
-if [[ $? != 0 ]]; then
- echo "die_if_not_set [X='Y' false] Failed"
-else
- echo 'OK'
-fi
-
-bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'"
-if [[ $? = 0 ]]; then
- echo "die_if_not_set [X='' false] Failed"
-fi
-
-
-# Enabling/disabling services
-
-echo "Testing enable_service()"
-
-function test_enable_service {
- local start="$1"
- local add="$2"
- local finish="$3"
-
- ENABLED_SERVICES="$start"
- enable_service $add
- if [ "$ENABLED_SERVICES" = "$finish" ]; then
- echo "OK: $start + $add -> $ENABLED_SERVICES"
- else
- echo "changing $start to $finish with $add failed: $ENABLED_SERVICES"
- fi
-}
-
-test_enable_service '' a 'a'
-test_enable_service 'a' b 'a,b'
-test_enable_service 'a,b' c 'a,b,c'
-test_enable_service 'a,b' c 'a,b,c'
-test_enable_service 'a,b,' c 'a,b,c'
-test_enable_service 'a,b' c,d 'a,b,c,d'
-test_enable_service 'a,b' "c d" 'a,b,c,d'
-test_enable_service 'a,b,c' c 'a,b,c'
-
-test_enable_service 'a,b,-c' c 'a,b'
-test_enable_service 'a,b,c' -c 'a,b'
-
-function test_disable_service {
- local start="$1"
- local del="$2"
- local finish="$3"
-
- ENABLED_SERVICES="$start"
- disable_service "$del"
- if [ "$ENABLED_SERVICES" = "$finish" ]; then
- echo "OK: $start - $del -> $ENABLED_SERVICES"
- else
- echo "changing $start to $finish with $del failed: $ENABLED_SERVICES"
- fi
-}
-
-echo "Testing disable_service()"
-test_disable_service 'a,b,c' a 'b,c'
-test_disable_service 'a,b,c' b 'a,c'
-test_disable_service 'a,b,c' c 'a,b'
-
-test_disable_service 'a,b,c' a 'b,c'
-test_disable_service 'b,c' b 'c'
-test_disable_service 'c' c ''
-test_disable_service '' d ''
-
-test_disable_service 'a,b,c,' c 'a,b'
-test_disable_service 'a,b' c 'a,b'
-
-
-echo "Testing disable_all_services()"
-ENABLED_SERVICES=a,b,c
-disable_all_services
-
-if [[ -z "$ENABLED_SERVICES" ]]; then
- echo "OK"
-else
- echo "disabling all services FAILED: $ENABLED_SERVICES"
-fi
-
-echo "Testing disable_negated_services()"
-
-
-function test_disable_negated_services {
- local start="$1"
- local finish="$2"
-
- ENABLED_SERVICES="$start"
- disable_negated_services
- if [ "$ENABLED_SERVICES" = "$finish" ]; then
- echo "OK: $start + $add -> $ENABLED_SERVICES"
- else
- echo "changing $start to $finish failed: $ENABLED_SERVICES"
- fi
-}
-
-test_disable_negated_services '-a' ''
-test_disable_negated_services '-a,a' ''
-test_disable_negated_services '-a,-a' ''
-test_disable_negated_services 'a,-a' ''
-test_disable_negated_services 'b,a,-a' 'b'
-test_disable_negated_services 'a,b,-a' 'b'
-test_disable_negated_services 'a,-a,b' 'b'
-
-
-echo "Testing is_package_installed()"
-
-if [[ -z "$os_PACKAGE" ]]; then
- GetOSVersion
-fi
-
-if [[ "$os_PACKAGE" = "deb" ]]; then
- is_package_installed dpkg
- VAL=$?
-elif [[ "$os_PACKAGE" = "rpm" ]]; then
- is_package_installed rpm
- VAL=$?
-else
- VAL=1
-fi
-if [[ "$VAL" -eq 0 ]]; then
- echo "OK"
-else
- echo "is_package_installed() on existing package failed"
-fi
-
-if [[ "$os_PACKAGE" = "deb" ]]; then
- is_package_installed dpkg bash
- VAL=$?
-elif [[ "$os_PACKAGE" = "rpm" ]]; then
- is_package_installed rpm bash
- VAL=$?
-else
- VAL=1
-fi
-if [[ "$VAL" -eq 0 ]]; then
- echo "OK"
-else
- echo "is_package_installed() on more than one existing package failed"
-fi
-
-is_package_installed zzzZZZzzz
-VAL=$?
-if [[ "$VAL" -ne 0 ]]; then
- echo "OK"
-else
- echo "is_package_installed() on non-existing package failed"
-fi
-
-# test against removed package...was a bug on Ubuntu
-if is_ubuntu; then
- PKG=cowsay
- if ! (dpkg -s $PKG >/dev/null 2>&1); then
- # it was never installed...set up the condition
- sudo apt-get install -y cowsay >/dev/null 2>&1
- fi
- if (dpkg -s $PKG >/dev/null 2>&1); then
- # remove it to create the 'un' status
- sudo dpkg -P $PKG >/dev/null 2>&1
- fi
-
- # now test the installed check on a deleted package
- is_package_installed $PKG
- VAL=$?
- if [[ "$VAL" -ne 0 ]]; then
- echo "OK"
- else
- echo "is_package_installed() on deleted package failed"
- fi
-fi
-
-# test isset function
-echo "Testing isset()"
-you_should_not_have_this_variable=42
-
-if isset "you_should_not_have_this_variable"; then
- echo "OK"
-else
- echo "\"you_should_not_have_this_variable\" not declared. failed"
-fi
-
-unset you_should_not_have_this_variable
-if isset "you_should_not_have_this_variable"; then
- echo "\"you_should_not_have_this_variable\" looks like declared variable. failed"
-else
- echo "OK"
-fi
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index e57948a..1d82792 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -1,34 +1,223 @@
#!/usr/bin/env bash
-# Tests for DevStack meta-config functions
+# Tests for DevStack functions
TOP=$(cd $(dirname "$0")/.. && pwd)
# Import common functions
source $TOP/functions
+
source $TOP/tests/unittest.sh
-function test_truefalse {
- local one=1
- local captrue=True
- local lowtrue=true
- local abrevtrue=t
- local zero=0
- local capfalse=False
- local lowfalse=false
- local abrevfalse=f
- for against in True False; do
- for name in one captrue lowtrue abrevtrue; do
- assert_equal "True" $(trueorfalse $against $name) "\$(trueorfalse $against $name)"
- done
- done
- for against in True False; do
- for name in zero capfalse lowfalse abrevfalse; do
- assert_equal "False" $(trueorfalse $against $name) "\$(trueorfalse $against $name)"
- done
- done
+echo "Testing die_if_not_set()"
+
+bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set $LINENO X 'not OK'"
+if [[ $? != 0 ]]; then
+ failed "die_if_not_set [X='Y' true] Failed"
+else
+ passed 'OK'
+fi
+
+bash -c "source $TOP/functions; X=`true`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1
+if [[ $? = 0 ]]; then
+ failed "die_if_not_set [X='' true] Failed"
+fi
+
+bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set $LINENO X 'not OK'"
+if [[ $? != 0 ]]; then
+ failed "die_if_not_set [X='Y' false] Failed"
+else
+ passed 'OK'
+fi
+
+bash -c "source $TOP/functions; X=`false`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1
+if [[ $? = 0 ]]; then
+ failed "die_if_not_set [X='' false] Failed"
+fi
+
+
+# Enabling/disabling services
+
+echo "Testing enable_service()"
+
+function test_enable_service {
+ local start="$1"
+ local add="$2"
+ local finish="$3"
+
+ ENABLED_SERVICES="$start"
+ enable_service $add
+ if [ "$ENABLED_SERVICES" = "$finish" ]; then
+ passed "OK: $start + $add -> $ENABLED_SERVICES"
+ else
+ failed "changing $start to $finish with $add failed: $ENABLED_SERVICES"
+ fi
}
-test_truefalse
+test_enable_service '' a 'a'
+test_enable_service 'a' b 'a,b'
+test_enable_service 'a,b' c 'a,b,c'
+test_enable_service 'a,b' c 'a,b,c'
+test_enable_service 'a,b,' c 'a,b,c'
+test_enable_service 'a,b' c,d 'a,b,c,d'
+test_enable_service 'a,b' "c d" 'a,b,c,d'
+test_enable_service 'a,b,c' c 'a,b,c'
+
+test_enable_service 'a,b,-c' c 'a,b'
+test_enable_service 'a,b,c' -c 'a,b'
+
+function test_disable_service {
+ local start="$1"
+ local del="$2"
+ local finish="$3"
+
+ ENABLED_SERVICES="$start"
+ disable_service "$del"
+ if [ "$ENABLED_SERVICES" = "$finish" ]; then
+ passed "OK: $start - $del -> $ENABLED_SERVICES"
+ else
+ failed "changing $start to $finish with $del failed: $ENABLED_SERVICES"
+ fi
+}
+
+echo "Testing disable_service()"
+test_disable_service 'a,b,c' a 'b,c'
+test_disable_service 'a,b,c' b 'a,c'
+test_disable_service 'a,b,c' c 'a,b'
+
+test_disable_service 'a,b,c' a 'b,c'
+test_disable_service 'b,c' b 'c'
+test_disable_service 'c' c ''
+test_disable_service '' d ''
+
+test_disable_service 'a,b,c,' c 'a,b'
+test_disable_service 'a,b' c 'a,b'
+
+
+echo "Testing disable_all_services()"
+ENABLED_SERVICES=a,b,c
+disable_all_services
+
+if [[ -z "$ENABLED_SERVICES" ]]; then
+ passed "OK"
+else
+ failed "disabling all services FAILED: $ENABLED_SERVICES"
+fi
+
+echo "Testing disable_negated_services()"
+
+
+function test_disable_negated_services {
+ local start="$1"
+ local finish="$2"
+
+ ENABLED_SERVICES="$start"
+ disable_negated_services
+ if [ "$ENABLED_SERVICES" = "$finish" ]; then
+ passed "OK: $start + $add -> $ENABLED_SERVICES"
+ else
+ failed "changing $start to $finish failed: $ENABLED_SERVICES"
+ fi
+}
+
+test_disable_negated_services '-a' ''
+test_disable_negated_services '-a,a' ''
+test_disable_negated_services '-a,-a' ''
+test_disable_negated_services 'a,-a' ''
+test_disable_negated_services 'b,a,-a' 'b'
+test_disable_negated_services 'a,b,-a' 'b'
+test_disable_negated_services 'a,-a,b' 'b'
+test_disable_negated_services 'a,aa,-a' 'aa'
+test_disable_negated_services 'aa,-a' 'aa'
+test_disable_negated_services 'a_a, -a_a' ''
+test_disable_negated_services 'a-b, -a-b' ''
+test_disable_negated_services 'a-b, b, -a-b' 'b'
+test_disable_negated_services 'a,-a,av2,b' 'av2,b'
+test_disable_negated_services 'a,aa,-a' 'aa'
+test_disable_negated_services 'a,av2,-a,a' 'av2'
+test_disable_negated_services 'a,-a,av2' 'av2'
+
+echo "Testing is_package_installed()"
+
+if [[ -z "$os_PACKAGE" ]]; then
+ GetOSVersion
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+ is_package_installed dpkg
+ VAL=$?
+elif [[ "$os_PACKAGE" = "rpm" ]]; then
+ is_package_installed rpm
+ VAL=$?
+else
+ VAL=1
+fi
+if [[ "$VAL" -eq 0 ]]; then
+ passed "OK"
+else
+ failed "is_package_installed() on existing package failed"
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+ is_package_installed dpkg bash
+ VAL=$?
+elif [[ "$os_PACKAGE" = "rpm" ]]; then
+ is_package_installed rpm bash
+ VAL=$?
+else
+ VAL=1
+fi
+if [[ "$VAL" -eq 0 ]]; then
+ passed "OK"
+else
+ failed "is_package_installed() on more than one existing package failed"
+fi
+
+is_package_installed zzzZZZzzz
+VAL=$?
+if [[ "$VAL" -ne 0 ]]; then
+ passed "OK"
+else
+ failed "is_package_installed() on non-existing package failed"
+fi
+
+# test against removed package...was a bug on Ubuntu
+if is_ubuntu; then
+ PKG=cowsay
+ if ! (dpkg -s $PKG >/dev/null 2>&1); then
+ # it was never installed...set up the condition
+ sudo apt-get install -y cowsay >/dev/null 2>&1
+ fi
+ if (dpkg -s $PKG >/dev/null 2>&1); then
+ # remove it to create the 'un' status
+ sudo dpkg -P $PKG >/dev/null 2>&1
+ fi
+
+ # now test the installed check on a deleted package
+ is_package_installed $PKG
+ VAL=$?
+ if [[ "$VAL" -ne 0 ]]; then
+ passed "OK"
+ else
+ failed "is_package_installed() on deleted package failed"
+ fi
+fi
+
+# test isset function
+echo "Testing isset()"
+you_should_not_have_this_variable=42
+
+if isset "you_should_not_have_this_variable"; then
+ passed "OK"
+else
+ failed "\"you_should_not_have_this_variable\" not declared. failed"
+fi
+
+unset you_should_not_have_this_variable
+if isset "you_should_not_have_this_variable"; then
+ failed "\"you_should_not_have_this_variable\" looks like declared variable."
+else
+ passed "OK"
+fi
report_results
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index 4a0ae33..b2529ac 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -7,6 +7,9 @@
# Import config functions
source $TOP/inc/ini-config
+source $TOP/tests/unittest.sh
+
+set -e
echo "Testing INI functions"
@@ -70,86 +73,86 @@
iniset test.ini aaa
NO_ATTRIBUTE=$(cat test.ini)
if [[ "$BEFORE" == "$NO_ATTRIBUTE" ]]; then
- echo "OK"
+ passed
else
- echo "failed"
+ failed "failed"
fi
echo -n "iniset: test missing section argument: "
iniset test.ini
NO_SECTION=$(cat test.ini)
if [[ "$BEFORE" == "$NO_SECTION" ]]; then
- echo "OK"
+ passed
else
- echo "failed"
+ failed "failed"
fi
# Test with spaces
VAL=$(iniget test.ini aaa handlers)
if [[ "$VAL" == "aa, bb" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
iniset test.ini aaa handlers "11, 22"
VAL=$(iniget test.ini aaa handlers)
if [[ "$VAL" == "11, 22" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# Test with spaces in section header
VAL=$(iniget test.ini " ccc " spaces)
if [[ "$VAL" == "yes" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
iniset test.ini "b b" opt_ion 42
VAL=$(iniget test.ini "b b" opt_ion)
if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# Test without spaces, end of file
VAL=$(iniget test.ini bbb handlers)
if [[ "$VAL" == "ee,ff" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
iniset test.ini bbb handlers "33,44"
VAL=$(iniget test.ini bbb handlers)
if [[ "$VAL" == "33,44" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# test empty option
if ini_has_option test.ini ddd empty; then
- echo "OK: ddd.empty present"
+ passed "OK: ddd.empty present"
else
- echo "ini_has_option failed: ddd.empty not found"
+ failed "ini_has_option failed: ddd.empty not found"
fi
# test non-empty option
if ini_has_option test.ini bbb handlers; then
- echo "OK: bbb.handlers present"
+ passed "OK: bbb.handlers present"
else
- echo "ini_has_option failed: bbb.handlers not found"
+ failed "ini_has_option failed: bbb.handlers not found"
fi
# test changing empty option
@@ -157,9 +160,9 @@
VAL=$(iniget test.ini ddd empty)
if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# test pipe in option
@@ -167,9 +170,9 @@
VAL=$(iniget test.ini aaa handlers)
if [[ "$VAL" == "a|b" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# test space in option
@@ -177,51 +180,51 @@
VAL="$(iniget test.ini aaa handlers)"
if [[ "$VAL" == "a b" ]]; then
- echo "OK: $VAL"
+ passed "OK: $VAL"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# Test section not exist
VAL=$(iniget test.ini zzz handlers)
if [[ -z "$VAL" ]]; then
- echo "OK: zzz not present"
+ passed "OK: zzz not present"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
iniset test.ini zzz handlers "999"
VAL=$(iniget test.ini zzz handlers)
if [[ -n "$VAL" ]]; then
- echo "OK: zzz not present"
+ passed "OK: zzz not present"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# Test option not exist
VAL=$(iniget test.ini aaa debug)
if [[ -z "$VAL" ]]; then
- echo "OK aaa.debug not present"
+ passed "OK aaa.debug not present"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
if ! ini_has_option test.ini aaa debug; then
- echo "OK aaa.debug not present"
+ passed "OK aaa.debug not present"
else
- echo "ini_has_option failed: aaa.debug"
+ failed "ini_has_option failed: aaa.debug"
fi
iniset test.ini aaa debug "999"
VAL=$(iniget test.ini aaa debug)
if [[ -n "$VAL" ]]; then
- echo "OK aaa.debug present"
+ passed "OK aaa.debug present"
else
- echo "iniget failed: $VAL"
+ failed "iniget failed: $VAL"
fi
# Test comments
@@ -230,9 +233,9 @@
VAL=$(iniget test.ini aaa handlers)
if [[ -z "$VAL" ]]; then
- echo "OK"
+ passed "OK"
else
- echo "inicomment failed: $VAL"
+ failed "inicomment failed: $VAL"
fi
# Test multiple line iniset/iniget
@@ -242,25 +245,25 @@
if [[ "$VAL" == "bar1 bar2" ]]; then
echo "OK: iniset_multiline"
else
- echo "iniset_multiline failed: $VAL"
+ failed "iniset_multiline failed: $VAL"
fi
# Test iniadd with exiting values
iniadd test.ini eee multi bar3
VAL=$(iniget_multiline test.ini eee multi)
if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
- echo "OK: iniadd"
+ passed "OK: iniadd"
else
- echo "iniadd failed: $VAL"
+ failed "iniadd failed: $VAL"
fi
# Test iniadd with non-exiting values
iniadd test.ini eee non-multi foobar1 foobar2
VAL=$(iniget_multiline test.ini eee non-multi)
if [[ "$VAL" == "foobar1 foobar2" ]]; then
- echo "OK: iniadd with non-exiting value"
+ passed "OK: iniadd with non-existing value"
else
- echo "iniadd with non-exsting failed: $VAL"
+ failed "iniadd with non-existing failed: $VAL"
fi
# Test inidelete
@@ -276,20 +279,22 @@
inidelete test.ini $x a
VAL=$(iniget_multiline test.ini $x a)
if [ -z "$VAL" ]; then
- echo "OK: inidelete $x"
+ passed "OK: inidelete $x"
else
- echo "inidelete $x failed: $VAL"
+ failed "inidelete $x failed: $VAL"
fi
if [ "$x" = "del_separate_options" -o \
"$x" = "del_missing_option" -o \
"$x" = "del_missing_option_multi" ]; then
VAL=$(iniget_multiline test.ini $x b)
if [ "$VAL" = "c" -o "$VAL" = "c d" ]; then
- echo "OK: inidelete other_options $x"
+ passed "OK: inidelete other_options $x"
else
- echo "inidelete other_option $x failed: $VAL"
+ failed "inidelete other_option $x failed: $VAL"
fi
fi
done
rm test.ini
+
+report_results
diff --git a/tests/test_ip.sh b/tests/test_ip.sh
index add8d1a..c53e80d 100755
--- a/tests/test_ip.sh
+++ b/tests/test_ip.sh
@@ -8,108 +8,111 @@
# Import common functions
source $TOP/functions
+source $TOP/tests/unittest.sh
echo "Testing IP addr functions"
if [[ $(cidr2netmask 4) == 240.0.0.0 ]]; then
- echo "cidr2netmask(): /4...OK"
+ passed "cidr2netmask(): /4...OK"
else
- echo "cidr2netmask(): /4...failed"
+ failed "cidr2netmask(): /4...failed"
fi
if [[ $(cidr2netmask 8) == 255.0.0.0 ]]; then
- echo "cidr2netmask(): /8...OK"
+ passed "cidr2netmask(): /8...OK"
else
- echo "cidr2netmask(): /8...failed"
+ failed "cidr2netmask(): /8...failed"
fi
if [[ $(cidr2netmask 12) == 255.240.0.0 ]]; then
- echo "cidr2netmask(): /12...OK"
+ passed "cidr2netmask(): /12...OK"
else
- echo "cidr2netmask(): /12...failed"
+ failed "cidr2netmask(): /12...failed"
fi
if [[ $(cidr2netmask 16) == 255.255.0.0 ]]; then
- echo "cidr2netmask(): /16...OK"
+ passed "cidr2netmask(): /16...OK"
else
- echo "cidr2netmask(): /16...failed"
+ failed "cidr2netmask(): /16...failed"
fi
if [[ $(cidr2netmask 20) == 255.255.240.0 ]]; then
- echo "cidr2netmask(): /20...OK"
+ passed "cidr2netmask(): /20...OK"
else
- echo "cidr2netmask(): /20...failed"
+ failed "cidr2netmask(): /20...failed"
fi
if [[ $(cidr2netmask 24) == 255.255.255.0 ]]; then
- echo "cidr2netmask(): /24...OK"
+ passed "cidr2netmask(): /24...OK"
else
- echo "cidr2netmask(): /24...failed"
+ failed "cidr2netmask(): /24...failed"
fi
if [[ $(cidr2netmask 28) == 255.255.255.240 ]]; then
- echo "cidr2netmask(): /28...OK"
+ passed "cidr2netmask(): /28...OK"
else
- echo "cidr2netmask(): /28...failed"
+ failed "cidr2netmask(): /28...failed"
fi
if [[ $(cidr2netmask 30) == 255.255.255.252 ]]; then
- echo "cidr2netmask(): /30...OK"
+ passed "cidr2netmask(): /30...OK"
else
- echo "cidr2netmask(): /30...failed"
+ failed "cidr2netmask(): /30...failed"
fi
if [[ $(cidr2netmask 32) == 255.255.255.255 ]]; then
- echo "cidr2netmask(): /32...OK"
+ passed "cidr2netmask(): /32...OK"
else
- echo "cidr2netmask(): /32...failed"
+ failed "cidr2netmask(): /32...failed"
fi
if [[ $(maskip 169.254.169.254 240.0.0.0) == 160.0.0.0 ]]; then
- echo "maskip(): /4...OK"
+ passed "maskip(): /4...OK"
else
- echo "maskip(): /4...failed"
+ failed "maskip(): /4...failed"
fi
if [[ $(maskip 169.254.169.254 255.0.0.0) == 169.0.0.0 ]]; then
- echo "maskip(): /8...OK"
+ passed "maskip(): /8...OK"
else
- echo "maskip(): /8...failed"
+ failed "maskip(): /8...failed"
fi
if [[ $(maskip 169.254.169.254 255.240.0.0) == 169.240.0.0 ]]; then
- echo "maskip(): /12...OK"
+ passed "maskip(): /12...OK"
else
- echo "maskip(): /12...failed"
+ failed "maskip(): /12...failed"
fi
if [[ $(maskip 169.254.169.254 255.255.0.0) == 169.254.0.0 ]]; then
- echo "maskip(): /16...OK"
+ passed "maskip(): /16...OK"
else
- echo "maskip(): /16...failed"
+ failed "maskip(): /16...failed"
fi
if [[ $(maskip 169.254.169.254 255.255.240.0) == 169.254.160.0 ]]; then
- echo "maskip(): /20...OK"
+ passed "maskip(): /20...OK"
else
- echo "maskip(): /20...failed"
+ failed "maskip(): /20...failed"
fi
if [[ $(maskip 169.254.169.254 255.255.255.0) == 169.254.169.0 ]]; then
- echo "maskip(): /24...OK"
+ passed "maskip(): /24...OK"
else
- echo "maskip(): /24...failed"
+ failed "maskip(): /24...failed"
fi
if [[ $(maskip 169.254.169.254 255.255.255.240) == 169.254.169.240 ]]; then
- echo "maskip(): /28...OK"
+ passed "maskip(): /28...OK"
else
- echo "maskip(): /28...failed"
+ failed "maskip(): /28...failed"
fi
if [[ $(maskip 169.254.169.254 255.255.255.255) == 169.254.169.254 ]]; then
- echo "maskip(): /32...OK"
+ passed "maskip(): /32...OK"
else
- echo "maskip(): /32...failed"
+ failed "maskip(): /32...failed"
fi
for mask in 8 12 16 20 24 26 28; do
echo -n "address_in_net(): in /$mask..."
if address_in_net 10.10.10.1 10.10.10.0/$mask; then
- echo "OK"
+ passed "OK"
else
- echo "address_in_net() failed on /$mask"
+ failed "address_in_net() failed on /$mask"
fi
echo -n "address_in_net(): not in /$mask..."
if ! address_in_net 10.10.10.1 11.11.11.0/$mask; then
- echo "OK"
+ passed "OK"
else
- echo "address_in_net() failed on /$mask"
+ failed "address_in_net() failed on /$mask"
fi
done
+
+report_results
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index 9d65280..a04c081 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -8,6 +8,8 @@
source $TOP/inc/ini-config
source $TOP/inc/meta-config
+set -e
+
# check_result() tests and reports the result values
# check_result "actual" "expected"
function check_result {
@@ -17,6 +19,7 @@
echo "OK"
else
echo -e "failed: $actual != $expected\n"
+ exit 1
fi
}
diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh
new file mode 100755
index 0000000..ebd9650
--- /dev/null
+++ b/tests/test_truefalse.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# Tests for DevStack meta-config functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+source $TOP/tests/unittest.sh
+
+function test_trueorfalse {
+ local one=1
+ local captrue=True
+ local lowtrue=true
+ local uppertrue=TRUE
+ local capyes=Yes
+ local lowyes=yes
+ local upperyes=YES
+
+ for default in True False; do
+ for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do
+ assert_equal "True" $(trueorfalse $default $name) "\$(trueorfalse $default $name)"
+ done
+ done
+
+ local zero=0
+ local capfalse=False
+ local lowfalse=false
+ local upperfalse=FALSE
+ local capno=No
+ local lowno=no
+ local upperno=NO
+
+ for default in True False; do
+ for name in zero capfalse lowfalse upperfalse capno lowno upperno; do
+ assert_equal "False" $(trueorfalse $default $name) "\$(trueorfalse $default $name)"
+ done
+ done
+}
+
+test_trueorfalse
+
+report_results
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 435cc3a..69f19b7 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -14,8 +14,30 @@
# we always start with no errors
ERROR=0
+PASS=0
FAILED_FUNCS=""
+function passed {
+ local lineno=$(caller 0 | awk '{print $1}')
+ local function=$(caller 0 | awk '{print $2}')
+ local msg="$1"
+ if [ -z "$msg" ]; then
+ msg="OK"
+ fi
+ PASS=$((PASS+1))
+ echo $function:L$lineno $msg
+}
+
+function failed {
+ local lineno=$(caller 0 | awk '{print $1}')
+ local function=$(caller 0 | awk '{print $2}')
+ local msg="$1"
+ FAILED_FUNCS+="$function:L$lineno\n"
+ echo "ERROR: $function:L$lineno!"
+ echo " $msg"
+ ERROR=$((ERROR+1))
+}
+
function assert_equal {
local lineno=`caller 0 | awk '{print $1}'`
local function=`caller 0 | awk '{print $2}'`
@@ -24,16 +46,20 @@
FAILED_FUNCS+="$function:L$lineno\n"
echo "ERROR: $1 != $2 in $function:L$lineno!"
echo " $msg"
- ERROR=1
+ ERROR=$((ERROR+1))
else
+ PASS=$((PASS+1))
echo "$function:L$lineno - ok"
fi
}
function report_results {
- if [[ $ERROR -eq 1 ]]; then
- echo "Tests FAILED"
- echo $FAILED_FUNCS
+ echo "$PASS Tests PASSED"
+ if [[ $ERROR -gt 0 ]]; then
+ echo
+ echo "The following $ERROR tests FAILED"
+ echo -e "$FAILED_FUNCS"
+ echo "---"
exit 1
fi
}
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
index c57568f..14c2999 100755
--- a/tools/build_wheels.sh
+++ b/tools/build_wheels.sh
@@ -60,6 +60,18 @@
# Install modern pip and wheel
PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
+# BUG: cffi has a lot of issues. It has no stable ABI: if installed
+# code is built with a different ABI than the one that's detected at
+# load time, it tries to compile on the fly for the new ABI in the
+# install location (which will probably be /usr and not
+# writable). Also cffi is often included via setup_requires by
+# packages, which have different install rules (allowing betas) than
+# pip has.
+#
+# Because of this we must pip install cffi into the venv to build
+# wheels.
+PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi
+
# ``VENV_PACKAGES`` is a list of packages we want to pre-install
VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
if [[ -r $VENV_PACKAGE_FILE ]]; then
diff --git a/tools/image_list.sh b/tools/image_list.sh
index 2042807..a27635e 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -9,8 +9,6 @@
# dummy in the end position to trigger the fall through case.
DRIVERS="openvz ironic libvirt vsphere xenserver dummy"
-CIRROS_ARCHS="x86_64 i386"
-
# Extra variables to trigger getting additional images.
export ENABLED_SERVICES="h-api,tr-api"
HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda"
@@ -19,15 +17,12 @@
# Loop over all the virt drivers and collect all the possible images
ALL_IMAGES=""
for driver in $DRIVERS; do
- for arch in $CIRROS_ARCHS; do
- CIRROS_ARCH=$arch
- VIRT_DRIVER=$driver
- URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
- if [[ ! -z "$ALL_IMAGES" ]]; then
- ALL_IMAGES+=,
- fi
- ALL_IMAGES+=$URLS
- done
+ VIRT_DRIVER=$driver
+ URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS)
+ if [[ ! -z "$ALL_IMAGES" ]]; then
+ ALL_IMAGES+=,
+ fi
+ ALL_IMAGES+=$URLS
done
# Make a nice list
diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh
new file mode 100755
index 0000000..0d5728a
--- /dev/null
+++ b/tools/peakmem_tracker.sh
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+# time to sleep between checks
+SLEEP_TIME=20
+
+# MemAvailable is the best estimation and has built-in heuristics
+# around reclaimable memory. However, it is not available until 3.14
+# kernel (i.e. Ubuntu LTS Trusty misses it). In that case, we fall
+# back to free+buffers+cache as the available memory.
+USE_MEM_AVAILABLE=0  # was misspelled USE_MEM_AVAILBLE; get_mem_available tests this name
+if grep -q '^MemAvailable:' /proc/meminfo; then
+    USE_MEM_AVAILABLE=1
+fi
+
+function get_mem_available {
+ if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then
+ awk '/^MemAvailable:/ {print $2}' /proc/meminfo
+ else
+ awk '/^MemFree:/ {free=$2}
+ /^Buffers:/ {buffers=$2}
+ /^Cached:/ {cached=$2}
+ END { print free+buffers+cached }' /proc/meminfo
+ fi
+}
+
+# whenever we see less memory available than last time, dump the
+# snapshot of current usage; i.e. checking the latest entry in the
+# file will give the peak-memory usage
+function tracker {
+ local low_point=$(get_mem_available)
+ while [ 1 ]; do
+
+ local mem_available=$(get_mem_available)
+
+ if [[ $mem_available -lt $low_point ]]; then
+ low_point=$mem_available
+ echo "[[["
+ date
+ echo "---"
+            # always emit greppable output; given the difference in
+            # meminfo output as described above...
+ echo "peakmem_tracker low_point: $mem_available"
+ echo "---"
+ cat /proc/meminfo
+ echo "---"
+            # would a hierarchical view be more useful (-H)? output is
+ # not sorted by usage then, however, and the first
+ # question is "what's using up the memory"
+ #
+ # there are a lot of kernel threads, especially on a 8-cpu
+ # system. do a best-effort removal to improve
+ # signal/noise ratio of output.
+ ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 |
+ grep -v ']$'
+ echo "]]]"
+ fi
+
+ sleep $SLEEP_TIME
+ done
+}
+
+function usage {
+ echo "Usage: $0 [-x] [-s N]" 1>&2
+ exit 1
+}
+
+while getopts ":s:x" opt; do
+ case $opt in
+ s)
+ SLEEP_TIME=$OPTARG
+ ;;
+ x)
+ set -o xtrace
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+tracker
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
new file mode 100755
index 0000000..d36b7f6
--- /dev/null
+++ b/tools/ping_neutron.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Ping a neutron guest using a network namespace probe
+
+set -o errexit
+set -o pipefail
+
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+# This *must* be run as the admin tenant
+source $TOP_DIR/openrc admin admin
+
+function usage {
+ cat - <<EOF
+ping_neutron.sh <net_name> [ping args]
+
+This provides a wrapper to ping neutron guests that are on isolated
+tenant networks that the caller can't normally reach. It does so by
+creating a network namespace probe.
+
+It takes arguments like ping, except the first arg must be the network
+name.
+
+Note: in environments with duplicate network names, the results are
+non deterministic.
+
+This should *really* be in the neutron cli.
+
+EOF
+ exit 1
+}
+
+NET_NAME=$1
+
+if [[ -z "$NET_NAME" ]]; then
+    echo "Error: net_name is required"
+    usage
+fi
+
+REMAINING_ARGS="${@:2}"
+
+# BUG: with duplicate network names, this fails pretty hard.
+NET_ID=$(neutron net-list $NET_NAME | grep "$NET_NAME" | awk '{print $2}')
+PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
+
+# This runs a command inside the specific netns
+NET_NS_CMD="ip netns exec qprobe-$PROBE_ID"
+
+# NOTE: assignment and expansion must use the same name (was REMANING/REMAING)
+PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS"
+echo "Running $PING_CMD"
+$PING_CMD
diff --git a/tools/upload_image.sh b/tools/upload_image.sh
index 5d23f31..19c6b71 100755
--- a/tools/upload_image.sh
+++ b/tools/upload_image.sh
@@ -32,7 +32,7 @@
fi
# Get a token to authenticate to glance
-TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+TOKEN=$(openstack token issue -c id -f value)
die_if_not_set $LINENO TOKEN "Keystone fail to get token"
# Glance connection info. Note the port must be specified.
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 8dd455c..d846f10 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -18,6 +18,7 @@
import argparse
import datetime
+import fnmatch
import os
import os.path
import sys
@@ -41,12 +42,24 @@
print "WARN: %s" % msg
+def _dump_cmd(cmd):
+ print cmd
+ print "-" * len(cmd)
+ print
+ print os.popen(cmd).read()
+
+
+def _header(name):
+ print
+ print name
+ print "=" * len(name)
+ print
+
+
def disk_space():
# the df output
- print """
-File System Summary
-===================
-"""
+ _header("File System Summary")
+
dfraw = os.popen("df -Ph").read()
df = [s.split() for s in dfraw.splitlines()]
for fs in df:
@@ -61,13 +74,36 @@
print dfraw
+def iptables_dump():
+ tables = ['filter', 'nat', 'mangle']
+ _header("IP Tables Dump")
+
+ for table in tables:
+ _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table)
+
+
+def network_dump():
+ _header("Network Dump")
+
+ _dump_cmd("brctl show")
+ _dump_cmd("arp -n")
+ _dump_cmd("ip addr")
+ _dump_cmd("ip link")
+ _dump_cmd("ip route")
+
+
def process_list():
- print """
-Process Listing
-===============
-"""
- psraw = os.popen("ps axo user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args").read()
- print psraw
+ _header("Process Listing")
+ _dump_cmd("ps axo "
+ "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args")
+
+
+def compute_consoles():
+ _header("Compute consoles")
+ for root, dirnames, filenames in os.walk('/opt/stack'):
+ for filename in fnmatch.filter(filenames, 'console.log'):
+ fullpath = os.path.join(root, filename)
+ _dump_cmd("sudo cat %s" % fullpath)
def main():
@@ -79,6 +115,9 @@
os.dup2(f.fileno(), sys.stdout.fileno())
disk_space()
process_list()
+ network_dump()
+ iptables_dump()
+ compute_consoles()
if __name__ == '__main__':
diff --git a/tools/xen/README.md b/tools/xen/README.md
index c8f47be..61694e9 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -97,7 +97,7 @@
# Download a vhd and a uec image
IMAGE_URLS="\
https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\
- http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz"
+ http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz"
# Explicitly set virt driver
VIRT_DRIVER=xenserver
diff --git a/unstack.sh b/unstack.sh
index 30981fd..ed7e617 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -192,3 +192,4 @@
# BUG: maybe it doesn't exist? We should isolate this further down.
clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
+clean_lvm_filter