Merge "Make it possible to upload ploop images"
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index b35492e..c652bac 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -88,7 +88,7 @@
parm: nested:int
To make the above value persistent across reboots, add an entry in
-/etc/modprobe.ddist.conf so it looks as below::
+/etc/modprobe.d/dist.conf so it looks as below::
cat /etc/modprobe.d/dist.conf
options kvm-amd nested=y
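
A quick way to confirm the setting took effect (an illustrative aside, assuming the AMD host used in the example above; the Intel equivalent lives under kvm_intel):

    # Reload kvm-amd so the new option is picked up, then read the parameter back
    sudo rmmod kvm-amd && sudo modprobe kvm-amd
    cat /sys/module/kvm_amd/parameters/nested    # expect "1" or "Y"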
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index b166936..1b6f5e3 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -221,7 +221,9 @@
``stackforge/devstack-plugin-FOO`` project.
To enable a plugin to be used in a gate job, the following lines will
-be needed in your project.yaml definition::
+be needed in your ``jenkins/jobs/<project>.yaml`` definition in
+`project-config
+<http://git.openstack.org/cgit/openstack-infra/project-config/>`_::
# Because we are testing a non standard project, add the
# our project repository. This makes zuul do the right
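
For comparison with the gate setup above, a plugin is enabled locally through local.conf; a minimal sketch with a hypothetical plugin name and repository URL:

    [[local|localrc]]
    # plugin name and URL are placeholders, not an actual project
    enable_plugin devstack-plugin-foo git://git.openstack.org/stackforge/devstack-plugin-foo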
diff --git a/functions-common b/functions-common
index 322bf82..60cf04c 100644
--- a/functions-common
+++ b/functions-common
@@ -270,8 +270,9 @@
# Fedora release 16 (Verne)
# XenServer release 6.2.0-70446c (xenenterprise)
# Oracle Linux release 7
+ # CloudLinux release 7.1
os_CODENAME=""
- for r in "Red Hat" CentOS Fedora XenServer; do
+ for r in "Red Hat" CentOS Fedora XenServer CloudLinux; do
os_VENDOR=$r
if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
@@ -375,7 +376,8 @@
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
- [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ]
+ [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] || \
+ [ "$os_VENDOR" = "CloudLinux" ]
}
@@ -684,9 +686,10 @@
# Gets or creates a domain
# Usage: get_or_create_domain <name> <description>
function get_or_create_domain {
+ local domain_id
local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets domain id
- local domain_id=$(
+ domain_id=$(
# Gets domain id
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 domain show $1 \
@@ -705,8 +708,9 @@
function get_or_create_group {
local desc="${3:-}"
local os_url="$KEYSTONE_SERVICE_URI_V3"
+ local group_id
# Gets group id
- local group_id=$(
+ group_id=$(
# Creates new group with --or-show
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 group create $1 \
@@ -719,13 +723,14 @@
# Gets or creates user
# Usage: get_or_create_user <username> <password> <domain> [<email>]
function get_or_create_user {
+ local user_id
if [[ ! -z "$4" ]]; then
local email="--email=$4"
else
local email=""
fi
# Gets user id
- local user_id=$(
+ user_id=$(
# Creates new user with --or-show
openstack user create \
$1 \
@@ -743,7 +748,8 @@
# Gets or creates project
# Usage: get_or_create_project <name> <domain>
function get_or_create_project {
- local project_id=$(
+ local project_id
+ project_id=$(
# Creates new project with --or-show
openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \
--os-identity-api-version=3 \
@@ -757,7 +763,8 @@
# Gets or creates role
# Usage: get_or_create_role <name>
function get_or_create_role {
- local role_id=$(
+ local role_id
+ role_id=$(
# Creates role with --or-show
openstack role create $1 \
--os-url=$KEYSTONE_SERVICE_URI_V3 \
@@ -770,8 +777,9 @@
# Gets or adds user role to project
# Usage: get_or_add_user_project_role <role> <user> <project>
function get_or_add_user_project_role {
+ local user_role_id
# Gets user role id
- local user_role_id=$(openstack role list \
+ user_role_id=$(openstack role list \
--user $2 \
--os-url=$KEYSTONE_SERVICE_URI_V3 \
--os-identity-api-version=3 \
@@ -795,8 +803,9 @@
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
+ local group_role_id
# Gets group role id
- local group_role_id=$(openstack role list \
+ group_role_id=$(openstack role list \
--os-url=$KEYSTONE_SERVICE_URI_V3 \
--os-identity-api-version=3 \
--group $2 \
@@ -822,8 +831,9 @@
# Gets or creates service
# Usage: get_or_create_service <name> <type> <description>
function get_or_create_service {
+ local service_id
# Gets service id
- local service_id=$(
+ service_id=$(
# Gets service id
openstack service show $2 -f value -c id 2>/dev/null ||
# Creates new service if not exists
@@ -841,7 +851,8 @@
# Create an endpoint with a specific interface
# Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
function _get_or_create_endpoint_with_interface {
- local endpoint_id=$(openstack endpoint list \
+ local endpoint_id
+ endpoint_id=$(openstack endpoint list \
--os-url $KEYSTONE_SERVICE_URI_V3 \
--os-identity-api-version=3 \
--service $1 \
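
The functions-common hunks above all apply the same pattern: declare the local variable first, then assign it. With `local var=$(cmd)`, `$?` reflects the status of `local` itself, so a failure of `cmd` is silently swallowed. A minimal sketch of the difference:

    # Illustrative only: why the declaration is split from the assignment
    function bad {
        local out=$(false)    # $? is the status of `local`, i.e. 0
        echo "bad:  $?"
    }
    function good {
        local out
        out=$(false)          # $? is the status of `false`, i.e. 1
        echo "good: $?"
    }
    bad; good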
diff --git a/lib/ceilometer b/lib/ceilometer
index 7905384..9226d85 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -211,6 +211,7 @@
cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR
cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR
+ cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR
if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then
sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
diff --git a/lib/cinder b/lib/cinder
index a9a9f0d..e5ed2db 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -485,7 +485,9 @@
local be be_name
for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
be_name=${be##*:}
- openstack volume type create --property volume_backend_name="${be_name}" ${be_name}
+ # FIXME(jamielennox): Remove --os-volume-api-version pinning when
+ # osc supports volume type create on v2 api. bug #1475060
+ openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name}
done
fi
}
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 9c9401e..fb55b60 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -95,9 +95,9 @@
sudo bash -c "source $TOP_DIR/functions && \
iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \
iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \
- iniset $my_conf mysqld default-storage-engine InnoDB \
- iniset $my_conf mysqld max_connections 1024 \
- iniset $my_conf mysqld query_cache_type OFF \
+ iniset $my_conf mysqld default-storage-engine InnoDB && \
+ iniset $my_conf mysqld max_connections 1024 && \
+ iniset $my_conf mysqld query_cache_type OFF && \
iniset $my_conf mysqld query_cache_size 0"
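
The mysql fix above restores the missing `&&` separators: inside a quoted `bash -c` string, a bare backslash continuation only joins the lines, so the later `iniset` calls were being passed as extra arguments to the first one instead of being executed. A tiny illustration:

    # Illustrative only
    bash -c "echo one \
        echo two"         # prints: one echo two   (second command never runs)
    bash -c "echo one && \
        echo two"         # prints: one, then two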
diff --git a/lib/keystone b/lib/keystone
index 428e615..e2448c9 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -35,6 +35,7 @@
# --------
# Set up default directories
+GITDIR["keystoneauth"]=$DEST/keystoneauth
GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient
GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware
KEYSTONE_DIR=$DEST/keystone
@@ -313,6 +314,8 @@
iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS"
# Public workers will use the server default, typically number of CPU.
+
+ iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
}
function configure_keystone_extensions {
@@ -475,11 +478,23 @@
$KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}"
done
- if [[ "$KEYSTONE_TOKEN_FORMAT" != "uuid" ]]; then
+ if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
# Set up certificates
rm -rf $KEYSTONE_CONF_DIR/ssl
$KEYSTONE_BIN_DIR/keystone-manage pki_setup
fi
+ if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then
+ rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
+ $KEYSTONE_BIN_DIR/keystone-manage fernet_setup
+ fi
+}
+
+# install_keystoneauth() - Collect source and prepare
+function install_keystoneauth {
+ if use_library_from_git "keystoneauth"; then
+ git_clone_by_name "keystoneauth"
+ setup_dev_lib "keystoneauth"
+ fi
}
# install_keystoneclient() - Collect source and prepare
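
The keystone hunks above add fernet as a selectable token format alongside uuid/pki/pkiz, point the key repository at $KEYSTONE_CONF_DIR/fernet-keys/, and run keystone-manage fernet_setup when it is chosen. A sketch of opting in from local.conf (assuming the default /etc/keystone config dir):

    [[local|localrc]]
    KEYSTONE_TOKEN_FORMAT=fernet
    # after stack.sh, `keystone-manage fernet_setup` should have populated
    # /etc/keystone/fernet-keys/ with the signing/encryption keys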
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 5abe55c..2c9dd1a 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -707,11 +707,10 @@
fi
}
-# Start running processes, including screen
-function start_neutron_agents {
- # Start up the neutron agents if enabled
+# Control of the l2 agent is separated out to make it easier to test partial
+# upgrades (everything upgraded except the L2 agent)
+function start_neutron_l2_agent {
run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
if is_provider_network; then
sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
@@ -726,6 +725,10 @@
sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
fi
fi
+}
+
+function start_neutron_other_agents {
+ run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
if is_service_enabled q-vpn; then
run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
@@ -734,23 +737,27 @@
fi
run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
+ run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
# For XenServer, start an agent for the domU openvswitch
run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
fi
-
- if is_service_enabled q-lbaas; then
- run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
- fi
-
- if is_service_enabled q-metering; then
- run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
- fi
}
-# stop_neutron() - Stop running processes (non-screen)
-function stop_neutron {
+# Start running processes, including screen
+function start_neutron_agents {
+ # Start up the neutron agents if enabled
+ start_neutron_l2_agent
+ start_neutron_other_agents
+}
+
+function stop_neutron_l2_agent {
+ stop_process q-agt
+}
+
+function stop_neutron_other {
if is_service_enabled q-dhcp; then
stop_process q-dhcp
pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
@@ -765,8 +772,6 @@
stop_process q-meta
fi
- stop_process q-agt
-
if is_service_enabled q-lbaas; then
neutron_lbaas_stop
fi
@@ -781,6 +786,12 @@
fi
}
+# stop_neutron() - Stop running processes (non-screen)
+function stop_neutron {
+ stop_neutron_other
+ stop_neutron_l2_agent
+}
+
# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
# on startup, or back to the public interface on cleanup
function _move_neutron_addresses_route {
diff --git a/lib/nova b/lib/nova
index a6cd651..6441a89 100644
--- a/lib/nova
+++ b/lib/nova
@@ -490,7 +490,6 @@
iniset $NOVA_CONF database connection `database_connection_url nova`
iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
- iniset $NOVA_CONF osapi_v3 enabled "True"
iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index f52629d..c54a716 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -64,6 +64,10 @@
if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then
iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
iniset $NOVA_CONF libvirt images_type "ploop"
+ iniset $NOVA_CONF DEFAULT force_raw_images "False"
+ iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address $HOST_IP
+ iniset $NOVA_CONF DEFAULT vncserver_listen $HOST_IP
+ iniset $NOVA_CONF DEFAULT vnc_keymap
fi
}
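
These libvirt settings cover the parallels/ploop case referenced in the merge subject. A hypothetical upload of a ploop image, assuming glance accepts the ploop disk format and treating the image name and file path as placeholders:

    # Illustrative only; exact flags depend on the client version in use
    openstack image create centos7-ploop \
        --disk-format ploop --container-format bare \
        --file /path/to/root.hds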
diff --git a/lib/swift b/lib/swift
index 826f233..96d730e 100644
--- a/lib/swift
+++ b/lib/swift
@@ -46,6 +46,7 @@
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
+SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
# TODO: add logging to different location.
@@ -361,6 +362,9 @@
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
+ iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
+
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
if is_service_enabled tls-proxy; then
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
@@ -463,17 +467,23 @@
local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
generate_swift_config_services ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object
+ iniuncomment ${swift_node_config} DEFAULT bind_ip
+ iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
+ iniuncomment ${swift_node_config} DEFAULT bind_ip
+ iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
iniuncomment ${swift_node_config} app:container-server allow_versions
iniset ${swift_node_config} app:container-server allow_versions "true"
swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
generate_swift_config_services ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account
+ iniuncomment ${swift_node_config} DEFAULT bind_ip
+ iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
done
# Set new accounts in tempauth to match keystone tenant/user (to make testing easier)
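
The new SWIFT_SERVICE_LISTEN_ADDRESS variable lets the proxy and storage servers bind somewhere other than the default $SERVICE_LISTEN_ADDRESS. A sketch of overriding it and checking the result (the address and the /etc/swift path are assumptions for illustration):

    [[local|localrc]]
    SWIFT_SERVICE_LISTEN_ADDRESS=10.0.0.5

    # after stack.sh:
    #   grep '^bind_ip' /etc/swift/proxy-server.conf
    #   bind_ip = 10.0.0.5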
diff --git a/lib/tempest b/lib/tempest
index 1376c87..68ddd44 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -30,6 +30,7 @@
# - ``DEFAULT_INSTANCE_TYPE``
# - ``DEFAULT_INSTANCE_USER``
# - ``CINDER_ENABLED_BACKENDS``
+# - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
#
# ``stack.sh`` calls the entry points in this order:
#
@@ -380,6 +381,10 @@
# TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
+ # TODO(mriedem): Remove this when kilo-eol happens since the
+ # neutron.allow_duplicate_networks option was removed from nova in Liberty
+ # and is now the default behavior.
+ iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True}
# Network
iniset $TEMPEST_CONFIG network api_version 2.0
diff --git a/stack.sh b/stack.sh
index 49f9415..cc8bc8c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -750,6 +750,7 @@
install_oslo
# Install client libraries
+install_keystoneauth
install_keystoneclient
install_glanceclient
install_cinderclient
diff --git a/stackrc b/stackrc
index d16fcf6..8beef96 100644
--- a/stackrc
+++ b/stackrc
@@ -280,6 +280,10 @@
GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
+# the base authentication plugins that clients use to authenticate
+GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
+GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master}
+
# python keystone client library to nova that horizon uses
GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master}
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 8dc3ba3..d10cd0e 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -41,6 +41,7 @@
ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
ALL_LIBS+=" oslo.cache oslo.reports"
+ALL_LIBS+=" keystoneauth"
# Generate the above list with
# echo ${!GITREPO[@]}
diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh
new file mode 100755
index 0000000..f407d40
--- /dev/null
+++ b/tests/test_worlddump.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Simple test of worlddump.py
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+source $TOP/tests/unittest.sh
+
+OUT_DIR=$(mktemp -d)
+
+$TOP/tools/worlddump.py -d $OUT_DIR
+
+if [[ $? -ne 0 ]]; then
+ fail "worlddump failed"
+else
+
+ # worlddump creates just one output file
+ OUT_FILE=($OUT_DIR/*.txt)
+
+ if [ ! -r $OUT_FILE ]; then
+ failed "worlddump output not seen"
+ else
+ passed "worlddump output $OUT_FILE"
+
+ if [[ $(stat -c %s $OUT_DIR/*.txt) -gt 0 ]]; then
+ passed "worlddump output is not zero sized"
+ fi
+
+ # put more extensive examination here, if required.
+ fi
+fi
+
+rm -rf $OUT_DIR
+
+report_results
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 628a69f..e4ba02b 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -21,9 +21,9 @@
import fnmatch
import os
import os.path
+import subprocess
import sys
-from subprocess import Popen
def get_options():
parser = argparse.ArgumentParser(
@@ -47,7 +47,10 @@
print cmd
print "-" * len(cmd)
print
- Popen(cmd, shell=True)
+ try:
+ subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError:
+ print "*** Failed to run: %s" % cmd
def _header(name):
@@ -109,6 +112,13 @@
def guru_meditation_report():
_header("nova-compute Guru Meditation Report")
+
+ try:
+ subprocess.check_call(["pgrep","nova-compute"])
+ except subprocess.CalledProcessError:
+ print "Skipping as nova-compute does not appear to be running"
+ return
+
_dump_cmd("kill -s USR1 `pgrep nova-compute`")
print "guru meditation report in nova-compute log"