Merge "Update samples"
diff --git a/.gitignore b/.gitignore
index c49b4a3..1840352 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,8 +7,10 @@
localrc
local.sh
files/*.gz
+files/*.qcow2
files/images
files/pip-*
+files/get-pip.py
stack-screenrc
*.pem
accrc
@@ -17,3 +19,5 @@
devstack-docs-*
docs/
docs-files
+.localrc.auto
+local.conf
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index 18bef8b..edcc6d4 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -2,6 +2,22 @@
# **cinder_cert.sh**
+# This script is a simple wrapper around the tempest volume api tests
+# It requires that you have a working and functional devstack install
+# and that you've enabled your device driver by making the necessary
+# modifications to /etc/cinder/cinder.conf
+
+# This script will refresh your OpenStack repos and restart the cinder
+# services to pick up your driver changes.
+# Please NOTE: this script assumes your devstack install is functional
+# and includes tempest. A good first step is to make sure you can
+# create volumes on your device before you even try to run this script.
+
+# It also assumes default install location (/opt/stack/xxx)
+# To aid in debugging, you should also verify that you've added
+# an output directory for screen logs:
+# SCREEN_LOGDIR=/opt/stack/screen-logs
+
CERT_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $CERT_DIR/..; pwd)
@@ -73,9 +89,9 @@
sleep 5
# run tempest api/volume/test_*
-log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True
+log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True
exec 2> >(tee -a $TEMPFILE)
-`./run_tests.sh -N tempest.api.volume.test_*`
+`./tools/pretty_tox.sh api.volume`
if [[ $? = 0 ]]; then
log_message "CONGRATULATIONS!!! Device driver PASSED!", True
log_message "Submit output: ($TEMPFILE)"
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 1b1ac06..d223301 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -43,6 +43,10 @@
# Test as the admin user
. $TOP_DIR/openrc admin admin
+# If nova api is not enabled we exit with exitcode 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
# Cells does not support aggregates.
is_service_enabled n-cell && exit 55
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index b83678a..5470960 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -39,6 +39,10 @@
rm -f $TOP_DIR/cert.pem
rm -f $TOP_DIR/pk.pem
+# If nova api is not enabled we exit with exitcode 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
# Get Certificates
nova x509-get-root-cert $TOP_DIR/cacert.pem
nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem
diff --git a/exercises/euca.sh b/exercises/euca.sh
index ed521e4..51b2644 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -41,6 +41,10 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
+# If nova api is not enabled we exit with exitcode 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
# Skip if the hypervisor is Docker
[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 7055278..4ca90a5 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -38,6 +38,10 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
+# If nova api is not enabled we exit with exitcode 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
# Skip if the hypervisor is Docker
[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 0a100c0..1343f11 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -185,6 +185,14 @@
fi
}
+function neutron_debug_admin {
+ local os_username=$OS_USERNAME
+ local os_tenant_id=$OS_TENANT_ID
+ source $TOP_DIR/openrc admin admin
+ neutron-debug $@
+ source $TOP_DIR/openrc $os_username $os_tenant_id
+}
+
function add_tenant {
local TENANT=$1
local USER=$2
@@ -241,7 +249,7 @@
local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
- neutron-debug probe-create --device-owner compute $NET_ID
+ neutron_debug_admin probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
}
@@ -400,10 +408,10 @@
echo Description
echo
echo Copyright 2012, Cisco Systems
- echo Copyright 2012, Nicira Networks, Inc.
+ echo Copyright 2012, VMware, Inc.
echo Copyright 2012, NTT MCL, Inc.
echo
- echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com
+ echo Please direct any questions to dedutta@cisco.com, dwendlandt@vmware.com, nachi@nttmcl.com
echo
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index eb32cc7..d71a1e0 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -33,6 +33,10 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
+# If nova api is not enabled we exit with exitcode 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
# Skip if the hypervisor is Docker
[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
diff --git a/files/apts/cinder b/files/apts/cinder
index f8e3b6d..712fee9 100644
--- a/files/apts/cinder
+++ b/files/apts/cinder
@@ -4,4 +4,4 @@
libpq-dev
python-dev
open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise
+open-iscsi-utils # Deprecated since quantal dist:precise
diff --git a/files/apts/glance b/files/apts/glance
index 26826a5..22787bc 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -9,7 +9,6 @@
python-eventlet
python-routes
python-greenlet
-python-argparse # dist:oneiric
python-sqlalchemy
python-wsgiref
python-pastedeploy
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index 88e0144..29e3760 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -2,7 +2,7 @@
nbd-client
lvm2
open-iscsi
-open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise
+open-iscsi-utils # Deprecated since quantal dist:precise
genisoimage
sysfsutils
sg3-utils
diff --git a/files/apts/neutron b/files/apts/neutron
index 0f4b69f..648716a 100644
--- a/files/apts/neutron
+++ b/files/apts/neutron
@@ -18,8 +18,8 @@
python-pyudev
python-qpid # dist:precise
dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal
+dnsmasq-utils # for dhcp_release only available in dist:precise
rabbitmq-server # NOPRIME
-qpid # NOPRIME
+qpidd # NOPRIME
sqlite3
vlan
diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy
index 0a44015..8fca42d 100644
--- a/files/apts/tls-proxy
+++ b/files/apts/tls-proxy
@@ -1 +1 @@
-stud # only available in dist:precise,quantal
+stud # only available in dist:precise
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index 277904a..e64f68f 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -47,7 +47,17 @@
catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292
catalog.RegionOne.image.name = Image Service
-catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.name = Heat CloudFormation Service
+
+catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
catalog.RegionOne.orchestration.name = Heat Service
+
+catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.name = Telemetry Service
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index ea2d52d..d477c42 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -6,7 +6,6 @@
# ------------------------------------------------------------------
# service glance admin
# service heat service # if enabled
-# service ceilometer admin # if enabled
# Tempest Only:
# alt_demo alt_demo Member
#
@@ -28,16 +27,6 @@
export SERVICE_ENDPOINT=$SERVICE_ENDPOINT
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-function get_id () {
- echo `"$@" | awk '/ id / { print $4 }'`
-}
-
-# Lookups
-SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
-MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }")
-
-
# Roles
# -----
@@ -45,53 +34,52 @@
# The admin role in swift allows a user to act as an admin for their tenant,
# but ResellerAdmin is needed for a user to act as any tenant. The name of this
# role is also configurable in swift-proxy.conf
-RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+keystone role-create --name=ResellerAdmin
# Service role, so service users do not have to be admins
-SERVICE_ROLE=$(get_id keystone role-create --name=service)
+keystone role-create --name=service
# Services
# --------
if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
- NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
# Nova needs ResellerAdmin role to download images when accessing
# swift through the s3 api.
keystone user-role-add \
- --tenant-id $SERVICE_TENANT \
- --user-id $NOVA_USER \
- --role-id $RESELLER_ROLE
+ --tenant $SERVICE_TENANT_NAME \
+ --user nova \
+ --role ResellerAdmin
fi
# Heat
if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
- HEAT_USER=$(get_id keystone user-create --name=heat \
+ keystone user-create --name=heat \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=heat@example.com)
- keystone user-role-add --tenant-id $SERVICE_TENANT \
- --user-id $HEAT_USER \
- --role-id $SERVICE_ROLE
+ --tenant $SERVICE_TENANT_NAME \
+ --email=heat@example.com
+ keystone user-role-add --tenant $SERVICE_TENANT_NAME \
+ --user heat \
+ --role service
# heat_stack_user role is for users created by Heat
keystone role-create --name heat_stack_user
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- HEAT_CFN_SERVICE=$(get_id keystone service-create \
+ keystone service-create \
--name=heat-cfn \
--type=cloudformation \
- --description="Heat CloudFormation Service")
+ --description="Heat CloudFormation Service"
keystone endpoint-create \
--region RegionOne \
- --service_id $HEAT_CFN_SERVICE \
+ --service heat-cfn \
--publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
--adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
--internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
- HEAT_SERVICE=$(get_id keystone service-create \
+ keystone service-create \
--name=heat \
--type=orchestration \
- --description="Heat Service")
+ --description="Heat Service"
keystone endpoint-create \
--region RegionOne \
- --service_id $HEAT_SERVICE \
+ --service heat \
--publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
--adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
--internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
@@ -100,23 +88,23 @@
# Glance
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
- GLANCE_USER=$(get_id keystone user-create \
+ keystone user-create \
--name=glance \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=glance@example.com)
+ --tenant $SERVICE_TENANT_NAME \
+ --email=glance@example.com
keystone user-role-add \
- --tenant-id $SERVICE_TENANT \
- --user-id $GLANCE_USER \
- --role-id $ADMIN_ROLE
+ --tenant $SERVICE_TENANT_NAME \
+ --user glance \
+ --role admin
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- GLANCE_SERVICE=$(get_id keystone service-create \
+ keystone service-create \
--name=glance \
--type=image \
- --description="Glance Image Service")
+ --description="Glance Image Service"
keystone endpoint-create \
--region RegionOne \
- --service_id $GLANCE_SERVICE \
+ --service glance \
--publicurl "http://$SERVICE_HOST:9292" \
--adminurl "http://$SERVICE_HOST:9292" \
--internalurl "http://$SERVICE_HOST:9292"
@@ -124,42 +112,23 @@
fi
# Ceilometer
-if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then
- CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \
- --pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
- --email=ceilometer@example.com)
- keystone user-role-add --tenant-id $SERVICE_TENANT \
- --user-id $CEILOMETER_USER \
- --role-id $ADMIN_ROLE
+if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
# Ceilometer needs ResellerAdmin role to access swift account stats.
- keystone user-role-add --tenant-id $SERVICE_TENANT \
- --user-id $CEILOMETER_USER \
- --role-id $RESELLER_ROLE
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- CEILOMETER_SERVICE=$(get_id keystone service-create \
- --name=ceilometer \
- --type=metering \
- --description="Ceilometer Service")
- keystone endpoint-create \
- --region RegionOne \
- --service_id $CEILOMETER_SERVICE \
- --publicurl "http://$SERVICE_HOST:8777" \
- --adminurl "http://$SERVICE_HOST:8777" \
- --internalurl "http://$SERVICE_HOST:8777"
- fi
+ keystone user-role-add --tenant $SERVICE_TENANT_NAME \
+ --user ceilometer \
+ --role ResellerAdmin
fi
# EC2
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- EC2_SERVICE=$(get_id keystone service-create \
+ keystone service-create \
--name=ec2 \
--type=ec2 \
- --description="EC2 Compatibility Layer")
+ --description="EC2 Compatibility Layer"
keystone endpoint-create \
--region RegionOne \
- --service_id $EC2_SERVICE \
+ --service ec2 \
--publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
--adminurl "http://$SERVICE_HOST:8773/services/Admin" \
--internalurl "http://$SERVICE_HOST:8773/services/Cloud"
@@ -169,13 +138,13 @@
# S3
if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- S3_SERVICE=$(get_id keystone service-create \
+ keystone service-create \
--name=s3 \
--type=s3 \
- --description="S3")
+ --description="S3"
keystone endpoint-create \
--region RegionOne \
- --service_id $S3_SERVICE \
+ --service s3 \
--publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
--adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
--internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
@@ -185,14 +154,14 @@
if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
# Tempest has some tests that validate various authorization checks
# between two regular users in separate tenants
- ALT_DEMO_TENANT=$(get_id keystone tenant-create \
- --name=alt_demo)
- ALT_DEMO_USER=$(get_id keystone user-create \
+ keystone tenant-create \
+ --name=alt_demo
+ keystone user-create \
--name=alt_demo \
--pass="$ADMIN_PASSWORD" \
- --email=alt_demo@example.com)
+ --email=alt_demo@example.com
keystone user-role-add \
- --tenant-id $ALT_DEMO_TENANT \
- --user-id $ALT_DEMO_USER \
- --role-id $MEMBER_ROLE
+ --tenant alt_demo \
+ --user alt_demo \
+ --role Member
fi
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index d7b7ea8..c91bac3 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,3 +1,4 @@
selinux-policy-targeted
mongodb-server
pymongo
+mongodb # NOPRIME
diff --git a/files/rpms/cinder b/files/rpms/cinder
index c4edb68..623c13e 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -4,3 +4,4 @@
python-devel
postgresql-devel
iscsi-initiator-utils
+python-lxml #dist:f18,f19,f20
diff --git a/files/rpms/general b/files/rpms/general
index 2db31d1..40246ea 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -20,6 +20,7 @@
tcpdump
unzip
wget
+which
# [1] : some of installed tools have unversioned dependencies on this,
# but others have versioned (<=0.7). So if a later version (0.7.1)
diff --git a/files/rpms/glance b/files/rpms/glance
index dd66171..fffd9c8 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,6 +1,6 @@
gcc
libffi-devel # testonly
-libxml2-devel
+libxml2-devel # testonly
libxslt-devel # testonly
mysql-devel # testonly
openssl-devel # testonly
@@ -9,7 +9,8 @@
python-devel
python-eventlet
python-greenlet
-python-paste-deploy #dist:f16,f17,f18,f19
+python-lxml #dist:f18,f19,f20
+python-paste-deploy #dist:f18,f19,f20
python-routes
python-sqlalchemy
python-wsgiref
diff --git a/files/rpms/horizon b/files/rpms/horizon
index aa27ab4..59503cc 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,8 +16,8 @@
python-migrate
python-mox
python-nose
-python-paste #dist:f16,f17,f18,f19
-python-paste-deploy #dist:f16,f17,f18,f19
+python-paste #dist:f18,f19,f20
+python-paste-deploy #dist:f18,f19,f20
python-routes
python-sphinx
python-sqlalchemy
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 52dbf47..99e8524 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,11 +1,11 @@
python-greenlet
-python-lxml #dist:f16,f17,f18,f19
-python-paste #dist:f16,f17,f18,f19
-python-paste-deploy #dist:f16,f17,f18,f19
-python-paste-script #dist:f16,f17,f18,f19
+libxslt-devel # dist:f20
+python-lxml #dist:f18,f19,f20
+python-paste #dist:f18,f19,f20
+python-paste-deploy #dist:f18,f19,f20
+python-paste-script #dist:f18,f19,f20
python-routes
python-sqlalchemy
-python-sqlite2
python-webob
sqlite
diff --git a/files/rpms/neutron b/files/rpms/neutron
index a7700f7..67bf523 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
python-iso8601
python-kombu
#rhel6 gets via pip
-python-paste # dist:f16,f17,f18,f19
-python-paste-deploy # dist:f16,f17,f18,f19
+python-paste # dist:f18,f19,f20
+python-paste-deploy # dist:f18,f19,f20
python-qpid
python-routes
python-sqlalchemy
diff --git a/files/rpms/nova b/files/rpms/nova
index c99f3de..ac70ac5 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,11 +28,11 @@
python-lockfile
python-migrate
python-mox
-python-paramiko # dist:f16,f17,f18,f19
+python-paramiko # dist:f18,f19,f20
# ^ on RHEL, brings in python-crypto which conflicts with version from
# pip we need
-python-paste # dist:f16,f17,f18,f19
-python-paste-deploy # dist:f16,f17,f18,f19
+python-paste # dist:f18,f19,f20
+python-paste-deploy # dist:f18,f19,f20
python-qpid
python-routes
python-sqlalchemy
diff --git a/files/rpms/swift b/files/rpms/swift
index b137f30..32432bc 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -9,7 +9,7 @@
python-greenlet
python-netifaces
python-nose
-python-paste-deploy # dist:f16,f17,f18,f19
+python-paste-deploy # dist:f18,f19,f20
python-simplejson
python-webob
pyxattr
diff --git a/files/rpms/tempest b/files/rpms/tempest
index de32b81..e7bbd43 100644
--- a/files/rpms/tempest
+++ b/files/rpms/tempest
@@ -1 +1 @@
-libxslt-dev
\ No newline at end of file
+libxslt-devel
diff --git a/files/rpms/trove b/files/rpms/trove
index 09dcee8..c5cbdea 100644
--- a/files/rpms/trove
+++ b/files/rpms/trove
@@ -1 +1 @@
-libxslt1-dev # testonly
+libxslt-devel # testonly
diff --git a/functions b/functions
index e79e1d5..73d65ce 100644
--- a/functions
+++ b/functions
@@ -1132,10 +1132,42 @@
sleep 1.5
NL=`echo -ne '\015'`
- screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
+ # This fun command does the following:
+ # - the passed server command is backgrounded
+ # - the pid of the background process is saved in the usual place
+ # - the server process is brought back to the foreground
+ # - if the server process exits prematurely the fg command errors
+ # and a message is written to stdout and the service failure file
+ # The pid saved can be used in screen_stop() as a process group
+ # id to kill off all child processes
+ screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
else
# Spawn directly without screen
- run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid
+ run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+ fi
+ fi
+}
+
+
+# Stop a service in screen
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# screen_stop service
+function screen_stop() {
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+ USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+ if is_service_enabled $1; then
+ # Kill via pid if we have one available
+ if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
+ pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
+ rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
+ fi
+ if [[ "$USE_SCREEN" = "True" ]]; then
+ # Clean up the screen window
+ screen -S $SCREEN_NAME -p $1 -X kill
fi
fi
}
@@ -1272,7 +1304,8 @@
echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir"
# Don't update repo if local changes exist
- (cd $project_dir && git diff --quiet)
+ # Don't use buggy "git diff --quiet"
+ (cd $project_dir && git diff --exit-code >/dev/null)
local update_requirements=$?
if [ $update_requirements -eq 0 ]; then
@@ -1364,11 +1397,11 @@
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+aki style), then extracts it.
if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
- wget -c $image_url -O $FILES/$IMAGE_FNAME
- if [[ $? -ne 0 ]]; then
- echo "Not found: $image_url"
- return
- fi
+ wget -c $image_url -O $FILES/$IMAGE_FNAME
+ if [[ $? -ne 0 ]]; then
+ echo "Not found: $image_url"
+ return
+ fi
fi
IMAGE="$FILES/${IMAGE_FNAME}"
else
@@ -1420,7 +1453,7 @@
vmdk_create_type="${vmdk_create_type%?}"
descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
- `"should use a descriptor-data pair."
+ `"should use a descriptor-data pair."
if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
vmdk_disktype="sparse"
elif [[ "$vmdk_create_type" = "monolithicFlat" || \
@@ -1435,7 +1468,7 @@
path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
flat_url="${image_url:0:$path_len}$flat_fname"
warn $LINENO "$descriptor_data_pair_msg"`
- `" Attempt to retrieve the *-flat.vmdk: $flat_url"
+ `" Attempt to retrieve the *-flat.vmdk: $flat_url"
if [[ $flat_url != file* ]]; then
if [[ ! -f $FILES/$flat_fname || \
"$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
@@ -1474,7 +1507,7 @@
flat_path="${image_url:0:$path_len}"
descriptor_url=$flat_path$descriptor_fname
warn $LINENO "$descriptor_data_pair_msg"`
- `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+ `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
if [[ $flat_path != file* ]]; then
if [[ ! -f $FILES/$descriptor_fname || \
"$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
@@ -1489,8 +1522,8 @@
descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
if [[ ! -f $descriptor_url || \
"$(stat -c "%s" $descriptor_url)" == "0" ]]; then
- warn $LINENO "Descriptor not found $descriptor_url"
- descriptor_found=false
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
fi
fi
if $descriptor_found; then
@@ -1498,10 +1531,10 @@
`"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
vmdk_adapter_type="${vmdk_adapter_type#*\"}"
vmdk_adapter_type="${vmdk_adapter_type%?}"
- fi
- fi
- #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
- vmdk_disktype="preallocated"
+ fi
+ fi
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+ vmdk_disktype="preallocated"
else
#TODO(alegendre): handle streamOptimized once supported by the VMware driver.
vmdk_disktype="preallocated"
@@ -1510,7 +1543,7 @@
# NOTE: For backwards compatibility reasons, colons may be used in place
# of semi-colons for property delimiters but they are not permitted
# characters in NTFS filesystems.
- property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'`
+ property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'`
IFS=':;' read -a props <<< "$property_string"
vmdk_disktype="${props[0]:-$vmdk_disktype}"
vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
diff --git a/lib/ceilometer b/lib/ceilometer
index fac3be1..6f3896f 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -48,8 +48,50 @@
# Set up database backend
CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}
+# Ceilometer connection info.
+CEILOMETER_SERVICE_PROTOCOL=http
+CEILOMETER_SERVICE_HOST=$SERVICE_HOST
+CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
+#
+
# Functions
# ---------
+#
+# create_ceilometer_accounts() - Set up common required ceilometer accounts
+
+create_ceilometer_accounts() {
+
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ # Ceilometer
+ if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
+ CEILOMETER_USER=$(keystone user-create \
+ --name=ceilometer \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=ceilometer@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant-id $SERVICE_TENANT \
+ --user-id $CEILOMETER_USER \
+ --role-id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ CEILOMETER_SERVICE=$(keystone service-create \
+ --name=ceilometer \
+ --type=metering \
+ --description="OpenStack Telemetry Service" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $CEILOMETER_SERVICE \
+ --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
+ --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
+ --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT"
+ fi
+ fi
+}
+
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
@@ -109,6 +151,8 @@
function configure_mongodb() {
if is_fedora; then
+ # install mongodb client
+ install_package mongodb
# ensure smallfiles selected to minimize freespace requirements
sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
@@ -162,7 +206,7 @@
function stop_ceilometer() {
# Kill the ceilometer screen windows
for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
- screen -S $SCREEN_NAME -p $serv -X kill
+ screen_stop $serv
done
}
diff --git a/lib/cinder b/lib/cinder
index cbe732e..5397308 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -163,15 +163,8 @@
fi
}
-# configure_cinder() - Set config files, create data dirs, etc
-function configure_cinder() {
- if [[ ! -d $CINDER_CONF_DIR ]]; then
- sudo mkdir -p $CINDER_CONF_DIR
- fi
- sudo chown $STACK_USER $CINDER_CONF_DIR
-
- cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
-
+# configure_cinder_rootwrap() - configure Cinder's rootwrap
+function configure_cinder_rootwrap() {
# Set the paths of certain binaries
CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
if [[ ! -x $CINDER_ROOTWRAP ]]; then
@@ -214,6 +207,18 @@
chmod 0440 $TEMPFILE
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
+}
+
+# configure_cinder() - Set config files, create data dirs, etc
+function configure_cinder() {
+ if [[ ! -d $CINDER_CONF_DIR ]]; then
+ sudo mkdir -p $CINDER_CONF_DIR
+ fi
+ sudo chown $STACK_USER $CINDER_CONF_DIR
+
+ cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
+
+ configure_cinder_rootwrap
cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
@@ -341,7 +346,7 @@
-e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \
/etc/lvm/lvm.conf
fi
- iniset $CINDER_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT
+ configure_API_version $CINDER_CONF $IDENTITY_API_VERSION
iniset $CINDER_CONF keystone_authtoken admin_user cinder
iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -365,7 +370,7 @@
CINDER_USER=$(keystone user-create \
--name=cinder \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=cinder@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
@@ -385,7 +390,7 @@
--adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
--internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
CINDER_V2_SERVICE=$(keystone service-create \
- --name=cinder \
+ --name=cinderv2 \
--type=volumev2 \
--description="Cinder Volume Service V2" \
| grep " id " | get_field 2)
@@ -556,7 +561,7 @@
function stop_cinder() {
# Kill the cinder screen windows
for serv in c-api c-bak c-sch c-vol; do
- screen -S $SCREEN_NAME -p $serv -X kill
+ screen_stop $serv
done
if is_service_enabled c-vol; then
diff --git a/lib/glance b/lib/glance
index 135136d..55d5fb3 100644
--- a/lib/glance
+++ b/lib/glance
@@ -83,7 +83,7 @@
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
- iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -101,7 +101,7 @@
iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
- iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION
iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -125,7 +125,7 @@
iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
- iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store
+ iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
fi
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -206,8 +206,8 @@
# stop_glance() - Stop running processes
function stop_glance() {
# Kill the Glance screen windows
- screen -S $SCREEN_NAME -p g-api -X kill
- screen -S $SCREEN_NAME -p g-reg -X kill
+ screen_stop g-api
+ screen_stop g-reg
}
diff --git a/lib/heat b/lib/heat
index e44a618..0307c64 100644
--- a/lib/heat
+++ b/lib/heat
@@ -95,7 +95,7 @@
iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
- iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ configure_API_version $HEAT_CONF $IDENTITY_API_VERSION
iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $HEAT_CONF keystone_authtoken admin_user heat
@@ -110,15 +110,12 @@
[[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone
# OpenStack API
- iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST
iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
# Cloudformation API
- iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST
iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT
# Cloudwatch API
- iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST
iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
# heat environment
@@ -175,7 +172,7 @@
function stop_heat() {
# Kill the screen windows
for serv in h-eng h-api h-api-cfn h-api-cw; do
- screen -S $SCREEN_NAME -p $serv -X kill
+ screen_stop $serv
done
}
diff --git a/lib/ironic b/lib/ironic
index 099746a..afbc3e0 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -33,7 +33,6 @@
IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf
IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
-IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d
IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
# Support entry points installation of console scripts
@@ -118,7 +117,7 @@
# Sets conductor specific settings.
function configure_ironic_conductor() {
cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
- cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS
+ cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
}
@@ -149,11 +148,11 @@
IRONIC_USER=$(keystone user-create \
--name=ironic \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=ironic@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--user_id $IRONIC_USER \
--role_id $ADMIN_ROLE
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
diff --git a/lib/keystone b/lib/keystone
index 29b9604..0850fb2 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -28,7 +28,6 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
-
# Defaults
# --------
@@ -246,14 +245,14 @@
fi
# Set up logging
- LOGGING_ROOT="devel"
if [ "$SYSLOG" != "False" ]; then
- LOGGING_ROOT="$LOGGING_ROOT,production"
+ iniset $KEYSTONE_CONF DEFAULT use_syslog "True"
fi
- KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
- cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
- iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
- iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
+
+ # Format logging
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ setup_colorized_logging $KEYSTONE_CONF DEFAULT
+ fi
if is_apache_enabled_service key; then
_config_keystone_apache_wsgi
@@ -335,6 +334,14 @@
fi
}
+# Configure the API version for the OpenStack projects.
+# configure_API_version conf_file version
+function configure_API_version() {
+ local conf_file=$1
+ local api_version=$2
+ iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version
+}
+
# init_keystone() - Initialize databases, etc.
function init_keystone() {
if is_service_enabled ldap; then
@@ -403,7 +410,7 @@
screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
else
# Start Keystone in a screen window
- screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug"
+ screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
fi
echo "Waiting for keystone to start..."
@@ -421,7 +428,7 @@
# stop_keystone() - Stop running processes
function stop_keystone() {
# Kill the Keystone screen window
- screen -S $SCREEN_NAME -p key -X kill
+ screen_stop key
}
diff --git a/lib/marconi b/lib/marconi
index 742f866..6b9ffdc 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -142,7 +142,7 @@
MARCONI_USER=$(get_id keystone user-create --name=marconi \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=marconi@example.com)
keystone user-role-add --tenant-id $SERVICE_TENANT \
--user-id $MARCONI_USER \
diff --git a/lib/neutron b/lib/neutron
index a7519ad..960f11b 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -1,5 +1,5 @@
# lib/neutron
-# functions - funstions specific to neutron
+# functions - functions specific to neutron
# Dependencies:
# ``functions`` file
@@ -328,7 +328,7 @@
NEUTRON_USER=$(keystone user-create \
--name=neutron \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=neutron@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
@@ -357,7 +357,7 @@
# Create a small network
# Since neutron command is executed in admin context at this point,
- # ``--tenant_id`` needs to be specified.
+ # ``--tenant-id`` needs to be specified.
if is_baremetal; then
if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then
die $LINENO "Neutron settings for baremetal not set.. exiting"
@@ -367,16 +367,16 @@
sudo ip addr del $IP dev $PUBLIC_INTERFACE
sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
done
- NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
+ NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant-id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
- SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
sudo ifconfig $OVS_PHYSICAL_BRIDGE up
sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
else
- NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
- SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ SUBNET_ID=$(neutron subnet-create --tenant-id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
fi
@@ -384,7 +384,7 @@
# Create a router, and add the private subnet as one of its interfaces
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
- ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
@@ -505,8 +505,7 @@
[ ! -z "$pid" ] && sudo kill -9 $pid
fi
if is_service_enabled q-meta; then
- pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }')
- [ ! -z "$pid" ] && sudo kill -9 $pid
+ sudo pkill -9 neutron-ns-metadata-proxy || :
fi
if is_service_enabled q-lbaas; then
@@ -611,9 +610,6 @@
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
- iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
- # Intermediate fix until Neutron patch lands and then line above will
- # be cleaned.
iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND"
_neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE
@@ -958,6 +954,11 @@
_neutron_third_party_do stop
}
+# check_neutron_third_party_integration() - Check that third party integration is sane
+function check_neutron_third_party_integration() {
+ _neutron_third_party_do check
+}
+
# Restore xtrace
$XTRACE
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index f9275ca..8e18d04 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -38,7 +38,7 @@
}
function neutron_plugin_configure_plugin_agent() {
- AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent"
+ AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent"
}
function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira
deleted file mode 100644
index 87d3c3d..0000000
--- a/lib/neutron_plugins/nicira
+++ /dev/null
@@ -1,149 +0,0 @@
-# Neutron Nicira NVP plugin
-# ---------------------------
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function setup_integration_bridge() {
- _neutron_ovs_base_setup_bridge $OVS_BRIDGE
- # Set manager to NVP controller (1st of list)
- if [[ "$NVP_CONTROLLERS" != "" ]]; then
- # Get the first controller
- controllers=(${NVP_CONTROLLERS//,/ })
- OVS_MGR_IP=${controllers[0]}
- else
- die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
- fi
- sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
-}
-
-function is_neutron_ovs_base_plugin() {
- # NVP uses OVS, but not the l3-agent
- return 0
-}
-
-function neutron_plugin_create_nova_conf() {
- # if n-cpu is enabled, then setup integration bridge
- if is_service_enabled n-cpu; then
- setup_integration_bridge
- fi
-}
-
-function neutron_plugin_install_agent_packages() {
- # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents
- _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common() {
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nicira
- Q_PLUGIN_CONF_FILENAME=nvp.ini
- Q_DB_NAME="neutron_nvp"
- Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2"
-}
-
-function neutron_plugin_configure_debug_command() {
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
-}
-
-function neutron_plugin_configure_dhcp_agent() {
- setup_integration_bridge
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
- iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
- iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
-}
-
-function neutron_plugin_configure_l3_agent() {
- # Nicira plugin does not run L3 agent
- die $LINENO "q-l3 should must not be executed with Nicira plugin!"
-}
-
-function neutron_plugin_configure_plugin_agent() {
- # Nicira plugin does not run L2 agent
- die $LINENO "q-agt must not be executed with Nicira plugin!"
-}
-
-function neutron_plugin_configure_service() {
- if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
- fi
- if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
- fi
- if [[ "$FAILOVER_TIME" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME
- fi
- if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS
- fi
-
- if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
- else
- die $LINENO "The nicira plugin won't work without a default transport zone."
- fi
- if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
- iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network
- fi
- if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
- fi
- # NVP_CONTROLLERS must be a comma separated string
- if [[ "$NVP_CONTROLLERS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS
- else
- die $LINENO "The nicira plugin needs at least an NVP controller."
- fi
- if [[ "$NVP_USER" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER
- fi
- if [[ "$NVP_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD
- fi
- if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT
- fi
- if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT
- fi
- if [[ "$NVP_RETRIES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES
- fi
- if [[ "$NVP_REDIRECTS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS
- fi
- if [[ "$AGENT_MODE" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE nvp agent_mode $AGENT_MODE
- if [[ "$AGENT_MODE" == "agentless" ]]; then
- if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID
- else
- die $LINENO "Agentless mode requires a service cluster."
- fi
- iniset /$Q_PLUGIN_CONF_FILE nvp_metadata metadata_server_address $Q_META_DATA_IP
- fi
- fi
-}
-
-function neutron_plugin_setup_interface_driver() {
- local conf_file=$1
- iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
-function has_neutron_plugin_security_group() {
- # 0 means True here
- return 0
-}
-
-function neutron_plugin_check_adv_test_requirements() {
- is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$MY_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
new file mode 100644
index 0000000..d506cb6
--- /dev/null
+++ b/lib/neutron_plugins/vmware_nsx
@@ -0,0 +1,150 @@
+# Neutron VMware NSX plugin
+# -------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+function setup_integration_bridge() {
+ _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+ # Set manager to NSX controller (1st of list)
+ if [[ "$NSX_CONTROLLERS" != "" ]]; then
+ # Get the first controller
+ controllers=(${NSX_CONTROLLERS//,/ })
+ OVS_MGR_IP=${controllers[0]}
+ else
+ die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
+ fi
+ sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
+}
+
+function is_neutron_ovs_base_plugin() {
+ # NSX uses OVS, but not the l3-agent
+ return 0
+}
+
+function neutron_plugin_create_nova_conf() {
+ # if n-cpu is enabled, then setup integration bridge
+ if is_service_enabled n-cpu; then
+ setup_integration_bridge
+ fi
+}
+
+function neutron_plugin_install_agent_packages() {
+ # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents
+ _neutron_ovs_base_install_agent_packages
+}
+
+function neutron_plugin_configure_common() {
+ Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
+ Q_PLUGIN_CONF_FILENAME=nsx.ini
+ Q_DB_NAME="neutron_nsx"
+ # TODO(armando-migliaccio): rename this once the code rename is complete
+ Q_PLUGIN_CLASS="neutron.plugins.nicira.NeutronPlugin.NvpPluginV2"
+}
+
+function neutron_plugin_configure_debug_command() {
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
+ iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
+}
+
+function neutron_plugin_configure_dhcp_agent() {
+ setup_integration_bridge
+ iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
+ iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
+ iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
+}
+
+function neutron_plugin_configure_l3_agent() {
+ # VMware NSX plugin does not run L3 agent
+ die $LINENO "q-l3 should must not be executed with VMware NSX plugin!"
+}
+
+function neutron_plugin_configure_plugin_agent() {
+ # VMware NSX plugin does not run L2 agent
+ die $LINENO "q-agt must not be executed with VMware NSX plugin!"
+}
+
+function neutron_plugin_configure_service() {
+ if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
+ fi
+ if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
+ fi
+ if [[ "$FAILOVER_TIME" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME
+ fi
+ if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS
+ fi
+
+ if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
+ else
+ die $LINENO "The VMware NSX plugin won't work without a default transport zone."
+ fi
+ if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
+ Q_L3_ENABLED=True
+ Q_L3_ROUTER_PER_TENANT=True
+ iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network
+ fi
+ if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
+ fi
+ # NSX_CONTROLLERS must be a comma separated string
+ if [[ "$NSX_CONTROLLERS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS
+ else
+ die $LINENO "The VMware NSX plugin needs at least an NSX controller."
+ fi
+ if [[ "$NSX_USER" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER
+ fi
+ if [[ "$NSX_PASSWORD" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
+ fi
+ if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT
+ fi
+ if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
+ fi
+ if [[ "$NSX_RETRIES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES
+ fi
+ if [[ "$NSX_REDIRECTS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS
+ fi
+ if [[ "$AGENT_MODE" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE
+ if [[ "$AGENT_MODE" == "agentless" ]]; then
+ if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID
+ else
+ die $LINENO "Agentless mode requires a service cluster."
+ fi
+ iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP
+ fi
+ fi
+}
+
+function neutron_plugin_setup_interface_driver() {
+ local conf_file=$1
+ iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
+function neutron_plugin_check_adv_test_requirements() {
+ is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md
index b289f58..2460e5c 100644
--- a/lib/neutron_thirdparty/README.md
+++ b/lib/neutron_thirdparty/README.md
@@ -34,3 +34,6 @@
* ``stop_<third_party>``:
stop running processes (non-screen)
+
+* ``check_<third_party>``:
+ verify that the integration between neutron server and third-party components is sane
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index ebde067..1fd4fd8 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -45,5 +45,9 @@
:
}
+function check_bigswitch_floodlight() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index 7928bca..e672528 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -56,5 +56,9 @@
:
}
+function check_midonet() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira
deleted file mode 100644
index a24392c..0000000
--- a/lib/neutron_thirdparty/nicira
+++ /dev/null
@@ -1,82 +0,0 @@
-# Nicira NVP
-# ----------
-
-# This third-party addition can be used to configure connectivity between a DevStack instance
-# and an NVP Gateway in dev/test environments. In order to use this correctly, the following
-# env variables need to be set (e.g. in your localrc file):
-#
-# * enable_service nicira --> to execute this third-party addition
-# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
-# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway
-# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# This is the interface that connects the Devstack instance
-# to an network that allows it to talk to the gateway for
-# testing purposes
-NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2}
-# Re-declare floating range as it's needed also in stop_nicira, which
-# is invoked by unstack.sh
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-
-function configure_nicira() {
- :
-}
-
-function init_nicira() {
- if ! is_set NVP_GATEWAY_NETWORK_CIDR; then
- NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address to set on br-ex was not specified. "
- echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR
- fi
- # Make sure the interface is up, but not configured
- sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up
- # Save and then flush the IP addresses on the interface
- addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
- sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE
- # Use the PUBLIC Bridge to route traffic to the NVP gateway
- # NOTE(armando-migliaccio): if running in a nested environment this will work
- # only with mac learning enabled, portsecurity and security profiles disabled
- # The public bridge might not exist for the NVP plugin if Q_USE_DEBUG_COMMAND is off
- # Try to create it anyway
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE
- nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE
- for address in $addresses; do
- sudo ip addr add dev $PUBLIC_BRIDGE $address
- done
- sudo ip addr add dev $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR
-}
-
-function install_nicira() {
- :
-}
-
-function start_nicira() {
- :
-}
-
-function stop_nicira() {
- if ! is_set NVP_GATEWAY_NETWORK_CIDR; then
- NVP_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
- echo "The IP address expected on br-ex was not specified. "
- echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR
- fi
- sudo ip addr del $NVP_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE
- # Save and then flush remaining addresses on the interface
- addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'})
- sudo ip addr flush $PUBLIC_BRIDGE
- # Try to detach physical interface from PUBLIC_BRIDGE
- sudo ovs-vsctl del-port $NVP_GATEWAY_NETWORK_INTERFACE
- # Restore addresses on NVP_GATEWAY_NETWORK_INTERFACE
- for address in $addresses; do
- sudo ip addr add dev $NVP_GATEWAY_NETWORK_INTERFACE $address
- done
-}
-
-# Restore xtrace
-$MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 3b825a1..5edf273 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -75,5 +75,9 @@
:
}
+function check_ryu() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index bdc2356..2b12564 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -109,5 +109,9 @@
sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
}
+function check_trema() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
new file mode 100644
index 0000000..4eb177a
--- /dev/null
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -0,0 +1,86 @@
+# VMware NSX
+# ----------
+
+# This third-party addition can be used to configure connectivity between a DevStack instance
+# and an NSX Gateway in dev/test environments. In order to use this correctly, the following
+# env variables need to be set (e.g. in your localrc file):
+#
+# * enable_service vmware_nsx --> to execute this third-party addition
+# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
+# * NSX_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NSX Gateway
+# * NSX_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# This is the interface that connects the DevStack instance
+# to a network that allows it to talk to the gateway for
+# testing purposes
+NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2}
+# Re-declare floating range as it's needed also in stop_vmware_nsx, which
+# is invoked by unstack.sh
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
+
+function configure_vmware_nsx() {
+ :
+}
+
+function init_vmware_nsx() {
+ if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
+ NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
+ echo "The IP address to set on br-ex was not specified. "
+ echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
+ fi
+ # Make sure the interface is up, but not configured
+ sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up
+ # Save and then flush the IP addresses on the interface
+ addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
+ sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE
+ # Use the PUBLIC Bridge to route traffic to the NSX gateway
+ # NOTE(armando-migliaccio): if running in a nested environment this will work
+ # only with mac learning enabled, portsecurity and security profiles disabled
+ # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off
+ # Try to create it anyway
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
+ sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE
+ nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
+ sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE
+ for address in $addresses; do
+ sudo ip addr add dev $PUBLIC_BRIDGE $address
+ done
+ sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR
+}
+
+function install_vmware_nsx() {
+ :
+}
+
+function start_vmware_nsx() {
+ :
+}
+
+function stop_vmware_nsx() {
+ if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
+ NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
+ echo "The IP address expected on br-ex was not specified. "
+ echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR
+ fi
+ sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE
+ # Save and then flush remaining addresses on the interface
+ addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'})
+ sudo ip addr flush $PUBLIC_BRIDGE
+ # Try to detach physical interface from PUBLIC_BRIDGE
+ sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE
+ # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE
+ for address in $addresses; do
+ sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address
+ done
+}
+
+function check_vmware_nsx() {
+ neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/nova b/lib/nova
index e754341..a4edb53 100644
--- a/lib/nova
+++ b/lib/nova
@@ -318,7 +318,7 @@
NOVA_USER=$(keystone user-create \
--name=nova \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=nova@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
@@ -338,7 +338,7 @@
--adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
--internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
NOVA_V3_SERVICE=$(keystone service-create \
- --name=nova \
+ --name=novav3 \
--type=computev3 \
--description="Nova Compute Service V3" \
| grep " id " | get_field 2)
@@ -377,6 +377,7 @@
iniset $NOVA_CONF DEFAULT osapi_compute_workers "4"
iniset $NOVA_CONF DEFAULT ec2_workers "4"
iniset $NOVA_CONF DEFAULT metadata_workers "4"
+ iniset $NOVA_CONF conductor workers "4"
iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova`
iniset $NOVA_CONF DEFAULT fatal_deprecations "True"
iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
@@ -648,6 +649,13 @@
fi
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+ # Enable client side traces for libvirt
+ local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+ local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+ # Enable server side traces for libvirtd
+ echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+ echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+
# The group **$LIBVIRT_GROUP** is added to the current user in this script.
# Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
@@ -704,7 +712,7 @@
# Some services are listed here twice since more than one instance
# of a service may be running in certain configs.
for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
- screen -S $SCREEN_NAME -p $serv -X kill
+ screen_stop $serv
done
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
stop_nova_hypervisor
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index ef40e7a..6f90f4a 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -93,9 +93,6 @@
fi
fi
- # Change the libvirtd log level to DEBUG.
- sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf
-
# The user that nova runs as needs to be member of **libvirtd** group otherwise
# nova-compute will be unable to use libvirt.
if ! getent group $LIBVIRT_GROUP >/dev/null; then
diff --git a/lib/rpc_backend b/lib/rpc_backend
index ae83e85..f59c800 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -192,9 +192,8 @@
GetDistro
fi
- # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is
- # not in openSUSE either right now.
- ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) )
+ # Qpid is not in openSUSE
+ ( ! is_suse )
}
diff --git a/lib/savanna b/lib/savanna
index 6794e36..c7d59f7 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -56,7 +56,7 @@
SAVANNA_USER=$(keystone user-create \
--name=savanna \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=savanna@example.com \
| grep " id " | get_field 2)
keystone user-role-add \
@@ -95,9 +95,7 @@
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
- recreate_database savanna utf8
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna`
- inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna`
if is_service_enabled neutron; then
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true
@@ -105,6 +103,9 @@
fi
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
+ recreate_database savanna utf8
+ $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head
}
# install_savanna() - Collect source and prepare
diff --git a/lib/swift b/lib/swift
index 96929db..44c230b 100644
--- a/lib/swift
+++ b/lib/swift
@@ -514,7 +514,7 @@
ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2)
+ --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2)
keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
diff --git a/lib/tempest b/lib/tempest
index 95b300c..ef9dfe2 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -329,7 +329,7 @@
iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
# service_available
- for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do
+ for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do
if is_service_enabled $service ; then
iniset $TEMPEST_CONF service_available $service "True"
else
diff --git a/lib/trove b/lib/trove
index f8e3edd..8e817f5 100644
--- a/lib/trove
+++ b/lib/trove
@@ -64,7 +64,7 @@
TROVE_USER=$(keystone user-create \
--name=trove \
--pass="$SERVICE_PASSWORD" \
- --tenant_id $SERVICE_TENANT \
+ --tenant-id $SERVICE_TENANT \
--email=trove@example.com \
| grep " id " | get_field 2)
keystone user-role-add --tenant-id $SERVICE_TENANT \
@@ -198,7 +198,7 @@
function stop_trove() {
# Kill the trove screen windows
for serv in tr-api tr-tmgr tr-cond; do
- screen -S $SCREEN_NAME -p $serv -X kill
+ screen_stop $serv
done
}
diff --git a/stack.sh b/stack.sh
index 2438f9f..a2469f1 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
# developer install.
# To keep this script simple we assume you are running on a recent **Ubuntu**
-# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine. (It may work
+# (12.04 Precise or newer) or **Fedora** (F18 or newer) machine. (It may work
# on other platforms but support for those platforms is left to those who added
# them to DevStack.) It should work in a VM or physical server. Additionally
# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration
@@ -23,6 +23,13 @@
# Make sure custom grep options don't get in the way
unset GREP_OPTIONS
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
@@ -131,7 +138,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -291,6 +298,9 @@
SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
+PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"}
+PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"}
+
# Use color for logging output (only available if syslog is not used)
LOG_COLOR=`trueorfalse True $LOG_COLOR`
@@ -305,9 +315,13 @@
# Configure Projects
# ==================
-# Source project function libraries
+# Import apache functions
source $TOP_DIR/lib/apache
+
+# Import TLS functions
source $TOP_DIR/lib/tls
+
+# Source project function libraries
source $TOP_DIR/lib/infra
source $TOP_DIR/lib/oslo
source $TOP_DIR/lib/stackforge
@@ -860,11 +874,27 @@
# -------
# If enabled, systat has to start early to track OpenStack service startup.
-if is_service_enabled sysstat;then
+if is_service_enabled sysstat; then
+ # what we want to measure
+ # -u : cpu statistics
+ # -q : load
+ # -b : io load rates
+ # -w : process creation and context switch rates
+ SYSSTAT_OPTS="-u -q -b -w"
if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
+ screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
else
- screen_it sysstat "sar $SYSSTAT_INTERVAL"
+ screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
+ fi
+fi
+
+if is_service_enabled pidstat; then
+ # Per-process stats
+ PIDSTAT_OPTS="-l -p ALL -T ALL"
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
+ else
+ screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
fi
fi
@@ -901,6 +931,10 @@
create_trove_accounts
fi
+ if is_service_enabled ceilometer; then
+ create_ceilometer_accounts
+ fi
+
if is_service_enabled swift || is_service_enabled s-proxy; then
create_swift_accounts
fi
@@ -1098,6 +1132,15 @@
iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
fi
+# Create a randomized default value for the keymgr's fixed_key
+if is_service_enabled nova; then
+ FIXED_KEY=""
+ for i in $(seq 1 64);
+ do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
+ done;
+ iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
+fi
+
if is_service_enabled zeromq; then
echo_summary "Starting zermomq receiver"
screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
@@ -1112,6 +1155,7 @@
if is_service_enabled q-svc; then
echo_summary "Starting Neutron"
start_neutron_service_and_check
+ check_neutron_third_party_integration
elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
NM_CONF=${NOVA_CONF}
if is_service_enabled n-cell; then
@@ -1203,7 +1247,6 @@
# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
#
# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
-# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then
diff --git a/stackrc b/stackrc
index 3fdc566..8a0280e 100644
--- a/stackrc
+++ b/stackrc
@@ -9,6 +9,9 @@
# Destination for working data
DATA_DIR=${DEST}/data
+# Destination for status files
+SERVICE_DIR=${DEST}/status
+
# Determine stack user
if [[ $EUID -eq 0 ]]; then
STACK_USER=stack
@@ -281,6 +284,9 @@
vsphere)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
+ xenserver)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
+ IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
*) # Default to Cirros with kernel, ramdisk and disk image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
diff --git a/tools/build_uec.sh b/tools/build_uec.sh
deleted file mode 100755
index bce051a..0000000
--- a/tools/build_uec.sh
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env bash
-
-# **build_uec.sh**
-
-# Make sure that we have the proper version of ubuntu (only works on oneiric)
-if ! egrep -q "oneiric" /etc/lsb-release; then
- echo "This script only works with ubuntu oneiric."
- exit 1
-fi
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-cd $TOP_DIR
-
-# Source params
-source ./stackrc
-
-# Ubuntu distro to install
-DIST_NAME=${DIST_NAME:-oneiric}
-
-# Configure how large the VM should be
-GUEST_SIZE=${GUEST_SIZE:-10G}
-
-# exit on error to stop unexpected errors
-set -o errexit
-set -o xtrace
-
-# Abort if localrc is not set
-if [ ! -e $TOP_DIR/localrc ]; then
- echo "You must have a localrc with ALL necessary passwords defined before proceeding."
- echo "See stack.sh for required passwords."
- exit 1
-fi
-
-# Install deps if needed
-DEPS="kvm libvirt-bin kpartx cloud-utils curl"
-apt_get install -y --force-yes $DEPS || true # allow this to fail gracefully for concurrent builds
-
-# Where to store files and instances
-WORK_DIR=${WORK_DIR:-/opt/uecstack}
-
-# Where to store images
-image_dir=$WORK_DIR/images/$DIST_NAME
-mkdir -p $image_dir
-
-# Start over with a clean base image, if desired
-if [ $CLEAN_BASE ]; then
- rm -f $image_dir/disk
-fi
-
-# Get the base image if it does not yet exist
-if [ ! -e $image_dir/disk ]; then
- $TOOLS_DIR/get_uec_image.sh -r $GUEST_SIZE $DIST_NAME $image_dir/disk $image_dir/kernel
-fi
-
-# Copy over dev environment if COPY_ENV is set.
-# This will also copy over your current devstack.
-if [ $COPY_ENV ]; then
- cd $TOOLS_DIR
- ./copy_dev_environment_to_uec.sh $image_dir/disk
-fi
-
-# Option to warm the base image with software requirements.
-if [ $WARM_CACHE ]; then
- cd $TOOLS_DIR
- ./warm_apts_for_uec.sh $image_dir/disk
-fi
-
-# Name of our instance, used by libvirt
-GUEST_NAME=${GUEST_NAME:-devstack}
-
-# Mop up after previous runs
-virsh destroy $GUEST_NAME || true
-
-# Where this vm is stored
-vm_dir=$WORK_DIR/instances/$GUEST_NAME
-
-# Create vm dir and remove old disk
-mkdir -p $vm_dir
-rm -f $vm_dir/disk
-
-# Create a copy of the base image
-qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
-
-# Back to devstack
-cd $TOP_DIR
-
-GUEST_NETWORK=${GUEST_NETWORK:-1}
-GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
-GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
-GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
-GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
-GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
-GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
-GUEST_RAM=${GUEST_RAM:-1524288}
-GUEST_CORES=${GUEST_CORES:-1}
-
-# libvirt.xml configuration
-NET_XML=$vm_dir/net.xml
-NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK}
-cat > $NET_XML <<EOF
-<network>
- <name>$NET_NAME</name>
- <bridge name="stackbr%d" />
- <forward/>
- <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
- <dhcp>
- <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
- </dhcp>
- </ip>
-</network>
-EOF
-
-if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
- virsh net-destroy $NET_NAME || true
- # destroying the network isn't enough to delete the leases
- rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases
- virsh net-create $vm_dir/net.xml
-fi
-
-# libvirt.xml configuration
-LIBVIRT_XML=$vm_dir/libvirt.xml
-cat > $LIBVIRT_XML <<EOF
-<domain type='kvm'>
- <name>$GUEST_NAME</name>
- <memory>$GUEST_RAM</memory>
- <os>
- <type>hvm</type>
- <kernel>$image_dir/kernel</kernel>
- <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
- </os>
- <features>
- <acpi/>
- </features>
- <clock offset='utc'/>
- <vcpu>$GUEST_CORES</vcpu>
- <devices>
- <disk type='file'>
- <driver type='qcow2'/>
- <source file='$vm_dir/disk'/>
- <target dev='vda' bus='virtio'/>
- </disk>
-
- <interface type='network'>
- <source network='$NET_NAME'/>
- </interface>
-
- <!-- The order is significant here. File must be defined first -->
- <serial type="file">
- <source path='$vm_dir/console.log'/>
- <target port='1'/>
- </serial>
-
- <console type='pty' tty='/dev/pts/2'>
- <source path='/dev/pts/2'/>
- <target port='0'/>
- </console>
-
- <serial type='pty'>
- <source path='/dev/pts/2'/>
- <target port='0'/>
- </serial>
-
- <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
- </devices>
-</domain>
-EOF
-
-
-rm -rf $vm_dir/uec
-cp -r $TOOLS_DIR/uec $vm_dir/uec
-
-# set metadata
-cat > $vm_dir/uec/meta-data<<EOF
-hostname: $GUEST_NAME
-instance-id: i-hop
-instance-type: m1.ignore
-local-hostname: $GUEST_NAME.local
-EOF
-
-# set user-data
-cat > $vm_dir/uec/user-data<<EOF
-#!/bin/bash
-# hostname needs to resolve for rabbit
-sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
-apt-get update
-apt-get install git sudo -y
-# Disable byobu
-sudo apt-get remove -y byobu
-EOF
-
-# Setup stack user with our key
-if [[ -e ~/.ssh/id_rsa.pub ]]; then
- PUB_KEY=`cat ~/.ssh/id_rsa.pub`
- cat >> $vm_dir/uec/user-data<<EOF
-mkdir -p /opt/stack
-if [ ! -d /opt/stack/devstack ]; then
- git clone https://github.com/cloudbuilders/devstack.git /opt/stack/devstack
- cd /opt/stack/devstack
- cat > localrc <<LOCAL_EOF
-ROOTSLEEP=0
-`cat $TOP_DIR/localrc`
-LOCAL_EOF
-fi
-useradd -U -G sudo -s /bin/bash -d /opt/stack -m $STACK_USER
-echo $STACK_USER:pass | chpasswd
-mkdir -p /opt/stack/.ssh
-echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys
-chown -R $STACK_USER /opt/stack
-chmod 700 /opt/stack/.ssh
-chmod 600 /opt/stack/.ssh/authorized_keys
-
-grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
- echo "#includedir /etc/sudoers.d" >> /etc/sudoers
-( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \
- > /etc/sudoers.d/50_stack_sh )
-EOF
-fi
-
-# Run stack.sh
-cat >> $vm_dir/uec/user-data<<EOF
-sudo -u $STACK_USER bash -l -c "cd /opt/stack/devstack && ./stack.sh"
-EOF
-
-# (re)start a metadata service
-(
- pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
- [ -z "$pid" ] || kill -9 $pid
-)
-cd $vm_dir/uec
-python meta.py 192.168.$GUEST_NETWORK.1:4567 &
-
-# Create the instance
-virsh create $vm_dir/libvirt.xml
-
-# Tail the console log till we are done
-WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
-if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
- set +o xtrace
- # Done creating the container, let's tail the log
- echo
- echo "============================================================="
- echo " -- YAY! --"
- echo "============================================================="
- echo
- echo "We're done launching the vm, about to start tailing the"
- echo "stack.sh log. It will take a second or two to start."
- echo
- echo "Just CTRL-C at any time to stop tailing."
- echo
-
- if ! timeout 60 sh -c "while [ ! -s /var/lib/libvirt/dnsmasq/$NET_NAME.leases ]; do sleep 1; done"; then
- echo "Your instance failed to acquire an IP address"
- exit 1
- fi
-
- ip=`cat /var/lib/libvirt/dnsmasq/$NET_NAME.leases | cut -d " " -f3`
- echo "#############################################################"
- echo " -- This is your instance's IP: --"
- echo " $ip"
- echo "#############################################################"
-
- sleep 2
-
- while [ ! -e "$vm_dir/console.log" ]; do
- sleep 1
- done
-
- tail -F $vm_dir/console.log &
-
- TAIL_PID=$!
-
- function kill_tail() {
- kill $TAIL_PID
- exit 1
- }
-
- # Let Ctrl-c kill tail and exit
- trap kill_tail SIGINT
-
- echo "Waiting stack.sh to finish..."
- while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
- sleep 1
- done
-
- set -o xtrace
-
- kill $TAIL_PID
-
- if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
- exit 1
- fi
-
- set +o xtrace
- echo ""
- echo "Finished - Zip-a-dee Doo-dah!"
-fi
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index 3c62064..da13f4b 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -64,12 +64,10 @@
KERNEL=$3
case $DIST_NAME in
+ saucy) ;;
+ raring) ;;
quantal) ;;
precise) ;;
- oneiric) ;;
- natty) ;;
- maverick) ;;
- lucid) ;;
*) echo "Unknown release: $DIST_NAME"
usage
;;
diff --git a/tools/sar_filter.py b/tools/sar_filter.py
new file mode 100755
index 0000000..ed8c196
--- /dev/null
+++ b/tools/sar_filter.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Samsung Electronics Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import subprocess
+import sys
+
+
+def is_data_line(line):
+ timestamp, data = parse_line(line)
+ return re.search('\d\.d', data)
+
+
+def parse_line(line):
+ m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line)
+ if m:
+ date = m.group(1)
+ data = m.group(2).rstrip()
+ return date, data
+ else:
+ return None, None
+
+
+process = subprocess.Popen(
+ "sar %s" % " ".join(sys.argv[1:]),
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+# Poll process for new output until finished
+
+start_time = ""
+header = ""
+data_line = ""
+printed_header = False
+current_ts = None
+while True:
+ nextline = process.stdout.readline()
+ if nextline == '' and process.poll() is not None:
+ break
+
+ date, data = parse_line(nextline)
+ # stop until we get to the first set of real lines
+ if not date:
+ continue
+
+ # now we eat the header lines, and only print out the header
+ # if we've never seen them before
+ if not start_time:
+ start_time = date
+ header += "%s %s" % (date, data)
+ elif date == start_time:
+ header += " %s" % data
+ elif not printed_header:
+ printed_header = True
+ print header
+
+ # now we know this is a data line, printing out if the timestamp
+ # has changed, and stacking up otherwise.
+ nextline = process.stdout.readline()
+ date, data = parse_line(nextline)
+ if date != current_ts:
+ current_ts = date
+ print data_line
+ data_line = "%s %s" % (date, data)
+ else:
+ data_line += " %s" % data
+
+ sys.stdout.flush()
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 06192ed..ee1abcc 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -70,6 +70,9 @@
Of course, use real passwords if this machine is exposed.
cat > ./localrc <<EOF
+ # At the moment, we depend on github's snapshot function.
+ GIT_BASE="http://github.com"
+
# Passwords
# NOTE: these need to be specified, otherwise devstack will try
# to prompt for these passwords, blocking the install process.
diff --git a/tools/xen/functions b/tools/xen/functions
index 563303d..97c56bc 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -1,5 +1,14 @@
#!/bin/bash
+function die_with_error {
+ local err_msg
+
+ err_msg="$1"
+
+ echo "$err_msg" >&2
+ exit 1
+}
+
function xapi_plugin_location {
for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do
if [ -d $PLUGIN_DIR ]; then
@@ -11,7 +20,7 @@
}
function zip_snapshot_location {
- echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g"
+ echo $1 | sed "s,^git://,http://,g;s:\.git$::;s:$:/zipball/$2:g"
}
function create_directory_for_kernels {
@@ -41,7 +50,9 @@
local EXTRACTED_FILES=$(mktemp -d)
{
- wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate
+ if ! wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate; then
+ die_with_error "Failed to download [$ZIPBALL_URL]"
+ fi
unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES
rm -f $LOCAL_ZIPBALL
} >&2
diff --git a/tools/xen/mocks b/tools/xen/mocks
index 94b0ca4..3b9b05c 100644
--- a/tools/xen/mocks
+++ b/tools/xen/mocks
@@ -35,7 +35,7 @@
function wget {
if [[ $@ =~ "failurl" ]]; then
- exit 1
+ return 1
fi
echo "wget $@" >> $LIST_OF_ACTIONS
}
@@ -73,10 +73,14 @@
done
return 1
fi
- echo "Mock test does not implement the requested function"
+ echo "Mock test does not implement the requested function: ${1:-}"
exit 1
}
+function die_with_error {
+ echo "$1" >> $DEAD_MESSAGES
+}
+
function xe {
cat $XE_RESPONSE
{
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 0ae2cb7..373d996 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -29,6 +29,9 @@
XE_CALLS=$(mktemp)
truncate -s 0 $XE_CALLS
+
+ DEAD_MESSAGES=$(mktemp)
+ truncate -s 0 $DEAD_MESSAGES
}
# Teardown
@@ -64,6 +67,10 @@
grep -qe "^$1\$" $XE_CALLS
}
+function assert_died_with {
+ diff -u <(echo "$1") $DEAD_MESSAGES
+}
+
function mock_out {
local FNNAME="$1"
local OUTPUT="$2"
@@ -109,16 +116,22 @@
grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
}
-function test_zip_snapshot_location {
+function test_zip_snapshot_location_http {
diff \
- <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \
- <(echo "git://git.openstack.org/openstack/nova/zipball/master")
+ <(zip_snapshot_location "http://github.com/openstack/nova.git" "master") \
+ <(echo "http://github.com/openstack/nova/zipball/master")
+}
+
+function test_zip_snapshot_location_git {
+ diff \
+ <(zip_snapshot_location "git://github.com/openstack/nova.git" "master") \
+ <(echo "http://github.com/openstack/nova/zipball/master")
}
function test_create_directory_for_kernels {
(
. mocks
- mock_out get_local_sr uuid1
+ mock_out get_local_sr_path /var/run/sr-mount/uuid1
create_directory_for_kernels
)
@@ -141,7 +154,7 @@
function test_create_directory_for_images {
(
. mocks
- mock_out get_local_sr uuid1
+ mock_out get_local_sr_path /var/run/sr-mount/uuid1
create_directory_for_images
)
@@ -179,7 +192,7 @@
local IGNORE
IGNORE=$(. mocks && extract_remote_zipball "failurl")
- assert_previous_command_failed
+ assert_died_with "Failed to download [failurl]"
}
function test_find_nova_plugins {
@@ -199,8 +212,7 @@
[ "$RESULT" == "uuid123" ]
- assert_xe_min
- assert_xe_param "sr-list" "name-label=Local storage"
+ assert_xe_param "pool-list" params=default-SR minimal=true
}
function test_get_local_sr_path {
diff --git a/unstack.sh b/unstack.sh
index 67c8b7c..31f6f01 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -30,15 +30,31 @@
exit 1
fi
+
+# Configure Projects
+# ==================
+
# Import apache functions
source $TOP_DIR/lib/apache
-# Get project function libraries
-source $TOP_DIR/lib/baremetal
-source $TOP_DIR/lib/cinder
+# Import TLS functions
+source $TOP_DIR/lib/tls
+
+# Source project function libraries
+source $TOP_DIR/lib/infra
+source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/stackforge
source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/glance
+source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/cinder
source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
source $TOP_DIR/lib/neutron
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
source $TOP_DIR/lib/ironic
source $TOP_DIR/lib/trove
@@ -75,21 +91,29 @@
teardown_neutron_debug
fi
-# Shut down devstack's screen to get the bulk of OpenStack services in one shot
-SCREEN=$(which screen)
-if [[ -n "$SCREEN" ]]; then
- SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
- if [[ -n "$SESSION" ]]; then
- screen -X -S $SESSION quit
- fi
+# Call service stop
+if is_service_enabled trove; then
+ stop_trove
fi
-# Shut down Nova hypervisor plugins after Nova
-NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins
-if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
- # Load plugin
- source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
- stop_nova_hypervisor
+if is_service_enabled heat; then
+ stop_heat
+fi
+
+if is_service_enabled ceilometer; then
+ stop_ceilometer
+fi
+
+if is_service_enabled nova; then
+ stop_nova
+fi
+
+if is_service_enabled g-api g-reg; then
+ stop_glance
+fi
+
+if is_service_enabled key; then
+ stop_keystone
fi
# Swift runs daemons
@@ -123,6 +147,7 @@
# Get the iSCSI volumes
if is_service_enabled cinder; then
+ stop_cinder
cleanup_cinder
fi
@@ -152,4 +177,13 @@
cleanup_trove
fi
+# Clean up the remainder of the screen processes
+SCREEN=$(which screen)
+if [[ -n "$SCREEN" ]]; then
+ SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
+ if [[ -n "$SESSION" ]]; then
+ screen -X -S $SESSION quit
+ fi
+fi
+
cleanup_tmp