Merge "XenAPI: Update DomU to Ubuntu Saucy"
diff --git a/.gitignore b/.gitignore
index a3d5b0d..c49b4a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
proto
*~
-.*.sw[nop]
+.*.sw?
*.log
*.log.[1-9]
src
@@ -8,6 +8,7 @@
local.sh
files/*.gz
files/images
+files/pip-*
stack-screenrc
*.pem
accrc
diff --git a/README.md b/README.md
index 91d7efb..9914b1e 100644
--- a/README.md
+++ b/README.md
@@ -327,6 +327,7 @@
* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced
* **post-config** - runs after the layer 2 services are configured and before they are started
* **extra** - runs after services are started and before any files in ``extra.d`` are executed
+* **post-extra** - runs after files in ``extra.d`` are executed
The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used.
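As an illustration of the new phase, a minimal ``local.conf`` sketch (the settings and the ``$TEMPEST_CONFIG`` target shown here are assumptions, not part of this change) that overrides Tempest options only after the ``extra.d`` scripts have run:

```
[[local|localrc]]
ADMIN_PASSWORD=secret

[[post-extra|$TEMPEST_CONFIG]]
[compute]
# hypothetical override; applied after all extra.d scripts have executed
build_timeout=400
```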
diff --git a/clean.sh b/clean.sh
index 395941a..480a812 100755
--- a/clean.sh
+++ b/clean.sh
@@ -15,6 +15,8 @@
# Import common functions
source $TOP_DIR/functions
+FILES=$TOP_DIR/files
+
# Load local configuration
source $TOP_DIR/stackrc
@@ -84,6 +86,10 @@
cleanup_neutron
cleanup_swift
+if is_service_enabled ldap; then
+ cleanup_ldap
+fi
+
# Do the hypervisor cleanup until this can be moved back into lib/nova
if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
cleanup_nova_hypervisor
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 96241f9..1b1ac06 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -67,7 +67,10 @@
exit_if_aggregate_present $AGGREGATE_NAME
AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
+
AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE2_ID "Failure creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
# check aggregate created
nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 3b3d3ba..ed8ba63 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -149,7 +149,7 @@
# Create the bootable volume
start_time=$(date +%s)
-cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
@@ -165,10 +165,10 @@
# Boot instance
# -------------
-# Boot using the --block_device_mapping param. The format of mapping is:
+# Boot using the --block-device-mapping param. The format of mapping is:
# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
# Leaving the middle two fields blank appears to do-the-right-thing
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 1e68042..e79774f 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -56,10 +56,8 @@
unset OS_AUTH_URL
# Common authentication args
-TENANT_ARG="--os_tenant_name=$x_TENANT_NAME"
-TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME"
-ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL"
-ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
+TENANT_ARG="--os-tenant-name=$x_TENANT_NAME"
+ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
# Set global return
RETURN=0
@@ -71,7 +69,7 @@
STATUS_KEYSTONE="Skipped"
else
echo -e "\nTest Keystone"
- if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then
+ if keystone $TENANT_ARG $ARGS catalog --service identity; then
STATUS_KEYSTONE="Succeeded"
else
STATUS_KEYSTONE="Failed"
@@ -90,7 +88,7 @@
else
# Test OSAPI
echo -e "\nTest Nova"
- if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then
+ if nova $TENANT_ARG $ARGS flavor-list; then
STATUS_NOVA="Succeeded"
else
STATUS_NOVA="Failed"
@@ -107,7 +105,7 @@
STATUS_CINDER="Skipped"
else
echo -e "\nTest Cinder"
- if cinder $TENANT_ARG_DASH $ARGS_DASH list; then
+ if cinder $TENANT_ARG $ARGS list; then
STATUS_CINDER="Succeeded"
else
STATUS_CINDER="Failed"
@@ -124,7 +122,7 @@
STATUS_GLANCE="Skipped"
else
echo -e "\nTest Glance"
- if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then
+ if glance $TENANT_ARG $ARGS image-list; then
STATUS_GLANCE="Succeeded"
else
STATUS_GLANCE="Failed"
@@ -141,7 +139,7 @@
STATUS_SWIFT="Skipped"
else
echo -e "\nTest Swift"
- if swift $TENANT_ARG_DASH $ARGS_DASH stat; then
+ if swift $TENANT_ARG $ARGS stat; then
STATUS_SWIFT="Succeeded"
else
STATUS_SWIFT="Failed"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 1a1608c..7055278 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -114,6 +114,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
@@ -126,7 +127,7 @@
# Boot instance
# -------------
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
diff --git a/exercises/marconi.sh b/exercises/marconi.sh
new file mode 100755
index 0000000..9d83a99
--- /dev/null
+++ b/exercises/marconi.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# **marconi.sh**
+
+# Sanity check that Marconi started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled marconi-server || exit 55
+
+curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Marconi API not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 7dfa5dc..0a100c0 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -139,24 +139,28 @@
function get_image_id {
local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
echo "$IMAGE_ID"
}
function get_tenant_id {
local TENANT_NAME=$1
local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
echo "$TENANT_ID"
}
function get_user_id {
local USER_NAME=$1
local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
echo "$USER_ID"
}
function get_role_id {
local ROLE_NAME=$1
local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
echo "$ROLE_ID"
}
@@ -169,6 +173,7 @@
function get_flavor_id {
local INSTANCE_TYPE=$1
local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
echo "$FLAVOR_ID"
}
@@ -233,8 +238,9 @@
source $TOP_DIR/openrc admin admin
local TENANT_ID=$(get_tenant_id $TENANT)
source $TOP_DIR/openrc $TENANT $TENANT
- local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
- neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
+ local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
+ neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
neutron-debug probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
}
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 7d80570..eb32cc7 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -56,6 +56,7 @@
# Check to make sure rules were added
SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
+die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
for i in "${RULES_TO_ADD[@]}"; do
skip=
for j in "${SEC_GROUP_RULES[@]}"; do
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 9ee9fa9..21b5d21 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -118,6 +118,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
@@ -129,7 +130,7 @@
# Boot instance
# -------------
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
@@ -155,7 +156,7 @@
# Create a new volume
start_time=$(date +%s)
-cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
die $LINENO "Volume $VOL_NAME not created"
diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh
new file mode 100644
index 0000000..a96a4c5
--- /dev/null
+++ b/extras.d/70-marconi.sh
@@ -0,0 +1,29 @@
+# marconi.sh - Devstack extras script to install Marconi
+
+if is_service_enabled marconi-server; then
+ if [[ "$1" == "source" ]]; then
+ # Initial source
+ source $TOP_DIR/lib/marconi
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Installing Marconi"
+ install_marconiclient
+ install_marconi
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring Marconi"
+ configure_marconi
+ configure_marconiclient
+
+ if is_service_enabled key; then
+ create_marconi_accounts
+ fi
+
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+ echo_summary "Initializing Marconi"
+ init_marconi
+ start_marconi
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ stop_marconi
+ fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
index f6881cc..6bbe113 100644
--- a/extras.d/70-savanna.sh
+++ b/extras.d/70-savanna.sh
@@ -14,6 +14,7 @@
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring Savanna"
configure_savanna
+ create_savanna_accounts
if is_service_enabled horizon; then
configure_savanna_dashboard
fi
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 75b702c..0186e36 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -14,6 +14,9 @@
echo_summary "Initializing Tempest"
configure_tempest
init_tempest
+ elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+ # local.conf Tempest option overrides
+ :
fi
if [[ "$1" == "unstack" ]]; then
diff --git a/extras.d/README.md b/extras.d/README.md
index 88e4265..1dd17da 100644
--- a/extras.d/README.md
+++ b/extras.d/README.md
@@ -19,10 +19,10 @@
source: always called first in any of the scripts, used to set the
initial defaults in a lib/* script or similar
- stack: called by stack.sh. There are three possible values for
+ stack: called by stack.sh. There are four possible values for
the second arg to distinguish the phase stack.sh is in:
- arg 2: install | post-config | extra
+ arg 2: install | post-config | extra | post-extra
unstack: called by unstack.sh
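A minimal dispatch skeleton for an ``extras.d`` script that honors all four ``stack`` phases could look like the sketch below (the ``foo`` service and function names are hypothetical; the structure mirrors ``extras.d/70-marconi.sh`` and ``80-tempest.sh`` in this change):

```bash
# extras.d/90-foo.sh - hypothetical extras.d dispatch skeleton
if is_service_enabled foo; then
    if [[ "$1" == "source" ]]; then
        # set initial defaults
        source $TOP_DIR/lib/foo
    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
        install_foo
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        configure_foo
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        init_foo
        start_foo
    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
        # local.conf overrides for already-started services land here
        :
    fi

    if [[ "$1" == "unstack" ]]; then
        stop_foo
    fi
fi
```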
diff --git a/files/apts/general b/files/apts/general
index fcf0b5b..aff687f 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -20,3 +20,4 @@
euca2ools # only for testing client
tar
python-cmd2 # dist:precise
+python2.7
diff --git a/files/apts/ldap b/files/apts/ldap
index 81a00f2..26f7aef 100644
--- a/files/apts/ldap
+++ b/files/apts/ldap
@@ -1,3 +1,3 @@
ldap-utils
-slapd # NOPRIME
+slapd
python-ldap
diff --git a/files/apts/marconi-server b/files/apts/marconi-server
new file mode 100644
index 0000000..bc7ef22
--- /dev/null
+++ b/files/apts/marconi-server
@@ -0,0 +1,3 @@
+python-pymongo
+mongodb-server
+pkg-config
diff --git a/files/apts/tempest b/files/apts/tempest
new file mode 100644
index 0000000..f244e4e
--- /dev/null
+++ b/files/apts/tempest
@@ -0,0 +1 @@
+libxslt1-dev
\ No newline at end of file
diff --git a/files/ldap/keystone.ldif.in b/files/ldap/keystone.ldif.in
new file mode 100644
index 0000000..cf51907
--- /dev/null
+++ b/files/ldap/keystone.ldif.in
@@ -0,0 +1,26 @@
+dn: ${BASE_DN}
+objectClass: dcObject
+objectClass: organizationalUnit
+dc: ${BASE_DC}
+ou: ${BASE_DC}
+
+dn: ou=UserGroups,${BASE_DN}
+objectClass: organizationalUnit
+ou: UserGroups
+
+dn: ou=Users,${BASE_DN}
+objectClass: organizationalUnit
+ou: Users
+
+dn: ou=Roles,${BASE_DN}
+objectClass: organizationalUnit
+ou: Roles
+
+dn: ou=Projects,${BASE_DN}
+objectClass: organizationalUnit
+ou: Projects
+
+dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN}
+objectClass: organizationalRole
+ou: _member_
+cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index e522150..de3b69d 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,10 +1,15 @@
dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
changetype: modify
replace: olcSuffix
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
-
replace: olcRootDN
-olcRootDN: dc=Manager,dc=openstack,dc=org
+olcRootDN: ${MANAGER_DN}
-
${LDAP_ROOTPW_COMMAND}: olcRootPW
olcRootPW: ${SLAPPASS}
+-
+replace: olcDbIndex
+olcDbIndex: objectClass eq
+olcDbIndex: default pres,eq
+olcDbIndex: cn,sn,givenName,co
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
deleted file mode 100644
index 02caf3f..0000000
--- a/files/ldap/openstack.ldif
+++ /dev/null
@@ -1,26 +0,0 @@
-dn: dc=openstack,dc=org
-dc: openstack
-objectClass: dcObject
-objectClass: organizationalUnit
-ou: openstack
-
-dn: ou=UserGroups,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: UserGroups
-
-dn: ou=Users,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Users
-
-dn: ou=Roles,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Roles
-
-dn: ou=Projects,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Projects
-
-dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
-objectClass: organizationalRole
-ou: _member_
-cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/base-config.ldif b/files/ldap/suse-base-config.ldif.in
similarity index 77%
rename from files/ldap/base-config.ldif
rename to files/ldap/suse-base-config.ldif.in
index 026d8bc..00256ee 100644
--- a/files/ldap/base-config.ldif
+++ b/files/ldap/suse-base-config.ldif.in
@@ -12,8 +12,10 @@
cn: schema
include: file:///etc/openldap/schema/core.ldif
+include: file:///etc/openldap/schema/cosine.ldif
+include: file:///etc/openldap/schema/inetorgperson.ldif
dn: olcDatabase={1}hdb,cn=config
objectClass: olcHdbConfig
olcDbDirectory: /var/lib/ldap
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server
new file mode 100644
index 0000000..d7b7ea8
--- /dev/null
+++ b/files/rpms/marconi-server
@@ -0,0 +1,3 @@
+selinux-policy-targeted
+mongodb-server
+pymongo
diff --git a/files/rpms/tempest b/files/rpms/tempest
new file mode 100644
index 0000000..de32b81
--- /dev/null
+++ b/files/rpms/tempest
@@ -0,0 +1 @@
+libxslt-dev
\ No newline at end of file
diff --git a/functions b/functions
index 6137aaf..e79e1d5 100644
--- a/functions
+++ b/functions
@@ -422,6 +422,7 @@
os_CODENAME=$(lsb_release -c -s)
elif [[ -r /etc/redhat-release ]]; then
# Red Hat Enterprise Linux Server release 5.5 (Tikanga)
+ # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
# CentOS release 5.5 (Final)
# CentOS Linux release 6.0 (Final)
# Fedora release 16 (Verne)
@@ -430,7 +431,7 @@
for r in "Red Hat" CentOS Fedora XenServer; do
os_VENDOR=$r
if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
- ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
+ ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
os_CODENAME=${ver#*|}
os_RELEASE=${ver%|*}
os_UPDATE=${os_RELEASE##*.}
@@ -554,7 +555,7 @@
function is_arch {
ARCH_TYPE=$1
- [ "($uname -m)" = "$ARCH_TYPE" ]
+ [[ "$(uname -m)" == "$ARCH_TYPE" ]]
}
# Checks if installed Apache is <= given version
@@ -729,6 +730,8 @@
local option=$3
local value=$4
+ [[ -z $section || -z $option ]] && return
+
if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
# Add section at the end
echo -e "\n[$section]" >>"$file"
@@ -739,8 +742,9 @@
$option = $value
" "$file"
else
+ local sep=$(echo -ne "\x01")
# Replace it
- sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file"
+ sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
}
@@ -1156,6 +1160,11 @@
NL=`echo -ne '\015'`
echo "screen -t $1 bash" >> $SCREENRC
echo "stuff \"$2$NL\"" >> $SCREENRC
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC
+ echo "log on" >>$SCREENRC
+ fi
fi
}
@@ -1351,10 +1360,9 @@
# Create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
-
+ IMAGE_FNAME=`basename "$image_url"`
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+aki style), then extracts it.
- IMAGE_FNAME=`basename "$image_url"`
if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
wget -c $image_url -O $FILES/$IMAGE_FNAME
if [[ $? -ne 0 ]]; then
@@ -1410,13 +1418,92 @@
vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)"
vmdk_create_type="${vmdk_create_type#*\"}"
vmdk_create_type="${vmdk_create_type%?}"
+
+ descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
+ `"should use a descriptor-data pair."
if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
vmdk_disktype="sparse"
- elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then
- die $LINENO "Monolithic flat disks should use a descriptor-data pair." \
- "Please provide the disk and not the descriptor."
+ elif [[ "$vmdk_create_type" = "monolithicFlat" || \
+ "$vmdk_create_type" = "vmfs" ]]; then
+ # Attempt to retrieve the *-flat.vmdk
+ flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)"
+ flat_fname="${flat_fname#*\"}"
+ flat_fname="${flat_fname%?}"
+ if [[ -z "$flat_name" ]]; then
+ flat_fname="$IMAGE_NAME-flat.vmdk"
+ fi
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_url="${image_url:0:$path_len}$flat_fname"
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the *-flat.vmdk: $flat_url"
+ if [[ $flat_url != file* ]]; then
+ if [[ ! -f $FILES/$flat_fname || \
+ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
+ wget -c $flat_url -O $FILES/$flat_fname
+ if [[ $? -ne 0 ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ fi
+ if $flat_found; then
+ IMAGE="$FILES/${flat_fname}"
+ fi
+ else
+ IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
+ if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ if ! $flat_found; then
+ IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+ fi
+ fi
+ if $flat_found; then
+ IMAGE_NAME="${flat_fname}"
+ fi
+ vmdk_disktype="preallocated"
+ elif [[ -z "$vmdk_create_type" ]]; then
+ # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
+ # to retrieve appropriate metadata
+ if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
+ warn $LINENO "Expected filename suffix: '-flat'."`
+ `" Filename provided: ${IMAGE_NAME}"
+ else
+ descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_path="${image_url:0:$path_len}"
+ descriptor_url=$flat_path$descriptor_fname
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+ if [[ $flat_path != file* ]]; then
+ if [[ ! -f $FILES/$descriptor_fname || \
+ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+ wget -c $descriptor_url -O $FILES/$descriptor_fname
+ if [[ $? -ne 0 ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ descriptor_url="$FILES/$descriptor_fname"
+ else
+ descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+ if [[ ! -f $descriptor_url || \
+ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ if $descriptor_found; then
+ vmdk_adapter_type="$(head -25 $descriptor_url |"`
+ `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
+ vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+ vmdk_adapter_type="${vmdk_adapter_type%?}"
+ fi
+ fi
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+ vmdk_disktype="preallocated"
else
- #TODO(alegendre): handle streamOptimized once supported by VMware driver.
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
vmdk_disktype="preallocated"
fi
@@ -1510,11 +1597,15 @@
*) echo "Do not know what to do with $IMAGE_FNAME"; false;;
esac
+ if is_arch "ppc64"; then
+ IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi"
+ fi
+
if [ "$CONTAINER_FORMAT" = "bare" ]; then
if [ "$UNPACK" = "zcat" ]; then
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
else
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -1522,12 +1613,12 @@
# kernel for use when uploading the root filesystem.
KERNEL_ID=""; RAMDISK_ID="";
if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
fi
if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
fi
}
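One consequence of the ``iniset`` change above (switching the ``sed`` delimiter to ``\x01``) is that option values containing ``|`` no longer break the substitution. A hedged sketch of such a call (file and option names are made up for illustration):

```bash
# a value containing '|' used to collide with the old '|' sed delimiter;
# with the \x01 separator the value is now written verbatim
iniset /tmp/example.conf DEFAULT pipeline "filter_a|filter_b"
```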
diff --git a/lib/ceilometer b/lib/ceilometer
index 8e2970c..fac3be1 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -3,7 +3,7 @@
# To enable a minimal set of Ceilometer services, add the following to localrc:
#
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
#
# To ensure Ceilometer alarming services are enabled also, further add to the localrc:
#
@@ -145,6 +145,7 @@
screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
fi
screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
@@ -160,7 +161,7 @@
# stop_ceilometer() - Stop running processes
function stop_ceilometer() {
# Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
+ for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
screen -S $SCREEN_NAME -p $serv -X kill
done
}
diff --git a/lib/cinder b/lib/cinder
index 9288685..cbe732e 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -174,6 +174,12 @@
# Set the paths of certain binaries
CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
+ if [[ ! -x $CINDER_ROOTWRAP ]]; then
+ CINDER_ROOTWRAP=$(get_rootwrap_location oslo)
+ if [[ ! -x $CINDER_ROOTWRAP ]]; then
+ die $LINENO "No suitable rootwrap found."
+ fi
+ fi
# If Cinder ships the new rootwrap filters files, deploy them
# (owned by root) and add a parameter to $CINDER_ROOTWRAP
@@ -189,11 +195,16 @@
sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d
sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/*
# Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d
- sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
+ if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then
+ sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
+ else
+ # rootwrap.conf is no longer shipped in Cinder itself
+ echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null
+ fi
sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf
sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
- # Specify rootwrap.conf as first parameter to cinder-rootwrap
+ # Specify rootwrap.conf as first parameter to rootwrap
CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf"
ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *"
fi
@@ -237,6 +248,11 @@
iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2
iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2
+ # NOTE(mriedem): Work around Cinder "wishlist" bug 1255593
+ if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
+ iniset $CINDER_CONF lvmdriver-1 volume_clear none
+ iniset $CINDER_CONF lvmdriver-2 volume_clear none
+ fi
else
iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
diff --git a/lib/config b/lib/config
index 91cefe4..1678aec 100644
--- a/lib/config
+++ b/lib/config
@@ -35,7 +35,7 @@
$CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile '
BEGIN { group = "" }
- /^\[\[.+|.*\]\]/ {
+ /^\[\[.+\|.*\]\]/ {
if (group == "") {
gsub("[][]", "", $1);
split($1, a, "|");
@@ -95,7 +95,7 @@
/^ *\#/ {
next
}
- /^.+/ {
+ /^[^ \t]+/ {
split($0, d, " *= *")
print "iniset " configfile " " section " " d[1] " \"" d[2] "\""
}
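To make the tightened awk patterns concrete: ``merge_config_file`` turns a ``local.conf`` block like the sketch below (hypothetical option; the ``$NOVA_CONF`` target is illustrative) into an ``iniset`` call of roughly the form ``iniset $NOVA_CONF DEFAULT use_syslog "True"``; comment lines and lines beginning with whitespace are now skipped rather than mis-parsed.

```
[[post-config|$NOVA_CONF]]
[DEFAULT]
use_syslog = True
# this comment and any indented continuation line are ignored
```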
diff --git a/lib/glance b/lib/glance
index 2e29a8f..b278796 100644
--- a/lib/glance
+++ b/lib/glance
@@ -124,6 +124,8 @@
iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
+
+        iniset_multiline $GLANCE_API_CONF DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store
fi
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
diff --git a/lib/keystone b/lib/keystone
index 6d0c1cd..29b9604 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -151,17 +151,17 @@
if is_service_enabled ldap; then
#Set all needed ldap values
- iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
- iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org"
- iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
+ iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
+ iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
iniset $KEYSTONE_CONF ldap use_dumb_member "True"
iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
- iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
- iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
fi
@@ -337,6 +337,10 @@
# init_keystone() - Initialize databases, etc.
function init_keystone() {
+ if is_service_enabled ldap; then
+ init_ldap
+ fi
+
# (Re)create keystone database
recreate_database keystone utf8
@@ -399,7 +403,7 @@
screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
else
# Start Keystone in a screen window
- screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
+ screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug"
fi
echo "Waiting for keystone to start..."
diff --git a/lib/ldap b/lib/ldap
index 80992a7..e4bd416 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -9,68 +9,137 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+
+LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org}
+# Make an array of domain components
+DC=(${LDAP_DOMAIN//./ })
+
+# Leftmost domain component used in top-level entry
+LDAP_BASE_DC=${DC[0]}
+
+# Build the base DN
+dn=""
+for dc in ${DC[*]}; do
+ dn="$dn,dc=$dc"
+done
+LDAP_BASE_DN=${dn#,}
+
+LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}"
+LDAP_URL=${LDAP_URL:-ldap://localhost}
+
LDAP_SERVICE_NAME=slapd
+if is_ubuntu; then
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=replace
+elif is_fedora; then
+ LDAP_OLCDB_NUMBER=2
+ LDAP_ROOTPW_COMMAND=add
+elif is_suse; then
+ # SUSE has slappasswd in /usr/sbin/
+ PATH=$PATH:/usr/sbin/
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=add
+ LDAP_SERVICE_NAME=ldap
+fi
+
+
# Functions
# ---------
+# Perform common variable substitutions on the data files
+# _ldap_varsubst file
+function _ldap_varsubst() {
+ local infile=$1
+ sed -e "
+ s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${SLAPPASS}|$SLAPPASS|
+ s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
+ s|\${BASE_DC}|$LDAP_BASE_DC|
+ s|\${BASE_DN}|$LDAP_BASE_DN|
+ s|\${MANAGER_DN}|$LDAP_MANAGER_DN|
+ " $infile
+}
+
+# clean_ldap() - Remove ldap server
+function cleanup_ldap() {
+ uninstall_package $(get_packages ldap)
+ if is_ubuntu; then
+ uninstall_package slapd ldap-utils libslp1
+ sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap
+ elif is_fedora; then
+ sudo rm -rf /etc/openldap /var/lib/ldap
+ elif is_suse; then
+ sudo rm -rf /var/lib/ldap
+ fi
+}
+
+# init_ldap
+# init_ldap() - Initialize databases, etc.
+function init_ldap() {
+ local keystone_ldif
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ # Remove data but not schemas
+ clear_ldap_state
+
+ # Add our top level ldap nodes
+ if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then
+ printf "LDAP already configured for $LDAP_BASE_DC\n"
+ else
+ printf "Configuring LDAP for $LDAP_BASE_DC\n"
+ # If BASE_DN is changed, the user may override the default file
+ if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then
+ keystone_ldif=${LDAP_BASE_DC}.ldif
+ else
+ keystone_ldif=keystone.ldif
+ fi
+ _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif}
+ if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then
+ ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif}
+ fi
+ fi
+
+    rm -rf $TMP_LDAP_DIR
+}
+
# install_ldap
# install_ldap() - Collect source and prepare
function install_ldap() {
echo "Installing LDAP inside function"
- echo "LDAP_PASSWORD is $LDAP_PASSWORD"
echo "os_VENDOR is $os_VENDOR"
- printf "installing"
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ printf "installing OpenLDAP"
if is_ubuntu; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=replace
- sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils
- #automatically starts LDAP on ubuntu so no need to call start_ldap
+ # Ubuntu automatically starts LDAP so no need to call start_ldap()
+ :
elif is_fedora; then
- LDAP_OLCDB_NUMBER=2
- LDAP_ROOTPW_COMMAND=add
start_ldap
elif is_suse; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=add
- LDAP_SERVICE_NAME=ldap
- # SUSE has slappasswd in /usr/sbin/
- PATH=$PATH:/usr/sbin/
- sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif
+ _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif
+ sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif
sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap
start_ldap
fi
- printf "generate password file"
- SLAPPASS=`slappasswd -s $LDAP_PASSWORD`
+ echo "LDAP_PASSWORD is $LDAP_PASSWORD"
+ SLAPPASS=$(slappasswd -s $LDAP_PASSWORD)
+ printf "LDAP secret is $SLAPPASS\n"
- printf "secret is $SLAPPASS\n"
- #create manager.ldif
- TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif`
- sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE
-
- #update ldap olcdb
- sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE
+ # Create manager.ldif and add to olcdb
+ _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif
+ sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif
# On fedora we need to manually add cosine and inetorgperson schemas
- if is_fedora || is_suse; then
+ if is_fedora; then
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
fi
- # add our top level ldap nodes
- if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then
- printf "LDAP already configured for OpenStack\n"
- if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then
- # clear LDAP state
- clear_ldap_state
- # reconfigure LDAP for OpenStack
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
- else
- printf "Configuring LDAP for OpenStack\n"
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
+    rm -rf $TMP_LDAP_DIR
}
# start_ldap() - Start LDAP
@@ -78,7 +147,6 @@
sudo service $LDAP_SERVICE_NAME restart
}
-
# stop_ldap() - Stop LDAP
function stop_ldap() {
sudo service $LDAP_SERVICE_NAME stop
@@ -86,7 +154,7 @@
# clear_ldap_state() - Clear LDAP State
function clear_ldap_state() {
- ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org"
+ ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
}
# Restore xtrace
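As a sketch of the new domain handling in ``lib/ldap`` (the values below are the worked-out defaults plus one hypothetical override), ``LDAP_DOMAIN`` now drives every derived name that gets substituted into the ``files/ldap/*.ldif.in`` templates:

```bash
# with the default LDAP_DOMAIN=openstack.org the derivation yields:
#   LDAP_BASE_DC=openstack
#   LDAP_BASE_DN=dc=openstack,dc=org
#   LDAP_MANAGER_DN=cn=Manager,dc=openstack,dc=org
# a hypothetical override in localrc:
LDAP_DOMAIN=example.net
# would instead give LDAP_BASE_DN=dc=example,dc=net, and _ldap_varsubst
# would substitute those values into keystone.ldif.in, manager.ldif.in, etc.
```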
diff --git a/lib/marconi b/lib/marconi
new file mode 100644
index 0000000..742f866
--- /dev/null
+++ b/lib/marconi
@@ -0,0 +1,172 @@
+# lib/marconi
+# Install and start **Marconi** service
+
+# To enable a minimal set of Marconi services, add the following to localrc:
+# enable_service marconi-server
+#
+# Dependencies:
+# - functions
+# - OS_AUTH_URL for auth in api
+# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
+# - STACK_USER service user
+
+# stack.sh
+# ---------
+# install_marconi
+# configure_marconi
+# init_marconi
+# start_marconi
+# stop_marconi
+# cleanup_marconi
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+MARCONI_DIR=$DEST/marconi
+MARCONICLIENT_DIR=$DEST/python-marconiclient
+MARCONI_CONF_DIR=/etc/marconi
+MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
+MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
+
+# Support potential entry-points console scripts
+MARCONI_BIN_DIR=$(get_python_exec_prefix)
+
+# Set up database backend
+MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb}
+
+
+# Set Marconi repository
+MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git}
+MARCONI_BRANCH=${MARCONI_BRANCH:-master}
+
+# Set client library repository
+MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git}
+MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master}
+
+# Functions
+# ---------
+
+# cleanup_marconi() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_marconi() {
+ mongo marconi --eval "db.dropDatabase();"
+}
+
+# configure_marconiclient() - Set config files, create data dirs, etc
+function configure_marconiclient() {
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# configure_marconi() - Set config files, create data dirs, etc
+function configure_marconi() {
+ setup_develop $MARCONI_DIR
+
+ [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR
+ sudo chown $USER $MARCONI_CONF_DIR
+
+ [ ! -d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR
+ sudo chown $USER $MARCONI_API_LOG_DIR
+
+ iniset $MARCONI_CONF DEFAULT verbose True
+ iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0'
+
+ # Install the policy file for the API server
+ cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR
+ iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json
+
+ iniset $MARCONI_CONF keystone_authtoken auth_protocol http
+ iniset $MARCONI_CONF keystone_authtoken admin_user marconi
+ iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
+
+ if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
+ iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+ configure_mongodb
+ cleanup_marconi
+ fi
+}
+
+function configure_mongodb() {
+ # Set nssize to 2GB. This increases the number of namespaces supported
+    # per database.
+ sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+
+ restart_service mongod
+}
+
+# init_marconi() - Initialize etc.
+function init_marconi() {
+ # Create cache dir
+ sudo mkdir -p $MARCONI_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR
+ rm -f $MARCONI_AUTH_CACHE_DIR/*
+}
+
+# install_marconi() - Collect source and prepare
+function install_marconi() {
+ git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH
+ setup_develop $MARCONI_DIR
+}
+
+# install_marconiclient() - Collect source and prepare
+function install_marconiclient() {
+ git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# start_marconi() - Start running processes, including screen
+function start_marconi() {
+ screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+}
+
+# stop_marconi() - Stop running processes
+function stop_marconi() {
+ # Kill the marconi screen windows
+ for serv in marconi-server; do
+ screen -S $SCREEN_NAME -p $serv -X kill
+ done
+}
+
+function create_marconi_accounts() {
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ MARCONI_USER=$(get_id keystone user-create --name=marconi \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=marconi@example.com)
+ keystone user-role-add --tenant-id $SERVICE_TENANT \
+ --user-id $MARCONI_USER \
+ --role-id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ MARCONI_SERVICE=$(keystone service-create \
+ --name=marconi \
+ --type=queuing \
+ --description="Marconi Service" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $MARCONI_SERVICE \
+ --publicurl "http://$SERVICE_HOST:8888" \
+ --adminurl "http://$SERVICE_HOST:8888" \
+ --internalurl "http://$SERVICE_HOST:8888"
+ fi
+
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
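Per the header comment in ``lib/marconi``, enabling the service from ``localrc`` is all that is needed; the backend override is optional (a sketch, with the default value shown):

```bash
# localrc / [[local|localrc]] section of local.conf
enable_service marconi-server
# optional; mongodb is the default MARCONI_BACKEND
MARCONI_BACKEND=mongodb
```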
diff --git a/lib/neutron b/lib/neutron
index 7f1a9d8..dbc5843 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -69,7 +69,7 @@
# Gateway and subnet defaults, in case they are not customized in localrc
NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.225}
+PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
@@ -115,6 +115,13 @@
# nova vif driver that all plugins should use
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+# The next two variables are configured by the plugin,
+# e.g. in _configure_neutron_l3_agent or lib/neutron_plugins/*
+#
+# The plugin supports L3.
+Q_L3_ENABLED=${Q_L3_ENABLED:-False}
+# L3 routers exist per tenant
+Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False}
# List of config file names in addition to the main plugin config file
# See _configure_neutron_common() for details about setting it up
@@ -346,6 +353,7 @@
function create_neutron_initial_network() {
TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo"
# Create a small network
# Since neutron command is executed in admin context at this point,
@@ -360,12 +368,16 @@
sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
done
NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
sudo ifconfig $OVS_PHYSICAL_BRIDGE up
sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
else
NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
fi
if [[ "$Q_L3_ENABLED" == "True" ]]; then
@@ -373,14 +385,18 @@
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
fi
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create an external network, and a subnet. Configure the external network as router gw
EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+ die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP"
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
if is_service_enabled q-l3; then
@@ -390,6 +406,7 @@
sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
sudo ip link set $PUBLIC_BRIDGE up
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
+ die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
fi
if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
@@ -491,6 +508,19 @@
pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }')
[ ! -z "$pid" ] && sudo kill -9 $pid
fi
+
+ if is_service_enabled q-lbaas; then
+ neutron_lbaas_stop
+ fi
+ if is_service_enabled q-fwaas; then
+ neutron_fwaas_stop
+ fi
+ if is_service_enabled q-vpn; then
+ neutron_vpn_stop
+ fi
+ if is_service_enabled q-metering; then
+ neutron_metering_stop
+ fi
}
# cleanup_neutron() - Remove residual data files, anything left over from previous
@@ -501,7 +531,7 @@
fi
# delete all namespaces created by neutron
- for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do
+ for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do
sudo ip netns delete ${ns}
done
}
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index e406146..f95fcb7 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -62,6 +62,9 @@
if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
fi
+
+ Q_L3_ENABLED=True
+ Q_L3_ROUTER_PER_TENANT=True
}
function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 8d2e303..b5b1873 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -34,10 +34,13 @@
ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
function populate_ml2_config() {
- OPTS=$1
- CONF=$2
- SECTION=$3
+ CONF=$1
+ SECTION=$2
+ OPTS=$3
+ if [ -z "$OPTS" ]; then
+ return
+ fi
for I in "${OPTS[@]}"; do
# Replace the first '=' with ' ' for iniset syntax
iniset $CONF $SECTION ${I/=/ }
@@ -102,19 +105,17 @@
# Since we enable the tunnel TypeDrivers, also enable a local_ip
iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
- populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
- populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
- populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS
- populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS
- populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
- populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan
- fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
}
function has_neutron_plugin_security_group() {
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index d4050bb..bccd301 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -6,8 +6,6 @@
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
-#source $TOP_DIR/lib/neutron_plugins/ovs_base
-
function neutron_plugin_create_nova_conf() {
:
}
@@ -23,11 +21,17 @@
Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
+ PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
+ PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
+ PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
}
function neutron_plugin_configure_service() {
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector password $PLUMGRID_PASSWORD
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT
}
function neutron_plugin_configure_debug_command() {
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 1597e85..580071f 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -23,5 +23,9 @@
iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
}
+function neutron_fwaas_stop() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index c38f904..2699a9b 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -45,5 +45,10 @@
fi
}
+function neutron_lbaas_stop() {
+ pids=$(ps aux | awk '/haproxy/ { print $2 }')
+ [ ! -z "$pids" ] && sudo kill $pids
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 629f3b7..b105429 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -26,5 +26,9 @@
cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME
}
+function neutron_metering_stop() {
+ :
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index b8f5c7d..55d0a76 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -22,5 +22,16 @@
fi
}
+function neutron_vpn_stop() {
+ local ipsec_data_dir=$DATA_DIR/neutron/ipsec
+ local pids
+ if [ -d $ipsec_data_dir ]; then
+ pids=$(find $ipsec_data_dir -name 'pluto.pid' -exec cat {} \;)
+ fi
+ if [ -n "$pids" ]; then
+ sudo kill $pids
+ fi
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira
index 3f2a5af..a24392c 100644
--- a/lib/neutron_thirdparty/nicira
+++ b/lib/neutron_thirdparty/nicira
@@ -20,7 +20,7 @@
NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2}
# Re-declare floating range as it's needed also in stop_nicira, which
# is invoked by unstack.sh
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
function configure_nicira() {
:
@@ -33,7 +33,7 @@
echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR
fi
# Make sure the interface is up, but not configured
- sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up
+ sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up
# Save and then flush the IP addresses on the interface
addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE
@@ -45,7 +45,7 @@
sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE
nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac
+ sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE
for address in $addresses; do
sudo ip addr add dev $PUBLIC_BRIDGE $address
done
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index 9efd3f6..bdc2356 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -62,7 +62,7 @@
sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \
$TREMA_SS_APACHE_CONFIG
sudo a2enmod rewrite actions
- sudo a2ensite sliceable_switch
+ sudo a2ensite sliceable_switch.conf
cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
diff --git a/lib/nova b/lib/nova
index 5fd0beb..e754341 100644
--- a/lib/nova
+++ b/lib/nova
@@ -398,6 +398,7 @@
# Add keystone authtoken configuration
iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $NOVA_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
@@ -652,7 +653,7 @@
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
- screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
+ screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
done
else
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
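
The fake-driver branch now hands every fake nova-compute an extra config file built with bash process substitution, so each process registers under a distinct host name; with oslo.config, values from later --config-file arguments take precedence for repeated options. The mechanism in isolation (illustrative, not tied to nova):

    # Each <(...) expands to a /dev/fd/N path that holds only the per-process override.
    for i in 1 2 3; do
        cat <(echo -e "[DEFAULT]\nhost=${HOSTNAME}${i}")
    done
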
diff --git a/lib/savanna b/lib/savanna
index e9dbe72..6794e36 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -3,7 +3,6 @@
# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
@@ -28,11 +27,12 @@
SAVANNA_DIR=$DEST/savanna
SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
SAVANNA_CONF_FILE=savanna.conf
-ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
-ADMIN_NAME=${ADMIN_NAME:-admin}
-ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova}
SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
+SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
+SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
+SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
# Support entry points installation of console scripts
if [[ -d $SAVANNA_DIR/bin ]]; then
SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
@@ -43,6 +43,42 @@
# Functions
# ---------
+# create_savanna_accounts() - Set up common required savanna accounts
+#
+# Tenant User Roles
+# ------------------------------
+# service savanna admin
+function create_savanna_accounts() {
+
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ SAVANNA_USER=$(keystone user-create \
+ --name=savanna \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=savanna@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant-id $SERVICE_TENANT \
+ --user-id $SAVANNA_USER \
+ --role-id $ADMIN_ROLE
+
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ SAVANNA_SERVICE=$(keystone service-create \
+ --name=savanna \
+ --type=data_processing \
+ --description="Savanna Data Processing" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $SAVANNA_SERVICE \
+ --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ fi
+}
+
# configure_savanna() - Set config files, create data dirs, etc
function configure_savanna() {
@@ -54,9 +90,9 @@
# Copy over savanna configuration file and configure common parameters.
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
recreate_database savanna utf8
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
index e967622..7713a78 100644
--- a/lib/savanna-dashboard
+++ b/lib/savanna-dashboard
@@ -29,7 +29,7 @@
SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard
+SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
# Functions
diff --git a/lib/swift b/lib/swift
index 8a1489b..96929db 100644
--- a/lib/swift
+++ b/lib/swift
@@ -378,6 +378,9 @@
iniuncomment ${swift_node_config} DEFAULT log_facility
iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+ iniuncomment ${swift_node_config} DEFAULT workers
+ iniset ${swift_node_config} DEFAULT workers 1
+
iniuncomment ${swift_node_config} DEFAULT disable_fallocate
iniset ${swift_node_config} DEFAULT disable_fallocate true
@@ -526,14 +529,19 @@
fi
SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
}
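
Each new die_if_not_set call guards a command-substitution capture, so an empty result aborts the run at the offending line instead of letting a blank ID leak into the next keystone call. The shape of the pattern, shown with a hypothetical role lookup:

    # Assumes devstack's functions file is sourced (for die_if_not_set).
    ROLE_ID=$(keystone role-list | awk "/ admin / { print \$2 }")
    die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for admin"
    # Execution only continues past this point if ROLE_ID is non-empty.
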
diff --git a/lib/tempest b/lib/tempest
index 803b740..0969b2d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,6 +15,7 @@
# - ``PUBLIC_NETWORK_NAME``
# - ``Q_USE_NAMESPACE``
# - ``Q_ROUTER_NAME``
+# - ``Q_L3_ENABLED``
# - ``VIRT_DRIVER``
# - ``LIBVIRT_TYPE``
# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -146,12 +147,21 @@
if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
available_flavors=$(nova flavor-list)
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
- nova flavor-create m1.nano 42 64 0 1
+ if is_arch "ppc64"; then
+ # qemu needs at least 128MB of memory to boot on ppc64
+ nova flavor-create m1.nano 42 128 0 1
+ else
+ nova flavor-create m1.nano 42 64 0 1
+ fi
fi
flavor_ref=42
boto_instance_type=m1.nano
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
- nova flavor-create m1.micro 84 128 0 1
+ if is_arch "ppc64"; then
+ nova flavor-create m1.micro 84 256 0 1
+ else
+ nova flavor-create m1.micro 84 128 0 1
+ fi
fi
flavor_ref_alt=84
else
@@ -193,14 +203,16 @@
if [ "$Q_USE_NAMESPACE" != "False" ]; then
tenant_networks_reachable=false
- ssh_connect_method="floating"
+ if ! is_service_enabled n-net; then
+ ssh_connect_method="floating"
+ fi
else
tenant_networks_reachable=true
fi
ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
- if is_service_enabled q-l3; then
+ if [ "$Q_L3_ENABLED" = "True" ]; then
public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
awk '{print $2}')
if [ "$Q_USE_NAMESPACE" == "False" ]; then
@@ -281,7 +293,9 @@
iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
# Orchestration test image
- if [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then
+ if [[ ! -z "$HEAT_FETCHED_TEST_IMAGE" ]]; then
+ iniset $TEMPEST_CONF orchestration image_ref "$HEAT_FETCHED_TEST_IMAGE"
+ elif [[ "$HEAT_CREATE_TEST_IMAGE" = "True" ]]; then
disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest"
iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest"
fi
@@ -310,6 +324,9 @@
# cli
iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR
+ # Networking
+ iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
+
# service_available
for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do
if is_service_enabled $service ; then
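
The ppc64 branches double the flavor memory because qemu guests on that architecture need at least 128MB to boot; is_arch is devstack's helper for testing the machine architecture. A rough sketch of the decision, assuming the helper reduces to a uname -m comparison (an assumption about its implementation):

    if [[ "$(uname -m)" == "ppc64" ]]; then
        nano_mem=128    # qemu needs >= 128MB to boot a ppc64 guest
        micro_mem=256
    else
        nano_mem=64
        micro_mem=128
    fi
    echo "m1.nano=${nano_mem}MB m1.micro=${micro_mem}MB"
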
diff --git a/lib/trove b/lib/trove
index 5ba4de5..6d5a56e 100644
--- a/lib/trove
+++ b/lib/trove
@@ -32,6 +32,17 @@
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_BIN_DIR=/usr/local/bin
+# setup_trove_logging() - Adds logging configuration to conf files
+function setup_trove_logging() {
+ local CONF=$1
+ iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $CONF DEFAULT use_syslog $SYSLOG
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ # Add color to logging output
+ setup_colorized_logging $CONF DEFAULT tenant user
+ fi
+}
+
# create_trove_accounts() - Set up common required trove accounts
# Tenant User Roles
@@ -121,6 +132,9 @@
iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove
sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+ setup_trove_logging $TROVE_CONF_DIR/trove.conf
+ setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
@@ -132,6 +146,7 @@
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
fi
# (Re)create trove conductor conf file if needed
@@ -143,6 +158,7 @@
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove
+ setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf
fi
}
diff --git a/stack.sh b/stack.sh
index a2ef679..ce5fbd4 100755
--- a/stack.sh
+++ b/stack.sh
@@ -260,7 +260,7 @@
# from either range when attempting to guess the IP to use for the host.
# Note that setting FIXED_RANGE may be necessary when running DevStack
# in an OpenStack cloud that uses either of these address ranges internally.
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
@@ -1080,8 +1080,10 @@
# Create an access key and secret key for nova ec2 register image
if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+ die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova"
NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
- CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
+ die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME"
+ CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID)
ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY"
@@ -1275,6 +1277,13 @@
done
fi
+# Local Configuration
+# ===================
+
+# Apply configuration from local.conf if it exists for layer 2 services
+# Phase: post-extra
+merge_config_group $TOP_DIR/local.conf post-extra
+
# Run local script
# ================
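
With the added merge_config_group call, a [[post-extra|<config-file>]] meta-section in local.conf is applied only after everything in extra.d has run. A minimal illustration, using $NOVA_CONF as the target and an arbitrary example option:

    # Append a post-extra section to local.conf (single quotes keep $NOVA_CONF literal;
    # devstack resolves it when the group is merged).
    printf '%s\n' '[[post-extra|$NOVA_CONF]]' '[DEFAULT]' 'api_rate_limit = False' >> local.conf
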
diff --git a/stackrc b/stackrc
index 7eda5a5..695bdb1 100644
--- a/stackrc
+++ b/stackrc
@@ -178,7 +178,7 @@
BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
# ryu service
@@ -282,6 +282,10 @@
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
esac
+HEAT_FETCHED_TEST_IMAGE=${HEAT_FETCHED_TEST_IMAGE:-""}
+if [[ "$HEAT_FETCHED_TEST_IMAGE" == "Fedora-i386-20-20131211.1-sda" ]]; then
+ IMAGE_URLS+=",https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/i386/$HEAT_FETCHED_TEST_IMAGE.qcow2"
+fi
# 10Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
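
HEAT_FETCHED_TEST_IMAGE is empty by default; setting it to the Fedora image name recognized above adds the qcow2 to IMAGE_URLS, and the lib/tempest change earlier in this diff then points orchestration image_ref at it. Opting in is a single localrc setting:

    # In localrc / local.conf; the value must match the name stackrc checks for.
    HEAT_FETCHED_TEST_IMAGE=Fedora-i386-20-20131211.1-sda
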
diff --git a/tests/functions.sh b/tests/functions.sh
index 40376aa..95dafe1 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -38,195 +38,6 @@
fi
-echo "Testing INI functions"
-
-cat >test.ini <<EOF
-[default]
-# comment an option
-#log_file=./log.conf
-log_file=/etc/log.conf
-handlers=do not disturb
-
-[aaa]
-# the commented option should not change
-#handlers=cc,dd
-handlers = aa, bb
-
-[bbb]
-handlers=ee,ff
-
-[ ccc ]
-spaces = yes
-
-[ddd]
-empty =
-
-[eee]
-multi = foo1
-multi = foo2
-EOF
-
-# Test with spaces
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ "$VAL" == "aa, bb" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini aaa handlers "11, 22"
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ "$VAL" == "11, 22" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test with spaces in section header
-
-VAL=$(iniget test.ini " ccc " spaces)
-if [[ "$VAL" == "yes" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini "b b" opt_ion 42
-
-VAL=$(iniget test.ini "b b" opt_ion)
-if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test without spaces, end of file
-
-VAL=$(iniget test.ini bbb handlers)
-if [[ "$VAL" == "ee,ff" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini bbb handlers "33,44"
-
-VAL=$(iniget test.ini bbb handlers)
-if [[ "$VAL" == "33,44" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# test empty option
-if ini_has_option test.ini ddd empty; then
- echo "OK: ddd.empty present"
-else
- echo "ini_has_option failed: ddd.empty not found"
-fi
-
-# test non-empty option
-if ini_has_option test.ini bbb handlers; then
- echo "OK: bbb.handlers present"
-else
- echo "ini_has_option failed: bbb.handlers not found"
-fi
-
-# test changing empty option
-iniset test.ini ddd empty "42"
-
-VAL=$(iniget test.ini ddd empty)
-if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test section not exist
-
-VAL=$(iniget test.ini zzz handlers)
-if [[ -z "$VAL" ]]; then
- echo "OK: zzz not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini zzz handlers "999"
-
-VAL=$(iniget test.ini zzz handlers)
-if [[ -n "$VAL" ]]; then
- echo "OK: zzz not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test option not exist
-
-VAL=$(iniget test.ini aaa debug)
-if [[ -z "$VAL" ]]; then
- echo "OK aaa.debug not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-if ! ini_has_option test.ini aaa debug; then
- echo "OK aaa.debug not present"
-else
- echo "ini_has_option failed: aaa.debug"
-fi
-
-iniset test.ini aaa debug "999"
-
-VAL=$(iniget test.ini aaa debug)
-if [[ -n "$VAL" ]]; then
- echo "OK aaa.debug present"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test comments
-
-inicomment test.ini aaa handlers
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ -z "$VAL" ]]; then
- echo "OK"
-else
- echo "inicomment failed: $VAL"
-fi
-
-# Test multiple line iniset/iniget
-iniset_multiline test.ini eee multi bar1 bar2
-
-VAL=$(iniget_multiline test.ini eee multi)
-if [[ "$VAL" == "bar1 bar2" ]]; then
- echo "OK: iniset_multiline"
-else
- echo "iniset_multiline failed: $VAL"
-fi
-
-# Test iniadd with exiting values
-iniadd test.ini eee multi bar3
-VAL=$(iniget_multiline test.ini eee multi)
-if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
- echo "OK: iniadd"
-else
- echo "iniadd failed: $VAL"
-fi
-
-# Test iniadd with non-exiting values
-iniadd test.ini eee non-multi foobar1 foobar2
-VAL=$(iniget_multiline test.ini eee non-multi)
-if [[ "$VAL" == "foobar1 foobar2" ]]; then
- echo "OK: iniadd with non-exiting value"
-else
- echo "iniadd with non-exsting failed: $VAL"
-fi
-
-rm test.ini
-
# Enabling/disabling services
echo "Testing enable_service()"
diff --git a/tests/test_config.sh b/tests/test_config.sh
index fed2e7d..39603c9 100755
--- a/tests/test_config.sh
+++ b/tests/test_config.sh
@@ -70,6 +70,12 @@
[[test1|test1c.conf]]
$TEST_1C_ADD
+
+[[test3|test-space.conf]]
+[DEFAULT]
+attribute=value
+
+# the above line has a single space
EOF
@@ -176,4 +182,14 @@
echo "failed: $VAL != $EXPECT_VAL"
fi
-rm -f test.conf test1c.conf test2a.conf
+echo -n "merge_config_file test-space: "
+rm -f test-space.conf
+merge_config_file test.conf test3 test-space.conf
+VAL=$(cat test-space.conf)
+# iniset adds a blank line if it creates the file...
+EXPECT_VAL="
+[DEFAULT]
+attribute = value"
+check_result "$VAL" "$EXPECT_VAL"
+
+rm -f test.conf test1c.conf test2a.conf test-space.conf
diff --git a/tests/test_ini.sh b/tests/test_ini.sh
new file mode 100755
index 0000000..598cd57
--- /dev/null
+++ b/tests/test_ini.sh
@@ -0,0 +1,240 @@
+#!/usr/bin/env bash
+
+# Tests for DevStack INI functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+
+
+echo "Testing INI functions"
+
+cat >test.ini <<EOF
+[default]
+# comment an option
+#log_file=./log.conf
+log_file=/etc/log.conf
+handlers=do not disturb
+
+[aaa]
+# the commented option should not change
+#handlers=cc,dd
+handlers = aa, bb
+
+[bbb]
+handlers=ee,ff
+
+[ ccc ]
+spaces = yes
+
+[ddd]
+empty =
+
+[eee]
+multi = foo1
+multi = foo2
+EOF
+
+# Test with missing arguments
+
+BEFORE=$(cat test.ini)
+
+echo -n "iniset: test missing attribute argument: "
+iniset test.ini aaa
+NO_ATTRIBUTE=$(cat test.ini)
+if [[ "$BEFORE" == "$NO_ATTRIBUTE" ]]; then
+ echo "OK"
+else
+ echo "failed"
+fi
+
+echo -n "iniset: test missing section argument: "
+iniset test.ini
+NO_SECTION=$(cat test.ini)
+if [[ "$BEFORE" == "$NO_SECTION" ]]; then
+ echo "OK"
+else
+ echo "failed"
+fi
+
+# Test with spaces
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "aa, bb" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini aaa handlers "11, 22"
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "11, 22" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test with spaces in section header
+
+VAL=$(iniget test.ini " ccc " spaces)
+if [[ "$VAL" == "yes" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini "b b" opt_ion 42
+
+VAL=$(iniget test.ini "b b" opt_ion)
+if [[ "$VAL" == "42" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test without spaces, end of file
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "ee,ff" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini bbb handlers "33,44"
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "33,44" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test empty option
+if ini_has_option test.ini ddd empty; then
+ echo "OK: ddd.empty present"
+else
+ echo "ini_has_option failed: ddd.empty not found"
+fi
+
+# test non-empty option
+if ini_has_option test.ini bbb handlers; then
+ echo "OK: bbb.handlers present"
+else
+ echo "ini_has_option failed: bbb.handlers not found"
+fi
+
+# test changing empty option
+iniset test.ini ddd empty "42"
+
+VAL=$(iniget test.ini ddd empty)
+if [[ "$VAL" == "42" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test pipe in option
+iniset test.ini aaa handlers "a|b"
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "a|b" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test space in option
+iniset test.ini aaa handlers "a b"
+
+VAL="$(iniget test.ini aaa handlers)"
+if [[ "$VAL" == "a b" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test section not exist
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -z "$VAL" ]]; then
+ echo "OK: zzz not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini zzz handlers "999"
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -n "$VAL" ]]; then
+  echo "OK: zzz now present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test option not exist
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -z "$VAL" ]]; then
+ echo "OK aaa.debug not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+if ! ini_has_option test.ini aaa debug; then
+ echo "OK aaa.debug not present"
+else
+ echo "ini_has_option failed: aaa.debug"
+fi
+
+iniset test.ini aaa debug "999"
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -n "$VAL" ]]; then
+ echo "OK aaa.debug present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test comments
+
+inicomment test.ini aaa handlers
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ -z "$VAL" ]]; then
+ echo "OK"
+else
+ echo "inicomment failed: $VAL"
+fi
+
+# Test multiple line iniset/iniget
+iniset_multiline test.ini eee multi bar1 bar2
+
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2" ]]; then
+ echo "OK: iniset_multiline"
+else
+ echo "iniset_multiline failed: $VAL"
+fi
+
+# Test iniadd with existing values
+iniadd test.ini eee multi bar3
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
+ echo "OK: iniadd"
+else
+ echo "iniadd failed: $VAL"
+fi
+
+# Test iniadd with non-existing values
+iniadd test.ini eee non-multi foobar1 foobar2
+VAL=$(iniget_multiline test.ini eee non-multi)
+if [[ "$VAL" == "foobar1 foobar2" ]]; then
+  echo "OK: iniadd with non-existing value"
+else
+  echo "iniadd with non-existing failed: $VAL"
+fi
+
+rm test.ini
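
The INI checks now live in their own executable script, so they can be run on their own without going through tests/functions.sh; the script writes a scratch test.ini in the current directory and removes it when done. From the top of a devstack checkout:

    ./tests/test_ini.sh    # prints one OK/failed line per assertion
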
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 6b9b25e..d714d33 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -67,7 +67,7 @@
curl -O $PIP_GET_PIP_URL; \
)
fi
- sudo python $FILES/get-pip.py
+ sudo -E python $FILES/get-pip.py
}
function install_pip_tarball() {
@@ -75,7 +75,7 @@
curl -O $PIP_TAR_URL; \
tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
cd pip-$INSTALL_PIP_VERSION; \
- sudo python setup.py install 1>/dev/null; \
+ sudo -E python setup.py install 1>/dev/null; \
)
}
@@ -87,7 +87,7 @@
# Eradicate any and all system packages
uninstall_package python-pip
-if [[ -n "$USE_GET_PIP" ]]; then
+if [[ "$USE_GET_PIP" == "1" ]]; then
install_get_pip
else
install_pip_tarball
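
The switch to sudo -E matters mainly behind a proxy: it asks sudo to preserve the caller's environment so http_proxy/https_proxy reach the pip bootstrap. Whether it is honored depends on the sudoers policy (env_reset and friends), so the following is illustrative only:

    export http_proxy=http://proxy.example.com:3128    # assumed proxy, for illustration
    sudo    python -c 'import os; print(os.environ.get("http_proxy"))'   # typically None: env_reset drops it
    sudo -E python -c 'import os; print(os.environ.get("http_proxy"))'   # proxy value preserved
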
diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh
index b49ce9f..a7e635c 100755
--- a/tools/jenkins/adapters/euca.sh
+++ b/tools/jenkins/adapters/euca.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh'
diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh
index a97f935..8da1eeb 100755
--- a/tools/jenkins/adapters/floating_ips.sh
+++ b/tools/jenkins/adapters/floating_ips.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
index ec29209..0a0b6c0 100755
--- a/tools/jenkins/adapters/volumes.sh
+++ b/tools/jenkins/adapters/volumes.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh'
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index d0cdf17..958102b 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -93,13 +93,48 @@
tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack
cd $TOP_DIR
-# Run devstack on launch
-cat <<EOF >$STAGING_DIR/etc/rc.local
-# network restart required for getting the right gateway
-/etc/init.d/networking restart
-chown -R $STACK_USER /opt/stack
-su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER
-exit 0
+# Create an upstart job (task) for devstack, which can interact with the console
+cat >$STAGING_DIR/etc/init/devstack.conf << EOF
+start on stopped rc RUNLEVEL=[2345]
+
+console output
+task
+
+pre-start script
+ rm -f /var/run/devstack.succeeded
+end script
+
+script
+ initctl stop hvc0 || true
+
+ # Read any leftover characters from standard input
+ while read -n 1 -s -t 0.1 -r ignored; do
+ true
+ done
+
+ clear
+
+ chown -R $STACK_USER /opt/stack
+
+ if su -c "/opt/stack/run.sh" $STACK_USER; then
+ touch /var/run/devstack.succeeded
+ fi
+
+ # Update /etc/issue
+ {
+ echo "OpenStack VM - Installed by DevStack"
+ IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p')
+ echo " Management IP: \$IPADDR"
+ echo -n " Devstack run: "
+ if [ -e /var/run/devstack.succeeded ]; then
+ echo "SUCCEEDED"
+ else
+ echo "FAILED"
+ fi
+ echo ""
+ } > /etc/issue
+ initctl start hvc0 > /dev/null 2>&1
+end script
EOF
# Configure the hostname
@@ -138,8 +173,9 @@
# Configure run.sh
cat <<EOF >$STAGING_DIR/opt/stack/run.sh
#!/bin/bash
+set -eux
cd /opt/stack/devstack
-killall screen
-VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh
+./unstack.sh || true
+./stack.sh
EOF
chmod 755 $STAGING_DIR/opt/stack/run.sh
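
Because the run is now driven by an upstart task, progress and outcome can be checked from inside the domU with standard upstart tooling plus the success marker the job writes; this mirrors the checks install_os_domU.sh performs over SSH later in this diff:

    sudo service devstack status      # reports start/running while stack.sh is still going
    test -e /var/run/devstack.succeeded && echo SUCCEEDED || echo "FAILED (or still running)"
    cat /etc/issue                    # rewritten by the job with the management IP and the result
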
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 6ce334b..41b184c 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -367,25 +367,20 @@
if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then
set +x
- echo "VM Launched - Waiting for startup script"
- # wait for log to appear
- while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do
+ echo "VM Launched - Waiting for devstack to start"
+ while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
done
- echo -n "Running"
- while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ]
- do
+ echo -n "devstack is running"
+ while ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
echo -n "."
done
echo "done!"
set -x
- # output the run.sh.log
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log'
-
- # Fail if the expected text is not found
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in'
+ # Fail if devstack did not succeed
+ ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /var/run/devstack.succeeded'
set +x
echo "################################################################################"
@@ -399,11 +394,12 @@
echo ""
echo "All Finished!"
echo "Now, you can monitor the progress of the stack.sh installation by "
- echo "tailing /opt/stack/run.sh.log from within your domU."
+ echo "looking at the console of your domU / checking the log files."
echo ""
echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
- echo "and then do: 'tail -f /opt/stack/run.sh.log'"
+ echo "and then do: 'sudo service devstack status' to check if devstack is still running."
+ echo "Check that /var/run/devstack.succeeded exists"
echo ""
- echo "When the script completes, you can then visit the OpenStack Dashboard"
+ echo "When devstack completes, you can visit the OpenStack Dashboard"
echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
fi
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 5796268..cd28234 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -13,7 +13,13 @@
# Size of image
VDI_MB=${VDI_MB:-5000}
-OSDOMU_MEM_MB=3072
+
+# Devstack now contains many components. 3GB of RAM is not enough to prevent
+# swapping and memory fragmentation; the latter can cause failures such as
+# blkfront failing to plug a VBD and lead to random test failures.
+#
+# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs
+OSDOMU_MEM_MB=4096
OSDOMU_VDI_GB=8
# Network mapping. Specify bridge names or network names. Network names may