Merge "Allow heat tests to use already uploaded test image"
diff --git a/.gitignore b/.gitignore
index a3d5b0d..49eb188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
proto
*~
-.*.sw[nop]
+.*.sw?
*.log
*.log.[1-9]
src
diff --git a/README.md b/README.md
index 91d7efb..9914b1e 100644
--- a/README.md
+++ b/README.md
@@ -327,6 +327,7 @@
* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced
* **post-config** - runs after the layer 2 services are configured and before they are started
* **extra** - runs after services are started and before any files in ``extra.d`` are executed
+* **post-extra** - runs after files in ``extra.d`` are executed
The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used.
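+
+For example, to have settings merged during this new phase, a ``local.conf`` might carry a meta-section such as the following (hypothetical values; ``$TEMPEST_CONFIG`` is the Tempest configuration file path set in ``lib/tempest``):
+
+    [[post-extra|$TEMPEST_CONFIG]]
+    [compute]
+    build_timeout = 300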
diff --git a/clean.sh b/clean.sh
index 395941a..480a812 100755
--- a/clean.sh
+++ b/clean.sh
@@ -15,6 +15,8 @@
# Import common functions
source $TOP_DIR/functions
+FILES=$TOP_DIR/files
+
# Load local configuration
source $TOP_DIR/stackrc
@@ -84,6 +86,10 @@
cleanup_neutron
cleanup_swift
+if is_service_enabled ldap; then
+ cleanup_ldap
+fi
+
# Do the hypervisor cleanup until this can be moved back into lib/nova
if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
cleanup_nova_hypervisor
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 96241f9..1b1ac06 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -67,7 +67,10 @@
exit_if_aggregate_present $AGGREGATE_NAME
AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
+
AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE2_ID "Failure creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
# check aggregate created
nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 3b3d3ba..ed8ba63 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -149,7 +149,7 @@
# Create the bootable volume
start_time=$(date +%s)
-cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+cinder create --image-id $IMAGE --display-name=$VOL_NAME --display-description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
@@ -165,10 +165,10 @@
# Boot instance
# -------------
-# Boot using the --block_device_mapping param. The format of mapping is:
+# Boot using the --block-device-mapping param. The format of mapping is:
# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
# Leaving the middle two fields blank appears to do-the-right-thing
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security-groups=$SECGROUP --key-name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 1e68042..e79774f 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -56,10 +56,8 @@
unset OS_AUTH_URL
# Common authentication args
-TENANT_ARG="--os_tenant_name=$x_TENANT_NAME"
-TENANT_ARG_DASH="--os-tenant-name=$x_TENANT_NAME"
-ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL"
-ARGS_DASH="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
+TENANT_ARG="--os-tenant-name=$x_TENANT_NAME"
+ARGS="--os-username=$x_USERNAME --os-password=$x_PASSWORD --os-auth-url=$x_AUTH_URL"
# Set global return
RETURN=0
@@ -71,7 +69,7 @@
STATUS_KEYSTONE="Skipped"
else
echo -e "\nTest Keystone"
- if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then
+ if keystone $TENANT_ARG $ARGS catalog --service identity; then
STATUS_KEYSTONE="Succeeded"
else
STATUS_KEYSTONE="Failed"
@@ -90,7 +88,7 @@
else
# Test OSAPI
echo -e "\nTest Nova"
- if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then
+ if nova $TENANT_ARG $ARGS flavor-list; then
STATUS_NOVA="Succeeded"
else
STATUS_NOVA="Failed"
@@ -107,7 +105,7 @@
STATUS_CINDER="Skipped"
else
echo -e "\nTest Cinder"
- if cinder $TENANT_ARG_DASH $ARGS_DASH list; then
+ if cinder $TENANT_ARG $ARGS list; then
STATUS_CINDER="Succeeded"
else
STATUS_CINDER="Failed"
@@ -124,7 +122,7 @@
STATUS_GLANCE="Skipped"
else
echo -e "\nTest Glance"
- if glance $TENANT_ARG_DASH $ARGS_DASH image-list; then
+ if glance $TENANT_ARG $ARGS image-list; then
STATUS_GLANCE="Succeeded"
else
STATUS_GLANCE="Failed"
@@ -141,7 +139,7 @@
STATUS_SWIFT="Skipped"
else
echo -e "\nTest Swift"
- if swift $TENANT_ARG_DASH $ARGS_DASH stat; then
+ if swift $TENANT_ARG $ARGS stat; then
STATUS_SWIFT="Succeeded"
else
STATUS_SWIFT="Failed"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 1a1608c..7055278 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -114,6 +114,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
@@ -126,7 +127,7 @@
# Boot instance
# -------------
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
diff --git a/exercises/marconi.sh b/exercises/marconi.sh
new file mode 100755
index 0000000..1b9788d
--- /dev/null
+++ b/exercises/marconi.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# **marconi.sh**
+
+# Sanity check that Marconi started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled marconi-server || exit 55
+
+curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 7dfa5dc..0a100c0 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -139,24 +139,28 @@
function get_image_id {
local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
echo "$IMAGE_ID"
}
function get_tenant_id {
local TENANT_NAME=$1
local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
echo "$TENANT_ID"
}
function get_user_id {
local USER_NAME=$1
local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
echo "$USER_ID"
}
function get_role_id {
local ROLE_NAME=$1
local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
echo "$ROLE_ID"
}
@@ -169,6 +173,7 @@
function get_flavor_id {
local INSTANCE_TYPE=$1
local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
echo "$FLAVOR_ID"
}
@@ -233,8 +238,9 @@
source $TOP_DIR/openrc admin admin
local TENANT_ID=$(get_tenant_id $TENANT)
source $TOP_DIR/openrc $TENANT $TENANT
- local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
- neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
+ local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
+ neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
neutron-debug probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
}
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 7d80570..eb32cc7 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -56,6 +56,7 @@
# Check to make sure rules were added
SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
+die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
for i in "${RULES_TO_ADD[@]}"; do
skip=
for j in "${SEC_GROUP_RULES[@]}"; do
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 9ee9fa9..21b5d21 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -118,6 +118,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
@@ -129,7 +130,7 @@
# Boot instance
# -------------
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security-groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
@@ -155,7 +156,7 @@
# Create a new volume
start_time=$(date +%s)
-cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+cinder create --display-name $VOL_NAME --display-description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
die $LINENO "Volume $VOL_NAME not created"
diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh
new file mode 100644
index 0000000..a96a4c5
--- /dev/null
+++ b/extras.d/70-marconi.sh
@@ -0,0 +1,29 @@
+# marconi.sh - Devstack extras script to install Marconi
+
+if is_service_enabled marconi-server; then
+ if [[ "$1" == "source" ]]; then
+ # Initial source
+ source $TOP_DIR/lib/marconi
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Installing Marconi"
+ install_marconiclient
+ install_marconi
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring Marconi"
+ configure_marconi
+ configure_marconiclient
+
+ if is_service_enabled key; then
+ create_marconi_accounts
+ fi
+
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+ echo_summary "Initializing Marconi"
+ init_marconi
+ start_marconi
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ stop_marconi
+ fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
index f6881cc..6bbe113 100644
--- a/extras.d/70-savanna.sh
+++ b/extras.d/70-savanna.sh
@@ -14,6 +14,7 @@
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
echo_summary "Configuring Savanna"
configure_savanna
+ create_savanna_accounts
if is_service_enabled horizon; then
configure_savanna_dashboard
fi
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 75b702c..0186e36 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -14,6 +14,9 @@
echo_summary "Initializing Tempest"
configure_tempest
init_tempest
+ elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+ # local.conf Tempest option overrides
+ :
fi
if [[ "$1" == "unstack" ]]; then
diff --git a/extras.d/README.md b/extras.d/README.md
index 88e4265..1dd17da 100644
--- a/extras.d/README.md
+++ b/extras.d/README.md
@@ -19,10 +19,10 @@
source: always called first in any of the scripts, used to set the
initial defaults in a lib/* script or similar
- stack: called by stack.sh. There are three possible values for
+ stack: called by stack.sh. There are four possible values for
the second arg to distinguish the phase stack.sh is in:
- arg 2: install | post-config | extra
+ arg 2: install | post-config | extra | post-extra
unstack: called by unstack.sh
diff --git a/files/apts/general b/files/apts/general
index fcf0b5b..aff687f 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -20,3 +20,4 @@
euca2ools # only for testing client
tar
python-cmd2 # dist:precise
+python2.7
diff --git a/files/apts/ldap b/files/apts/ldap
index 81a00f2..26f7aef 100644
--- a/files/apts/ldap
+++ b/files/apts/ldap
@@ -1,3 +1,3 @@
ldap-utils
-slapd # NOPRIME
+slapd
python-ldap
diff --git a/files/apts/marconi-server b/files/apts/marconi-server
new file mode 100644
index 0000000..bc7ef22
--- /dev/null
+++ b/files/apts/marconi-server
@@ -0,0 +1,3 @@
+python-pymongo
+mongodb-server
+pkg-config
diff --git a/files/ldap/keystone.ldif.in b/files/ldap/keystone.ldif.in
new file mode 100644
index 0000000..cf51907
--- /dev/null
+++ b/files/ldap/keystone.ldif.in
@@ -0,0 +1,26 @@
+dn: ${BASE_DN}
+objectClass: dcObject
+objectClass: organizationalUnit
+dc: ${BASE_DC}
+ou: ${BASE_DC}
+
+dn: ou=UserGroups,${BASE_DN}
+objectClass: organizationalUnit
+ou: UserGroups
+
+dn: ou=Users,${BASE_DN}
+objectClass: organizationalUnit
+ou: Users
+
+dn: ou=Roles,${BASE_DN}
+objectClass: organizationalUnit
+ou: Roles
+
+dn: ou=Projects,${BASE_DN}
+objectClass: organizationalUnit
+ou: Projects
+
+dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN}
+objectClass: organizationalRole
+ou: _member_
+cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index e522150..de3b69d 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,10 +1,15 @@
dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
changetype: modify
replace: olcSuffix
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
-
replace: olcRootDN
-olcRootDN: dc=Manager,dc=openstack,dc=org
+olcRootDN: ${MANAGER_DN}
-
${LDAP_ROOTPW_COMMAND}: olcRootPW
olcRootPW: ${SLAPPASS}
+-
+replace: olcDbIndex
+olcDbIndex: objectClass eq
+olcDbIndex: default pres,eq
+olcDbIndex: cn,sn,givenName,co
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
deleted file mode 100644
index 02caf3f..0000000
--- a/files/ldap/openstack.ldif
+++ /dev/null
@@ -1,26 +0,0 @@
-dn: dc=openstack,dc=org
-dc: openstack
-objectClass: dcObject
-objectClass: organizationalUnit
-ou: openstack
-
-dn: ou=UserGroups,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: UserGroups
-
-dn: ou=Users,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Users
-
-dn: ou=Roles,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Roles
-
-dn: ou=Projects,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Projects
-
-dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
-objectClass: organizationalRole
-ou: _member_
-cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/base-config.ldif b/files/ldap/suse-base-config.ldif.in
similarity index 77%
rename from files/ldap/base-config.ldif
rename to files/ldap/suse-base-config.ldif.in
index 026d8bc..00256ee 100644
--- a/files/ldap/base-config.ldif
+++ b/files/ldap/suse-base-config.ldif.in
@@ -12,8 +12,10 @@
cn: schema
include: file:///etc/openldap/schema/core.ldif
+include: file:///etc/openldap/schema/cosine.ldif
+include: file:///etc/openldap/schema/inetorgperson.ldif
dn: olcDatabase={1}hdb,cn=config
objectClass: olcHdbConfig
olcDbDirectory: /var/lib/ldap
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server
new file mode 100644
index 0000000..d7b7ea8
--- /dev/null
+++ b/files/rpms/marconi-server
@@ -0,0 +1,3 @@
+selinux-policy-targeted
+mongodb-server
+pymongo
diff --git a/functions b/functions
index 6137aaf..995be57 100644
--- a/functions
+++ b/functions
@@ -554,7 +554,7 @@
function is_arch {
ARCH_TYPE=$1
- [ "($uname -m)" = "$ARCH_TYPE" ]
+ [[ "$(uname -m)" == "$ARCH_TYPE" ]]
}
# Checks if installed Apache is <= given version
@@ -729,6 +729,8 @@
local option=$3
local value=$4
+ [[ -z $section || -z $option ]] && return
+
if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
# Add section at the end
echo -e "\n[$section]" >>"$file"
@@ -739,8 +741,9 @@
$option = $value
" "$file"
else
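+        # Use a non-printing character as the sed delimiter so that option
+        # values containing '/' or '|' do not break the expression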
+ local sep=$(echo -ne "\x01")
# Replace it
- sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file"
+ sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
fi
}
@@ -1351,10 +1354,9 @@
# Create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
-
+ IMAGE_FNAME=`basename "$image_url"`
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+aki style), then extracts it.
- IMAGE_FNAME=`basename "$image_url"`
if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
wget -c $image_url -O $FILES/$IMAGE_FNAME
if [[ $? -ne 0 ]]; then
@@ -1410,13 +1412,92 @@
vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)"
vmdk_create_type="${vmdk_create_type#*\"}"
vmdk_create_type="${vmdk_create_type%?}"
+
+ descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
+ `"should use a descriptor-data pair."
if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
vmdk_disktype="sparse"
- elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then
- die $LINENO "Monolithic flat disks should use a descriptor-data pair." \
- "Please provide the disk and not the descriptor."
+ elif [[ "$vmdk_create_type" = "monolithicFlat" || \
+ "$vmdk_create_type" = "vmfs" ]]; then
+ # Attempt to retrieve the *-flat.vmdk
+ flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)"
+ flat_fname="${flat_fname#*\"}"
+ flat_fname="${flat_fname%?}"
+        if [[ -z "$flat_fname" ]]; then
+ flat_fname="$IMAGE_NAME-flat.vmdk"
+ fi
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_url="${image_url:0:$path_len}$flat_fname"
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the *-flat.vmdk: $flat_url"
+ if [[ $flat_url != file* ]]; then
+ if [[ ! -f $FILES/$flat_fname || \
+ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
+ wget -c $flat_url -O $FILES/$flat_fname
+ if [[ $? -ne 0 ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ fi
+ if $flat_found; then
+ IMAGE="$FILES/${flat_fname}"
+ fi
+ else
+ IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
+ if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ if ! $flat_found; then
+ IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+ fi
+ fi
+ if $flat_found; then
+ IMAGE_NAME="${flat_fname}"
+ fi
+ vmdk_disktype="preallocated"
+ elif [[ -z "$vmdk_create_type" ]]; then
+ # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
+ # to retrieve appropriate metadata
+ if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
+ warn $LINENO "Expected filename suffix: '-flat'."`
+ `" Filename provided: ${IMAGE_NAME}"
+ else
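+            # e.g. a hypothetical IMAGE_NAME "disk-flat" yields descriptor_fname "disk.vmdk"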
+ descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_path="${image_url:0:$path_len}"
+ descriptor_url=$flat_path$descriptor_fname
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+ if [[ $flat_path != file* ]]; then
+ if [[ ! -f $FILES/$descriptor_fname || \
+ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+ wget -c $descriptor_url -O $FILES/$descriptor_fname
+ if [[ $? -ne 0 ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ descriptor_url="$FILES/$descriptor_fname"
+ else
+ descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+ if [[ ! -f $descriptor_url || \
+ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ if $descriptor_found; then
+ vmdk_adapter_type="$(head -25 $descriptor_url |"`
+ `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
+ vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+ vmdk_adapter_type="${vmdk_adapter_type%?}"
+ fi
+ fi
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+ vmdk_disktype="preallocated"
else
- #TODO(alegendre): handle streamOptimized once supported by VMware driver.
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
vmdk_disktype="preallocated"
fi
@@ -1510,11 +1591,15 @@
*) echo "Do not know what to do with $IMAGE_FNAME"; false;;
esac
+ if is_arch "ppc64"; then
+ IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi"
+ fi
+
if [ "$CONTAINER_FORMAT" = "bare" ]; then
if [ "$UNPACK" = "zcat" ]; then
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
else
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -1522,12 +1607,12 @@
# kernel for use when uploading the root filesystem.
KERNEL_ID=""; RAMDISK_ID="";
if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
fi
if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
fi
}
diff --git a/lib/ceilometer b/lib/ceilometer
index 8e2970c..fac3be1 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -3,7 +3,7 @@
# To enable a minimal set of Ceilometer services, add the following to localrc:
#
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
#
# To ensure Ceilometer alarming services are enabled also, further add to the localrc:
#
@@ -145,6 +145,7 @@
screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
fi
screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
@@ -160,7 +161,7 @@
# stop_ceilometer() - Stop running processes
function stop_ceilometer() {
# Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
+ for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
screen -S $SCREEN_NAME -p $serv -X kill
done
}
diff --git a/lib/cinder b/lib/cinder
index 96d2505..ef3bd81 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -209,6 +209,7 @@
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol
+ inicomment $CINDER_API_PASTE_INI filter:authtoken cafile
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
@@ -219,6 +220,7 @@
iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CINDER_CONF keystone_authtoken admin_user cinder
iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -235,6 +237,11 @@
iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2
iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2
+ # NOTE(mriedem): Work around Cinder "wishlist" bug 1255593
+ if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
+ iniset $CINDER_CONF lvmdriver-1 volume_clear none
+ iniset $CINDER_CONF lvmdriver-2 volume_clear none
+ fi
else
iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
diff --git a/lib/config b/lib/config
index 91cefe4..c28072f 100644
--- a/lib/config
+++ b/lib/config
@@ -95,7 +95,7 @@
/^ *\#/ {
next
}
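+    # Only lines that do not start with whitespace are treated as option assignments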
- /^.+/ {
+ /^[^ \t]+/ {
split($0, d, " *= *")
print "iniset " configfile " " section " " d[1] " \"" d[2] "\""
}
diff --git a/lib/glance b/lib/glance
index f40b1a7..2e29a8f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -82,6 +82,7 @@
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
@@ -99,6 +100,7 @@
iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
diff --git a/lib/heat b/lib/heat
index 7a9ef0d..e44a618 100644
--- a/lib/heat
+++ b/lib/heat
@@ -96,6 +96,7 @@
iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $HEAT_CONF keystone_authtoken admin_user heat
iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
diff --git a/lib/ironic b/lib/ironic
index 9f86e84..099746a 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -98,6 +98,7 @@
iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
diff --git a/lib/keystone b/lib/keystone
index c1fa0af..712a509 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -4,6 +4,7 @@
# Dependencies:
#
# - ``functions`` file
+# - ``tls`` file
# - ``DEST``, ``STACK_USER``
# - ``IDENTITY_API_VERSION``
# - ``BASE_SQL_CONN``
@@ -79,6 +80,13 @@
# valid assignment backends as per dir keystone/identity/backends
KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
+# if we are running with SSL use https protocols
+if is_ssl_enabled_service "key"; then
+ KEYSTONE_AUTH_PROTOCOL="https"
+ KEYSTONE_SERVICE_PROTOCOL="https"
+fi
+
+
# Functions
# ---------
# cleanup_keystone() - Remove residual data files, anything left over from previous
@@ -143,17 +151,17 @@
if is_service_enabled ldap; then
#Set all needed ldap values
- iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
- iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org"
- iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
+ iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
+ iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
iniset $KEYSTONE_CONF ldap use_dumb_member "True"
iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
- iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
- iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
fi
@@ -172,6 +180,15 @@
iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service key; then
+ ensure_certificates KEYSTONE
+
+ iniset $KEYSTONE_CONF ssl enable True
+ iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT
+ iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY
+ fi
+
if is_service_enabled tls-proxy; then
# Set the service ports for a proxy to take the originals
iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT
@@ -320,6 +337,10 @@
# init_keystone() - Initialize databases, etc.
function init_keystone() {
+ if is_service_enabled ldap; then
+ init_ldap
+ fi
+
# (Re)create keystone database
recreate_database keystone utf8
@@ -386,7 +407,7 @@
fi
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/ldap b/lib/ldap
index 80992a7..e4bd416 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -9,68 +9,137 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+
+LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org}
+# Make an array of domain components
+DC=(${LDAP_DOMAIN//./ })
+
+# Leftmost domain component used in top-level entry
+LDAP_BASE_DC=${DC[0]}
+
+# Build the base DN
+dn=""
+for dc in ${DC[*]}; do
+ dn="$dn,dc=$dc"
+done
+LDAP_BASE_DN=${dn#,}
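+# e.g. LDAP_DOMAIN=openstack.org gives LDAP_BASE_DC=openstack and LDAP_BASE_DN="dc=openstack,dc=org"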
+
+LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}"
+LDAP_URL=${LDAP_URL:-ldap://localhost}
+
LDAP_SERVICE_NAME=slapd
+if is_ubuntu; then
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=replace
+elif is_fedora; then
+ LDAP_OLCDB_NUMBER=2
+ LDAP_ROOTPW_COMMAND=add
+elif is_suse; then
+ # SUSE has slappasswd in /usr/sbin/
+ PATH=$PATH:/usr/sbin/
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=add
+ LDAP_SERVICE_NAME=ldap
+fi
+
+
# Functions
# ---------
+# Perform common variable substitutions on the data files
+# _ldap_varsubst file
+function _ldap_varsubst() {
+ local infile=$1
+ sed -e "
+ s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${SLAPPASS}|$SLAPPASS|
+ s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
+ s|\${BASE_DC}|$LDAP_BASE_DC|
+ s|\${BASE_DN}|$LDAP_BASE_DN|
+ s|\${MANAGER_DN}|$LDAP_MANAGER_DN|
+ " $infile
+}
+
+# clean_ldap() - Remove ldap server
+function cleanup_ldap() {
+ uninstall_package $(get_packages ldap)
+ if is_ubuntu; then
+ uninstall_package slapd ldap-utils libslp1
+ sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap
+ elif is_fedora; then
+ sudo rm -rf /etc/openldap /var/lib/ldap
+ elif is_suse; then
+ sudo rm -rf /var/lib/ldap
+ fi
+}
+
+# init_ldap
+# init_ldap() - Initialize databases, etc.
+function init_ldap() {
+ local keystone_ldif
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ # Remove data but not schemas
+ clear_ldap_state
+
+ # Add our top level ldap nodes
+ if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then
+ printf "LDAP already configured for $LDAP_BASE_DC\n"
+ else
+ printf "Configuring LDAP for $LDAP_BASE_DC\n"
+        # A user-provided ${LDAP_BASE_DC}.ldif.in in $FILES/ldap overrides the default keystone.ldif.in
+ if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then
+ keystone_ldif=${LDAP_BASE_DC}.ldif
+ else
+ keystone_ldif=keystone.ldif
+ fi
+ _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif}
+ if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then
+ ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif}
+ fi
+ fi
+
+    rm -rf $TMP_LDAP_DIR
+}
+
# install_ldap
# install_ldap() - Collect source and prepare
function install_ldap() {
echo "Installing LDAP inside function"
- echo "LDAP_PASSWORD is $LDAP_PASSWORD"
echo "os_VENDOR is $os_VENDOR"
- printf "installing"
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ printf "installing OpenLDAP"
if is_ubuntu; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=replace
- sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils
- #automatically starts LDAP on ubuntu so no need to call start_ldap
+ # Ubuntu automatically starts LDAP so no need to call start_ldap()
+ :
elif is_fedora; then
- LDAP_OLCDB_NUMBER=2
- LDAP_ROOTPW_COMMAND=add
start_ldap
elif is_suse; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=add
- LDAP_SERVICE_NAME=ldap
- # SUSE has slappasswd in /usr/sbin/
- PATH=$PATH:/usr/sbin/
- sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif
+ _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif
+ sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif
sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap
start_ldap
fi
- printf "generate password file"
- SLAPPASS=`slappasswd -s $LDAP_PASSWORD`
+ echo "LDAP_PASSWORD is $LDAP_PASSWORD"
+ SLAPPASS=$(slappasswd -s $LDAP_PASSWORD)
+ printf "LDAP secret is $SLAPPASS\n"
- printf "secret is $SLAPPASS\n"
- #create manager.ldif
- TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif`
- sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE
-
- #update ldap olcdb
- sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE
+ # Create manager.ldif and add to olcdb
+ _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif
+ sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif
# On fedora we need to manually add cosine and inetorgperson schemas
- if is_fedora || is_suse; then
+ if is_fedora; then
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
fi
- # add our top level ldap nodes
- if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then
- printf "LDAP already configured for OpenStack\n"
- if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then
- # clear LDAP state
- clear_ldap_state
- # reconfigure LDAP for OpenStack
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
- else
- printf "Configuring LDAP for OpenStack\n"
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
+    rm -rf $TMP_LDAP_DIR
}
# start_ldap() - Start LDAP
@@ -78,7 +147,6 @@
sudo service $LDAP_SERVICE_NAME restart
}
-
# stop_ldap() - Stop LDAP
function stop_ldap() {
sudo service $LDAP_SERVICE_NAME stop
@@ -86,7 +154,7 @@
# clear_ldap_state() - Clear LDAP State
function clear_ldap_state() {
- ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org"
+ ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
}
# Restore xtrace
diff --git a/lib/marconi b/lib/marconi
new file mode 100644
index 0000000..8e0b82b
--- /dev/null
+++ b/lib/marconi
@@ -0,0 +1,171 @@
+# lib/marconi
+# Install and start **Marconi** service
+
+# To enable a minimal set of Marconi services, add the following to localrc:
+# enable_service marconi-server
+#
+# Dependencies:
+# - functions
+# - OS_AUTH_URL for auth in api
+# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
+# - STACK_USER service user
+
+# stack.sh
+# ---------
+# install_marconi
+# configure_marconi
+# init_marconi
+# start_marconi
+# stop_marconi
+# cleanup_marconi
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+MARCONI_DIR=$DEST/marconi
+MARCONICLIENT_DIR=$DEST/python-marconiclient
+MARCONI_CONF_DIR=/etc/marconi
+MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
+MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
+
+# Support potential entry-points console scripts
+MARCONI_BIN_DIR=$(get_python_exec_prefix)
+
+# Set up database backend
+MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb}
+
+
+# Set Marconi repository
+MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git}
+MARCONI_BRANCH=${MARCONI_BRANCH:-master}
+
+# Set client library repository
+MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git}
+MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master}
+
+# Functions
+# ---------
+
+# cleanup_marconi() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_marconi() {
+ mongo marconi --eval "db.dropDatabase();"
+}
+
+# configure_marconiclient() - Set config files, create data dirs, etc
+function configure_marconiclient() {
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# configure_marconi() - Set config files, create data dirs, etc
+function configure_marconi() {
+ setup_develop $MARCONI_DIR
+
+ [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR
+ sudo chown $USER $MARCONI_CONF_DIR
+
+ [ ! -d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR
+ sudo chown $USER $MARCONI_API_LOG_DIR
+
+ iniset $MARCONI_CONF DEFAULT verbose True
+ iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0'
+
+ # Install the policy file for the API server
+ cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR
+ iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json
+
+ iniset $MARCONI_CONF keystone_authtoken auth_protocol http
+ iniset $MARCONI_CONF keystone_authtoken admin_user marconi
+ iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
+
+ if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
+ iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+ configure_mongodb
+ cleanup_marconi
+ fi
+}
+
+function configure_mongodb() {
+ # Set nssize to 2GB. This increases the number of namespaces supported
+    # per database.
+ sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+
+ restart_service mongod
+}
+
+# init_marconi() - Initialize etc.
+function init_marconi() {
+ # Create cache dir
+ sudo mkdir -p $MARCONI_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR
+ rm -f $MARCONI_AUTH_CACHE_DIR/*
+}
+
+# install_marconi() - Collect source and prepare
+function install_marconi() {
+ git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH
+ setup_develop $MARCONI_DIR
+}
+
+# install_marconiclient() - Collect source and prepare
+function install_marconiclient() {
+ git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# start_marconi() - Start running processes, including screen
+function start_marconi() {
+ screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+}
+
+# stop_marconi() - Stop running processes
+function stop_marconi() {
+ # Kill the marconi screen windows
+ for serv in marconi-server; do
+ screen -S $SCREEN_NAME -p $serv -X kill
+ done
+}
+
+function create_marconi_accounts() {
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ MARCONI_USER=$(get_id keystone user-create --name=marconi \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=marconi@example.com)
+ keystone user-role-add --tenant-id $SERVICE_TENANT \
+ --user-id $MARCONI_USER \
+ --role-id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ MARCONI_SERVICE=$(get_id keystone service-create \
+ --name=marconi \
+ --type=queuing \
+ --description="Marconi Service")
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $MARCONI_SERVICE \
+ --publicurl "http://$SERVICE_HOST:8888" \
+ --adminurl "http://$SERVICE_HOST:8888" \
+ --internalurl "http://$SERVICE_HOST:8888"
+ fi
+
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/neutron b/lib/neutron
index 7f1a9d8..b05b16d 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -115,6 +115,13 @@
# nova vif driver that all plugins should use
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+# The next two variables are configured by the plugin,
+# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/*
+#
+# The plugin supports L3.
+Q_L3_ENABLED=${Q_L3_ENABLED:-False}
+# L3 routers exist per tenant
+Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False}
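+# (e.g. the MidoNet plugin sets both to True in lib/neutron_plugins/midonet)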
# List of config file names in addition to the main plugin config file
# See _configure_neutron_common() for details about setting it up
@@ -346,6 +353,7 @@
function create_neutron_initial_network() {
TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo"
# Create a small network
# Since neutron command is executed in admin context at this point,
@@ -360,12 +368,16 @@
sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
done
NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
sudo ifconfig $OVS_PHYSICAL_BRIDGE up
sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
else
NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
fi
if [[ "$Q_L3_ENABLED" == "True" ]]; then
@@ -373,14 +385,18 @@
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
fi
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create an external network, and a subnet. Configure the external network as router gw
EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+ die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP"
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
if is_service_enabled q-l3; then
@@ -390,6 +406,7 @@
sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
sudo ip link set $PUBLIC_BRIDGE up
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
+ die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
fi
if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index e406146..f95fcb7 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -62,6 +62,9 @@
if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
fi
+
+ Q_L3_ENABLED=True
+ Q_L3_ROUTER_PER_TENANT=True
}
function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 8d2e303..b5b1873 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -34,10 +34,13 @@
ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
function populate_ml2_config() {
- OPTS=$1
- CONF=$2
- SECTION=$3
+ CONF=$1
+ SECTION=$2
+ OPTS=$3
+ if [ -z "$OPTS" ]; then
+ return
+ fi
for I in "${OPTS[@]}"; do
# Replace the first '=' with ' ' for iniset syntax
iniset $CONF $SECTION ${I/=/ }
@@ -102,19 +105,17 @@
# Since we enable the tunnel TypeDrivers, also enable a local_ip
iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
- populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
- populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
- populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS
- populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS
- populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
- populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan
- fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
}
function has_neutron_plugin_security_group() {
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index d4050bb..bccd301 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -6,8 +6,6 @@
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
-#source $TOP_DIR/lib/neutron_plugins/ovs_base
-
function neutron_plugin_create_nova_conf() {
:
}
@@ -23,11 +21,17 @@
Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
+ PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
+ PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
+ PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
}
function neutron_plugin_configure_service() {
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector password $PLUMGRID_PASSWORD
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT
}
function neutron_plugin_configure_debug_command() {
diff --git a/lib/neutron_thirdparty/nicira b/lib/neutron_thirdparty/nicira
index 3f2a5af..3efb5a9 100644
--- a/lib/neutron_thirdparty/nicira
+++ b/lib/neutron_thirdparty/nicira
@@ -33,7 +33,7 @@
echo "Defaulting to "$NVP_GATEWAY_NETWORK_CIDR
fi
# Make sure the interface is up, but not configured
- sudo ip link dev $NVP_GATEWAY_NETWORK_INTERFACE set up
+ sudo ip link set $NVP_GATEWAY_NETWORK_INTERFACE up
# Save and then flush the IP addresses on the interface
addresses=$(ip addr show dev $NVP_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'})
sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE
@@ -45,7 +45,7 @@
sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE
nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
- sudo ip link dev $PUBLIC_BRIDGE set address $nvp_gw_net_if_mac
+ sudo ip link set address $nvp_gw_net_if_mac dev $PUBLIC_BRIDGE
for address in $addresses; do
sudo ip addr add dev $PUBLIC_BRIDGE $address
done
diff --git a/lib/nova b/lib/nova
index 6ab2000..e754341 100644
--- a/lib/nova
+++ b/lib/nova
@@ -225,6 +225,7 @@
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name
+ inicomment $NOVA_API_PASTE_INI filter:authtoken cafile
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password
fi
@@ -397,8 +398,10 @@
# Add keystone authtoken configuration
iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $NOVA_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $NOVA_CONF keystone_authtoken admin_user nova
iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
fi
@@ -650,7 +653,7 @@
screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
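+            # Give each fake compute a distinct "host" value so each instance
+            # registers as a separate compute service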
- screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
+ screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
done
else
if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
diff --git a/lib/savanna b/lib/savanna
index e9dbe72..6794e36 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -3,7 +3,6 @@
# Dependencies:
# ``functions`` file
# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
@@ -28,11 +27,12 @@
SAVANNA_DIR=$DEST/savanna
SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
SAVANNA_CONF_FILE=savanna.conf
-ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
-ADMIN_NAME=${ADMIN_NAME:-admin}
-ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova}
SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
+SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
+SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
+SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
# Support entry points installation of console scripts
if [[ -d $SAVANNA_DIR/bin ]]; then
SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
@@ -43,6 +43,42 @@
# Functions
# ---------
+# create_savanna_accounts() - Set up common required savanna accounts
+#
+# Tenant User Roles
+# ------------------------------
+# service savanna admin
+function create_savanna_accounts() {
+
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ SAVANNA_USER=$(keystone user-create \
+ --name=savanna \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=savanna@example.com \
+ | grep " id " | get_field 2)
+ keystone user-role-add \
+ --tenant-id $SERVICE_TENANT \
+ --user-id $SAVANNA_USER \
+ --role-id $ADMIN_ROLE
+
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ SAVANNA_SERVICE=$(keystone service-create \
+ --name=savanna \
+ --type=data_processing \
+ --description="Savanna Data Processing" \
+ | grep " id " | get_field 2)
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $SAVANNA_SERVICE \
+ --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+ --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+ fi
+}
+
# configure_savanna() - Set config files, create data dirs, etc
function configure_savanna() {
@@ -54,9 +90,9 @@
# Copy over savanna configuration file and configure common parameters.
cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME
- iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
+ iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
recreate_database savanna utf8
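create_savanna_accounts() above pulls IDs out of the keystone CLI's ASCII-table output with awk, the same field-extraction pattern used by the other lib/* account helpers. A standalone sketch of that extraction against canned output (the sample row is made up; the real keystone column layout may differ):

    # "/ service /" selects the row whose name column is " service ";
    # $2 is the second whitespace-separated field, i.e. the id column.
    sample_row='| 3a7bdeadbeef | service | True |'
    TENANT_ID=$(echo "$sample_row" | awk "/ service / { print \$2 }")
    echo "$TENANT_ID"    # -> 3a7bdeadbeef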
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
index e967622..7713a78 100644
--- a/lib/savanna-dashboard
+++ b/lib/savanna-dashboard
@@ -29,7 +29,7 @@
SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard
+SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
# Functions
diff --git a/lib/swift b/lib/swift
index 40722ab..3bf2b78 100644
--- a/lib/swift
+++ b/lib/swift
@@ -316,6 +316,7 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
@@ -339,6 +340,7 @@
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+cafile = ${KEYSTONE_SSL_CA}
auth_token = ${SERVICE_TOKEN}
admin_token = ${SERVICE_TOKEN}
@@ -376,6 +378,9 @@
iniuncomment ${swift_node_config} DEFAULT log_facility
iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+ iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
+
iniuncomment ${swift_node_config} DEFAULT disable_fallocate
iniset ${swift_node_config} DEFAULT disable_fallocate true
@@ -524,14 +529,19 @@
fi
SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
}
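Each keystone call above is now followed by a die_if_not_set guard, so a silent CLI failure stops the run immediately instead of surfacing later as an empty variable. A minimal sketch of that guard pattern (assumed behaviour only; DevStack's real die_if_not_set lives in the functions file and aborts the run rather than returning):

    guard_demo() {
        local lineno=$1 var=$2 msg=$3
        # ${!var} is bash indirect expansion: the value of the variable
        # whose name is stored in $var.
        if [[ -z "${!var}" ]]; then
            echo "ERROR on line $lineno: $msg" >&2
            return 1
        fi
    }
    SWIFT_TENANT_TEST1=""   # simulate a keystone call that produced no id
    guard_demo $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1" || echo "would abort here"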
diff --git a/lib/tempest b/lib/tempest
index d2e3b0a..aa06791 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,6 +15,7 @@
# - ``PUBLIC_NETWORK_NAME``
# - ``Q_USE_NAMESPACE``
# - ``Q_ROUTER_NAME``
+# - ``Q_L3_ENABLED``
# - ``VIRT_DRIVER``
# - ``LIBVIRT_TYPE``
# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -146,12 +147,21 @@
if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
available_flavors=$(nova flavor-list)
if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
- nova flavor-create m1.nano 42 64 0 1
+ if is_arch "ppc64"; then
+ # qemu needs at least 128MB of memory to boot on ppc64
+ nova flavor-create m1.nano 42 128 0 1
+ else
+ nova flavor-create m1.nano 42 64 0 1
+ fi
fi
flavor_ref=42
boto_instance_type=m1.nano
if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
- nova flavor-create m1.micro 84 128 0 1
+ if is_arch "ppc64"; then
+ nova flavor-create m1.micro 84 256 0 1
+ else
+ nova flavor-create m1.micro 84 128 0 1
+ fi
fi
flavor_ref_alt=84
else
@@ -193,14 +203,16 @@
if [ "$Q_USE_NAMESPACE" != "False" ]; then
tenant_networks_reachable=false
- ssh_connect_method="floating"
+ if ! is_service_enabled n-net; then
+ ssh_connect_method="floating"
+ fi
else
tenant_networks_reachable=true
fi
ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
- if is_service_enabled q-l3; then
+ if [ "$Q_L3_ENABLED" = "True" ]; then
public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
awk '{print $2}')
if [ "$Q_USE_NAMESPACE" == "False" ]; then
diff --git a/lib/tls b/lib/tls
index a1a7fdd..6134fa1 100644
--- a/lib/tls
+++ b/lib/tls
@@ -22,7 +22,8 @@
# - make_int_ca
# - new_cert $INT_CA_DIR int-server "abc"
# - start_tls_proxy HOST_IP 5000 localhost 5000
-
+# - ensure_certificates
+# - is_ssl_enabled_service
# Defaults
# --------
@@ -309,6 +310,53 @@
}
+# Certificate Input Configuration
+# ===============================
+
+# Check whether the specified service(s) are to be SSL enabled.
+#
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e. it returns on the first match.
+#
+# Uses global ``SSL_ENABLED_SERVICES``
+function is_ssl_enabled_service() {
+ services=$@
+ for service in ${services}; do
+ [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ done
+ return 1
+}
+
+
+# Ensure that the certificates for a service are in place. This function does
+# not check that a service is SSL enabled; that check should already have
+# been done.
+#
+# The function expects to find a certificate, key and CA certificate in the
+# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
+# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
+# KEYSTONE_SSL_CA. If it does not find these certificates the program will
+# quit.
+function ensure_certificates() {
+ local service=$1
+
+ local cert_var="${service}_SSL_CERT"
+ local key_var="${service}_SSL_KEY"
+ local ca_var="${service}_SSL_CA"
+
+ local cert=${!cert_var}
+ local key=${!key_var}
+ local ca=${!ca_var}
+
+ if [[ !($cert && $key && $ca) ]]; then
+ die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
+ "variable to enable SSL for ${service}"
+ fi
+
+ cat $ca >> $SSL_BUNDLE_FILE
+}
+
+
# Proxy Functions
# ===============
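is_ssl_enabled_service() above does its membership test by wrapping both the list and the candidate in commas, which keeps one entry from matching inside another by accident. The same test run standalone:

    SSL_ENABLED_SERVICES="key,nova,swift"
    for svc in nova glance nova-api; do
        # ,list, =~ ,item, only matches whole comma-delimited entries
        if [[ ,${SSL_ENABLED_SERVICES}, =~ ,${svc}, ]]; then
            echo "$svc: SSL enabled"
        else
            echo "$svc: plain HTTP"
        fi
    done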
diff --git a/lib/trove b/lib/trove
index c40006b..6d5a56e 100644
--- a/lib/trove
+++ b/lib/trove
@@ -29,10 +29,20 @@
TROVECLIENT_DIR=$DEST/python-troveclient
TROVE_CONF_DIR=/etc/trove
TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
-TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_BIN_DIR=/usr/local/bin
+# setup_trove_logging() - Adds logging configuration to conf files
+function setup_trove_logging() {
+ local CONF=$1
+ iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $CONF DEFAULT use_syslog $SYSLOG
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ # Add color to logging output
+ setup_colorized_logging $CONF DEFAULT tenant user
+ fi
+}
+
# create_trove_accounts() - Set up common required trove accounts
# Tenant User Roles
@@ -102,6 +112,7 @@
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD
@@ -121,8 +132,13 @@
iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove
sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+ setup_trove_logging $TROVE_CONF_DIR/trove.conf
+ setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
+ TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
+
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove`
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
@@ -130,6 +146,7 @@
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
fi
# (Re)create trove conductor conf file if needed
@@ -141,6 +158,7 @@
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove
+ setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf
fi
}
diff --git a/openrc b/openrc
index 804bb3f..784b00e 100644
--- a/openrc
+++ b/openrc
@@ -58,6 +58,7 @@
HOST_IP=${HOST_IP:-127.0.0.1}
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
# Some exercises call glance directly. On a single-node installation, Glance
# should be listening on HOST_IP. If it's running elsewhere, it can be set here
@@ -71,10 +72,10 @@
# the user/tenant has access to - including nova, glance, keystone, swift, ...
# We currently recommend using the 2.0 *identity api*.
#
-export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
+export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
# Set the pointer to our CA certificate chain. Harmless if TLS is not used.
-export OS_CACERT=$INT_CA_DIR/ca-chain.pem
+export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
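The OS_CACERT change above switches from an unconditional assignment to the ${VAR:-default} form, so a value exported before sourcing openrc is no longer clobbered. The difference in isolation (paths here are placeholders):

    INT_CA_DIR=/tmp/devstack-ca           # placeholder for the illustration
    unset OS_CACERT
    OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
    echo "$OS_CACERT"                     # falls back to the DevStack chain
    OS_CACERT=/etc/ssl/certs/my-ca.pem    # a user-supplied value survives
    OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
    echo "$OS_CACERT"                     # still /etc/ssl/certs/my-ca.pem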
diff --git a/stack.sh b/stack.sh
index 825373e..22d184e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -290,6 +290,10 @@
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+# Reset the bundle of CA certificates
+SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
+rm -f $SSL_BUNDLE_FILE
+
# Configure Projects
# ==================
@@ -799,6 +803,17 @@
restart_rpc_backend
+# Export Certificate Authority Bundle
+# -----------------------------------
+
+# If certificates were used and written to the SSL bundle file then these
+# should be exported so clients can validate their connections.
+
+if [ -f $SSL_BUNDLE_FILE ]; then
+ export OS_CACERT=$SSL_BUNDLE_FILE
+fi
+
+
# Configure database
# ------------------
@@ -1065,8 +1080,10 @@
# Create an access key and secret key for nova ec2 register image
if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+ die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova"
NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
- CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
+ die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME"
+ CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID)
ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY"
@@ -1146,6 +1163,7 @@
start_trove
fi
+
# Create account rc files
# =======================
@@ -1154,7 +1172,13 @@
# which is helpful in image bundle steps.
if is_service_enabled nova && is_service_enabled key; then
- $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+ USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+
+ if [ -f $SSL_BUNDLE_FILE ]; then
+ USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
+ fi
+
+ $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS
fi
@@ -1230,7 +1254,7 @@
CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
- SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do
+ SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
echo $i=${!i} >>$TOP_DIR/.stackenv
done
@@ -1253,6 +1277,13 @@
done
fi
+# Local Configuration
+# ===================
+
+# Apply configuration from local.conf, if it exists, for the post-extra phase
+# Phase: post-extra
+merge_config_group $TOP_DIR/local.conf post-extra
+
# Run local script
# ================
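With the post-extra merge added above, settings can be carried in local.conf and applied only after the files in extra.d have run. A hypothetical local.conf fragment using the same [[group|file]] meta-section format exercised in tests/test_config.sh below; the target file path and option are illustrative only:

    [[post-extra|/etc/tempest/tempest.conf]]
    [compute]
    build_timeout = 300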
diff --git a/stackrc b/stackrc
index 7eda5a5..410f9d8 100644
--- a/stackrc
+++ b/stackrc
@@ -178,7 +178,7 @@
BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
# ryu service
diff --git a/tests/functions.sh b/tests/functions.sh
index 40376aa..95dafe1 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -38,195 +38,6 @@
fi
-echo "Testing INI functions"
-
-cat >test.ini <<EOF
-[default]
-# comment an option
-#log_file=./log.conf
-log_file=/etc/log.conf
-handlers=do not disturb
-
-[aaa]
-# the commented option should not change
-#handlers=cc,dd
-handlers = aa, bb
-
-[bbb]
-handlers=ee,ff
-
-[ ccc ]
-spaces = yes
-
-[ddd]
-empty =
-
-[eee]
-multi = foo1
-multi = foo2
-EOF
-
-# Test with spaces
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ "$VAL" == "aa, bb" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini aaa handlers "11, 22"
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ "$VAL" == "11, 22" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test with spaces in section header
-
-VAL=$(iniget test.ini " ccc " spaces)
-if [[ "$VAL" == "yes" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini "b b" opt_ion 42
-
-VAL=$(iniget test.ini "b b" opt_ion)
-if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test without spaces, end of file
-
-VAL=$(iniget test.ini bbb handlers)
-if [[ "$VAL" == "ee,ff" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini bbb handlers "33,44"
-
-VAL=$(iniget test.ini bbb handlers)
-if [[ "$VAL" == "33,44" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# test empty option
-if ini_has_option test.ini ddd empty; then
- echo "OK: ddd.empty present"
-else
- echo "ini_has_option failed: ddd.empty not found"
-fi
-
-# test non-empty option
-if ini_has_option test.ini bbb handlers; then
- echo "OK: bbb.handlers present"
-else
- echo "ini_has_option failed: bbb.handlers not found"
-fi
-
-# test changing empty option
-iniset test.ini ddd empty "42"
-
-VAL=$(iniget test.ini ddd empty)
-if [[ "$VAL" == "42" ]]; then
- echo "OK: $VAL"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test section not exist
-
-VAL=$(iniget test.ini zzz handlers)
-if [[ -z "$VAL" ]]; then
- echo "OK: zzz not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-iniset test.ini zzz handlers "999"
-
-VAL=$(iniget test.ini zzz handlers)
-if [[ -n "$VAL" ]]; then
- echo "OK: zzz not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test option not exist
-
-VAL=$(iniget test.ini aaa debug)
-if [[ -z "$VAL" ]]; then
- echo "OK aaa.debug not present"
-else
- echo "iniget failed: $VAL"
-fi
-
-if ! ini_has_option test.ini aaa debug; then
- echo "OK aaa.debug not present"
-else
- echo "ini_has_option failed: aaa.debug"
-fi
-
-iniset test.ini aaa debug "999"
-
-VAL=$(iniget test.ini aaa debug)
-if [[ -n "$VAL" ]]; then
- echo "OK aaa.debug present"
-else
- echo "iniget failed: $VAL"
-fi
-
-# Test comments
-
-inicomment test.ini aaa handlers
-
-VAL=$(iniget test.ini aaa handlers)
-if [[ -z "$VAL" ]]; then
- echo "OK"
-else
- echo "inicomment failed: $VAL"
-fi
-
-# Test multiple line iniset/iniget
-iniset_multiline test.ini eee multi bar1 bar2
-
-VAL=$(iniget_multiline test.ini eee multi)
-if [[ "$VAL" == "bar1 bar2" ]]; then
- echo "OK: iniset_multiline"
-else
- echo "iniset_multiline failed: $VAL"
-fi
-
-# Test iniadd with exiting values
-iniadd test.ini eee multi bar3
-VAL=$(iniget_multiline test.ini eee multi)
-if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
- echo "OK: iniadd"
-else
- echo "iniadd failed: $VAL"
-fi
-
-# Test iniadd with non-exiting values
-iniadd test.ini eee non-multi foobar1 foobar2
-VAL=$(iniget_multiline test.ini eee non-multi)
-if [[ "$VAL" == "foobar1 foobar2" ]]; then
- echo "OK: iniadd with non-exiting value"
-else
- echo "iniadd with non-exsting failed: $VAL"
-fi
-
-rm test.ini
-
# Enabling/disabling services
echo "Testing enable_service()"
diff --git a/tests/test_config.sh b/tests/test_config.sh
index fed2e7d..39603c9 100755
--- a/tests/test_config.sh
+++ b/tests/test_config.sh
@@ -70,6 +70,12 @@
[[test1|test1c.conf]]
$TEST_1C_ADD
+
+[[test3|test-space.conf]]
+[DEFAULT]
+attribute=value
+
+# the above line has a single space
EOF
@@ -176,4 +182,14 @@
echo "failed: $VAL != $EXPECT_VAL"
fi
-rm -f test.conf test1c.conf test2a.conf
+echo -n "merge_config_file test-space: "
+rm -f test-space.conf
+merge_config_file test.conf test3 test-space.conf
+VAL=$(cat test-space.conf)
+# iniset adds a blank line if it creates the file...
+EXPECT_VAL="
+[DEFAULT]
+attribute = value"
+check_result "$VAL" "$EXPECT_VAL"
+
+rm -f test.conf test1c.conf test2a.conf test-space.conf
diff --git a/tests/test_ini.sh b/tests/test_ini.sh
new file mode 100755
index 0000000..598cd57
--- /dev/null
+++ b/tests/test_ini.sh
@@ -0,0 +1,240 @@
+#!/usr/bin/env bash
+
+# Tests for DevStack INI functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+
+
+echo "Testing INI functions"
+
+cat >test.ini <<EOF
+[default]
+# comment an option
+#log_file=./log.conf
+log_file=/etc/log.conf
+handlers=do not disturb
+
+[aaa]
+# the commented option should not change
+#handlers=cc,dd
+handlers = aa, bb
+
+[bbb]
+handlers=ee,ff
+
+[ ccc ]
+spaces = yes
+
+[ddd]
+empty =
+
+[eee]
+multi = foo1
+multi = foo2
+EOF
+
+# Test with missing arguments
+
+BEFORE=$(cat test.ini)
+
+echo -n "iniset: test missing attribute argument: "
+iniset test.ini aaa
+NO_ATTRIBUTE=$(cat test.ini)
+if [[ "$BEFORE" == "$NO_ATTRIBUTE" ]]; then
+ echo "OK"
+else
+ echo "failed"
+fi
+
+echo -n "iniset: test missing section argument: "
+iniset test.ini
+NO_SECTION=$(cat test.ini)
+if [[ "$BEFORE" == "$NO_SECTION" ]]; then
+ echo "OK"
+else
+ echo "failed"
+fi
+
+# Test with spaces
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "aa, bb" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini aaa handlers "11, 22"
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "11, 22" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test with spaces in section header
+
+VAL=$(iniget test.ini " ccc " spaces)
+if [[ "$VAL" == "yes" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini "b b" opt_ion 42
+
+VAL=$(iniget test.ini "b b" opt_ion)
+if [[ "$VAL" == "42" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test without spaces, end of file
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "ee,ff" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini bbb handlers "33,44"
+
+VAL=$(iniget test.ini bbb handlers)
+if [[ "$VAL" == "33,44" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test empty option
+if ini_has_option test.ini ddd empty; then
+ echo "OK: ddd.empty present"
+else
+ echo "ini_has_option failed: ddd.empty not found"
+fi
+
+# test non-empty option
+if ini_has_option test.ini bbb handlers; then
+ echo "OK: bbb.handlers present"
+else
+ echo "ini_has_option failed: bbb.handlers not found"
+fi
+
+# test changing empty option
+iniset test.ini ddd empty "42"
+
+VAL=$(iniget test.ini ddd empty)
+if [[ "$VAL" == "42" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test pipe in option
+iniset test.ini aaa handlers "a|b"
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ "$VAL" == "a|b" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# test space in option
+iniset test.ini aaa handlers "a b"
+
+VAL="$(iniget test.ini aaa handlers)"
+if [[ "$VAL" == "a b" ]]; then
+ echo "OK: $VAL"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test section not exist
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -z "$VAL" ]]; then
+ echo "OK: zzz not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+iniset test.ini zzz handlers "999"
+
+VAL=$(iniget test.ini zzz handlers)
+if [[ -n "$VAL" ]]; then
+ echo "OK: zzz not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test option not exist
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -z "$VAL" ]]; then
+ echo "OK aaa.debug not present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+if ! ini_has_option test.ini aaa debug; then
+ echo "OK aaa.debug not present"
+else
+ echo "ini_has_option failed: aaa.debug"
+fi
+
+iniset test.ini aaa debug "999"
+
+VAL=$(iniget test.ini aaa debug)
+if [[ -n "$VAL" ]]; then
+ echo "OK aaa.debug present"
+else
+ echo "iniget failed: $VAL"
+fi
+
+# Test comments
+
+inicomment test.ini aaa handlers
+
+VAL=$(iniget test.ini aaa handlers)
+if [[ -z "$VAL" ]]; then
+ echo "OK"
+else
+ echo "inicomment failed: $VAL"
+fi
+
+# Test multiple line iniset/iniget
+iniset_multiline test.ini eee multi bar1 bar2
+
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2" ]]; then
+ echo "OK: iniset_multiline"
+else
+ echo "iniset_multiline failed: $VAL"
+fi
+
+# Test iniadd with existing values
+iniadd test.ini eee multi bar3
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
+ echo "OK: iniadd"
+else
+ echo "iniadd failed: $VAL"
+fi
+
+# Test iniadd with non-existing values
+iniadd test.ini eee non-multi foobar1 foobar2
+VAL=$(iniget_multiline test.ini eee non-multi)
+if [[ "$VAL" == "foobar1 foobar2" ]]; then
+ echo "OK: iniadd with non-exiting value"
+else
+ echo "iniadd with non-exsting failed: $VAL"
+fi
+
+rm test.ini
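The INI tests removed from tests/functions.sh now live in this standalone script, plus new cases for missing arguments and for pipe and space characters in values. It can presumably be run on its own from a DevStack checkout, since it sources the functions file itself:

    # Run the extracted INI tests from the top of the tree
    bash tests/test_ini.sh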
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 8383fe7..5f4c486 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -43,6 +43,7 @@
--os-tenant-name <tenant_name>
--os-tenant-id <tenant_id>
--os-auth-url <auth_url>
+--os-cacert <cert file>
--target-dir <target_directory>
--skip-tenant <tenant-name>
--debug
@@ -53,7 +54,7 @@
EOF
}
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@")
then
#parse error
display_help
@@ -80,6 +81,7 @@
--os-tenant-id) export OS_TENANT_ID=$2; shift ;;
--skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
--os-auth-url) export OS_AUTH_URL=$2; shift ;;
+ --os-cacert) export OS_CACERT=$2; shift ;;
--target-dir) ACCOUNT_DIR=$2; shift ;;
--debug) set -o xtrace ;;
-u) MODE=${MODE:-one}; USER_NAME=$2; shift ;;
@@ -201,6 +203,7 @@
# Openstack Tenant ID = $tenant_id
export OS_TENANT_NAME="$tenant_name"
export OS_AUTH_URL="$OS_AUTH_URL"
+export OS_CACERT="$OS_CACERT"
export EC2_CERT="$ec2_cert"
export EC2_PRIVATE_KEY="$ec2_private_key"
export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
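The new --os-cacert option rides on the script's existing getopt loop. A cut-down sketch of that parsing pattern with only the new option (the function name and usage text are illustrative, not part of create_userrc.sh):

    parse_demo() {
        local options
        options=$(getopt -o h -l os-cacert:,help -- "$@") || return 1
        eval set -- "$options"
        while [ $# -gt 0 ]; do
            case "$1" in
                --os-cacert) export OS_CACERT=$2; shift ;;
                -h|--help) echo "usage: parse_demo [--os-cacert <file>]" ;;
            esac
            shift
        done
        echo "OS_CACERT=$OS_CACERT"
    }
    parse_demo --os-cacert /tmp/ca-bundle.pem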
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 6b9b25e..a65a77e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -67,7 +67,7 @@
curl -O $PIP_GET_PIP_URL; \
)
fi
- sudo python $FILES/get-pip.py
+ sudo -E python $FILES/get-pip.py
}
function install_pip_tarball() {
@@ -75,7 +75,7 @@
curl -O $PIP_TAR_URL; \
tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
cd pip-$INSTALL_PIP_VERSION; \
- sudo python setup.py install 1>/dev/null; \
+ sudo -E python setup.py install 1>/dev/null; \
)
}
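Both pip installs above gain sudo -E, which asks sudo to preserve the caller's environment so settings such as http_proxy/https_proxy can reach the python invocation (subject to the local sudoers policy). A quick way to see the difference, with placeholder proxy values:

    export http_proxy=http://proxy.example.com:3128
    export https_proxy=$http_proxy
    sudo env | grep -ci proxy      # often 0: env_reset strips the variables
    sudo -E env | grep -ci proxy   # the proxy variables survive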
diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh
index b49ce9f..a7e635c 100755
--- a/tools/jenkins/adapters/euca.sh
+++ b/tools/jenkins/adapters/euca.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh'
diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh
index a97f935..8da1eeb 100755
--- a/tools/jenkins/adapters/floating_ips.sh
+++ b/tools/jenkins/adapters/floating_ips.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
index ec29209..0a0b6c0 100755
--- a/tools/jenkins/adapters/volumes.sh
+++ b/tools/jenkins/adapters/volumes.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh'
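All three jenkins adapters now guard the HEAD_IP lookup the same way. The lookup itself is just grep/cut over the generated addresses file; a standalone sketch against a throwaway file:

    addresses=$(mktemp)
    echo "HEAD=192.168.1.10" > $addresses
    HEAD_IP=$(grep HEAD $addresses | cut -d "=" -f2)
    echo "${HEAD_IP:?Failure retrieving HEAD_IP}"   # :? aborts with a message if empty
    rm -f $addresses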
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index d0cdf17..958102b 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -93,13 +93,48 @@
tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack
cd $TOP_DIR
-# Run devstack on launch
-cat <<EOF >$STAGING_DIR/etc/rc.local
-# network restart required for getting the right gateway
-/etc/init.d/networking restart
-chown -R $STACK_USER /opt/stack
-su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER
-exit 0
+# Create an upstart job (task) for devstack, which can interact with the console
+cat >$STAGING_DIR/etc/init/devstack.conf << EOF
+start on stopped rc RUNLEVEL=[2345]
+
+console output
+task
+
+pre-start script
+ rm -f /var/run/devstack.succeeded
+end script
+
+script
+ initctl stop hvc0 || true
+
+ # Read any leftover characters from standard input
+ while read -n 1 -s -t 0.1 -r ignored; do
+ true
+ done
+
+ clear
+
+ chown -R $STACK_USER /opt/stack
+
+ if su -c "/opt/stack/run.sh" $STACK_USER; then
+ touch /var/run/devstack.succeeded
+ fi
+
+ # Update /etc/issue
+ {
+ echo "OpenStack VM - Installed by DevStack"
+ IPADDR=\$(ip -4 address show eth0 | sed -n 's/.*inet \\([0-9\.]\\+\\).*/\1/p')
+ echo " Management IP: \$IPADDR"
+ echo -n " Devstack run: "
+ if [ -e /var/run/devstack.succeeded ]; then
+ echo "SUCCEEDED"
+ else
+ echo "FAILED"
+ fi
+ echo ""
+ } > /etc/issue
+ initctl start hvc0 > /dev/null 2>&1
+end script
EOF
# Configure the hostname
@@ -138,8 +173,9 @@
# Configure run.sh
cat <<EOF >$STAGING_DIR/opt/stack/run.sh
#!/bin/bash
+set -eux
cd /opt/stack/devstack
-killall screen
-VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh
+./unstack.sh || true
+./stack.sh
EOF
chmod 755 $STAGING_DIR/opt/stack/run.sh
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 6ce334b..41b184c 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -367,25 +367,20 @@
if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then
set +x
- echo "VM Launched - Waiting for startup script"
- # wait for log to appear
- while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do
+ echo "VM Launched - Waiting for devstack to start"
+ while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
done
- echo -n "Running"
- while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ]
- do
+ echo -n "devstack is running"
+ while ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
echo -n "."
done
echo "done!"
set -x
- # output the run.sh.log
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log'
-
- # Fail if the expected text is not found
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in'
+ # Fail if devstack did not succeed
+ ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /var/run/devstack.succeeded'
set +x
echo "################################################################################"
@@ -399,11 +394,12 @@
echo ""
echo "All Finished!"
echo "Now, you can monitor the progress of the stack.sh installation by "
- echo "tailing /opt/stack/run.sh.log from within your domU."
+ echo "looking at the console of your domU / checking the log files."
echo ""
echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
- echo "and then do: 'tail -f /opt/stack/run.sh.log'"
+ echo "and then do: 'sudo service devstack status' to check if devstack is still running."
+ echo "Check that /var/run/devstack.succeeded exists"
echo ""
- echo "When the script completes, you can then visit the OpenStack Dashboard"
+ echo "When devstack completes, you can visit the OpenStack Dashboard"
echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
fi
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index bdcaf99..c0ea3bc 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -13,7 +13,13 @@
# Size of image
VDI_MB=${VDI_MB:-5000}
-OSDOMU_MEM_MB=3072
+
+# DevStack now contains many components. 3GB of RAM is not enough to prevent
+# swapping and memory fragmentation - the latter of which can cause failures
+# such as blkfront failing to plug a VBD and lead to random test failures.
+#
+# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs
+OSDOMU_MEM_MB=4096
OSDOMU_VDI_GB=8
# Network mapping. Specify bridge names or network names. Network names may