Merge "Make tempest L3 capable plugin aware."
diff --git a/.gitignore b/.gitignore
index a3d5b0d..49eb188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
proto
*~
-.*.sw[nop]
+.*.sw?
*.log
*.log.[1-9]
src
diff --git a/README.md b/README.md
index cb7752d..9914b1e 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@
`stackrc` for the default set). Usually just before a release there will be
milestone-proposed branches that need to be tested::
- GLANCE_REPO=https://github.com/openstack/glance.git
+ GLANCE_REPO=git://git.openstack.org/openstack/glance.git
GLANCE_BRANCH=milestone-proposed
# Start A Dev Cloud
@@ -327,6 +327,7 @@
* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced
* **post-config** - runs after the layer 2 services are configured and before they are started
* **extra** - runs after services are started and before any files in ``extra.d`` are executed
+* **post-extra** - runs after files in ``extra.d`` are executed
The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to appear in the file will be used.
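A minimal ``local.conf`` sketch using the new phase (the ``$TEMPEST_CONFIG`` target and the option shown are assumptions):

    [[local|localrc]]
    ADMIN_PASSWORD=secrete

    [[post-extra|$TEMPEST_CONFIG]]
    [compute]
    build_timeout = 300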
diff --git a/clean.sh b/clean.sh
index 395941a..480a812 100755
--- a/clean.sh
+++ b/clean.sh
@@ -15,6 +15,8 @@
# Import common functions
source $TOP_DIR/functions
+FILES=$TOP_DIR/files
+
# Load local configuration
source $TOP_DIR/stackrc
@@ -84,6 +86,10 @@
cleanup_neutron
cleanup_swift
+if is_service_enabled ldap; then
+ cleanup_ldap
+fi
+
# Do the hypervisor cleanup until this can be moved back into lib/nova
if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
cleanup_nova_hypervisor
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 96241f9..1b1ac06 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -67,7 +67,10 @@
exit_if_aggregate_present $AGGREGATE_NAME
AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE_ID "Failure creating AGGREGATE_ID for $AGGREGATE_NAME $AGGREGATE_A_ZONE"
+
AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
+die_if_not_set $LINENO AGGREGATE2_ID "Failure creating AGGREGATE2_ID for $AGGREGATE2_NAME $AGGREGATE_A_ZONE"
# check aggregate created
nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
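``die_if_not_set`` aborts with the given message when the named variable ends up empty; a minimal sketch of the guard pattern used in these exercises:

    FLAVOR=$(nova flavor-list | grep " m1.tiny " | get_field 1)
    die_if_not_set $LINENO FLAVOR "Failure retrieving FLAVOR for m1.tiny"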
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 1a1608c..4d71d49 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -114,6 +114,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
diff --git a/exercises/marconi.sh b/exercises/marconi.sh
new file mode 100755
index 0000000..1b9788d
--- /dev/null
+++ b/exercises/marconi.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# **marconi.sh**
+
+# Sanity check that Marconi started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled marconi-server || exit 55
+
+curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Marconi API not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 7dfa5dc..28e0a3d 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -139,24 +139,28 @@
function get_image_id {
local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
echo "$IMAGE_ID"
}
function get_tenant_id {
local TENANT_NAME=$1
local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
echo "$TENANT_ID"
}
function get_user_id {
local USER_NAME=$1
local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
echo "$USER_ID"
}
function get_role_id {
local ROLE_NAME=$1
local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'`
+ die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
echo "$ROLE_ID"
}
@@ -169,6 +173,7 @@
function get_flavor_id {
local INSTANCE_TYPE=$1
local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
echo "$FLAVOR_ID"
}
@@ -234,6 +239,7 @@
local TENANT_ID=$(get_tenant_id $TENANT)
source $TOP_DIR/openrc $TENANT $TENANT
local NET_ID=$(neutron net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
neutron subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
neutron-debug probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 7d80570..eb32cc7 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -56,6 +56,7 @@
# Check to make sure rules were added
SEC_GROUP_RULES=( $(nova secgroup-list-rules $SEC_GROUP_NAME | grep -v \- | grep -v 'Source Group' | cut -d '|' -f3 | tr -d ' ') )
+die_if_not_set $LINENO SEC_GROUP_RULES "Failure retrieving SEC_GROUP_RULES for $SEC_GROUP_NAME"
for i in "${RULES_TO_ADD[@]}"; do
skip=
for j in "${SEC_GROUP_RULES[@]}"; do
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 9ee9fa9..77fa4eb 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -118,6 +118,7 @@
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+ die_if_not_set $LINENO INSTANCE_TYPE "Failure retrieving INSTANCE_TYPE"
fi
# Clean-up from previous runs
diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh
new file mode 100644
index 0000000..a96a4c5
--- /dev/null
+++ b/extras.d/70-marconi.sh
@@ -0,0 +1,29 @@
+# marconi.sh - Devstack extras script to install Marconi
+
+if is_service_enabled marconi-server; then
+ if [[ "$1" == "source" ]]; then
+ # Initial source
+ source $TOP_DIR/lib/marconi
+ elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+ echo_summary "Installing Marconi"
+ install_marconiclient
+ install_marconi
+ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+ echo_summary "Configuring Marconi"
+ configure_marconi
+ configure_marconiclient
+
+ if is_service_enabled key; then
+ create_marconi_accounts
+ fi
+
+ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+ echo_summary "Initializing Marconi"
+ init_marconi
+ start_marconi
+ fi
+
+ if [[ "$1" == "unstack" ]]; then
+ stop_marconi
+ fi
+fi
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 75b702c..0186e36 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -14,6 +14,9 @@
echo_summary "Initializing Tempest"
configure_tempest
init_tempest
+ elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+ # local.conf Tempest option overrides
+ :
fi
if [[ "$1" == "unstack" ]]; then
diff --git a/extras.d/README.md b/extras.d/README.md
index 88e4265..1dd17da 100644
--- a/extras.d/README.md
+++ b/extras.d/README.md
@@ -19,10 +19,10 @@
source: always called first in any of the scripts, used to set the
initial defaults in a lib/* script or similar
- stack: called by stack.sh. There are three possible values for
+ stack: called by stack.sh. There are four possible values for
the second arg to distinguish the phase stack.sh is in:
- arg 2: install | post-config | extra
+ arg 2: install | post-config | extra | post-extra
unstack: called by unstack.sh
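Roughly, stack.sh dispatches each extras.d script once per phase (a sketch only; the exact loop may differ):

    for i in $TOP_DIR/extras.d/*.sh; do
        [[ -r $i ]] && source $i stack post-extra
    done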
diff --git a/files/apts/ldap b/files/apts/ldap
index 81a00f2..26f7aef 100644
--- a/files/apts/ldap
+++ b/files/apts/ldap
@@ -1,3 +1,3 @@
ldap-utils
-slapd # NOPRIME
+slapd
python-ldap
diff --git a/files/apts/marconi-server b/files/apts/marconi-server
new file mode 100644
index 0000000..bc7ef22
--- /dev/null
+++ b/files/apts/marconi-server
@@ -0,0 +1,3 @@
+python-pymongo
+mongodb-server
+pkg-config
diff --git a/files/ldap/keystone.ldif.in b/files/ldap/keystone.ldif.in
new file mode 100644
index 0000000..cf51907
--- /dev/null
+++ b/files/ldap/keystone.ldif.in
@@ -0,0 +1,26 @@
+dn: ${BASE_DN}
+objectClass: dcObject
+objectClass: organizationalUnit
+dc: ${BASE_DC}
+ou: ${BASE_DC}
+
+dn: ou=UserGroups,${BASE_DN}
+objectClass: organizationalUnit
+ou: UserGroups
+
+dn: ou=Users,${BASE_DN}
+objectClass: organizationalUnit
+ou: Users
+
+dn: ou=Roles,${BASE_DN}
+objectClass: organizationalUnit
+ou: Roles
+
+dn: ou=Projects,${BASE_DN}
+objectClass: organizationalUnit
+ou: Projects
+
+dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,${BASE_DN}
+objectClass: organizationalRole
+ou: _member_
+cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in
index e522150..de3b69d 100644
--- a/files/ldap/manager.ldif.in
+++ b/files/ldap/manager.ldif.in
@@ -1,10 +1,15 @@
dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config
changetype: modify
replace: olcSuffix
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
-
replace: olcRootDN
-olcRootDN: dc=Manager,dc=openstack,dc=org
+olcRootDN: ${MANAGER_DN}
-
${LDAP_ROOTPW_COMMAND}: olcRootPW
olcRootPW: ${SLAPPASS}
+-
+replace: olcDbIndex
+olcDbIndex: objectClass eq
+olcDbIndex: default pres,eq
+olcDbIndex: cn,sn,givenName,co
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
deleted file mode 100644
index 02caf3f..0000000
--- a/files/ldap/openstack.ldif
+++ /dev/null
@@ -1,26 +0,0 @@
-dn: dc=openstack,dc=org
-dc: openstack
-objectClass: dcObject
-objectClass: organizationalUnit
-ou: openstack
-
-dn: ou=UserGroups,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: UserGroups
-
-dn: ou=Users,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Users
-
-dn: ou=Roles,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Roles
-
-dn: ou=Projects,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Projects
-
-dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
-objectClass: organizationalRole
-ou: _member_
-cn: 9fe2ff9ee4384b1894a90878d3e92bab
diff --git a/files/ldap/base-config.ldif b/files/ldap/suse-base-config.ldif.in
similarity index 77%
rename from files/ldap/base-config.ldif
rename to files/ldap/suse-base-config.ldif.in
index 026d8bc..00256ee 100644
--- a/files/ldap/base-config.ldif
+++ b/files/ldap/suse-base-config.ldif.in
@@ -12,8 +12,10 @@
cn: schema
include: file:///etc/openldap/schema/core.ldif
+include: file:///etc/openldap/schema/cosine.ldif
+include: file:///etc/openldap/schema/inetorgperson.ldif
dn: olcDatabase={1}hdb,cn=config
objectClass: olcHdbConfig
olcDbDirectory: /var/lib/ldap
-olcSuffix: dc=openstack,dc=org
+olcSuffix: ${BASE_DN}
diff --git a/files/rpms/marconi-server b/files/rpms/marconi-server
new file mode 100644
index 0000000..d7b7ea8
--- /dev/null
+++ b/files/rpms/marconi-server
@@ -0,0 +1,3 @@
+selinux-policy-targeted
+mongodb-server
+pymongo
diff --git a/functions b/functions
index 6137aaf..5ff4a9b 100644
--- a/functions
+++ b/functions
@@ -554,7 +554,7 @@
function is_arch {
ARCH_TYPE=$1
- [ "($uname -m)" = "$ARCH_TYPE" ]
+ [[ "$(uname -m)" == "$ARCH_TYPE" ]]
}
# Checks if installed Apache is <= given version
@@ -1351,10 +1351,9 @@
# Create a directory for the downloaded image tarballs.
mkdir -p $FILES/images
-
+ IMAGE_FNAME=`basename "$image_url"`
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+aki style), then extracts it.
- IMAGE_FNAME=`basename "$image_url"`
if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
wget -c $image_url -O $FILES/$IMAGE_FNAME
if [[ $? -ne 0 ]]; then
@@ -1410,13 +1409,92 @@
vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=' $IMAGE)"
vmdk_create_type="${vmdk_create_type#*\"}"
vmdk_create_type="${vmdk_create_type%?}"
+
+ descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
+ `"should use a descriptor-data pair."
if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
vmdk_disktype="sparse"
- elif [[ "$vmdk_create_type" = "monolithicFlat" ]]; then
- die $LINENO "Monolithic flat disks should use a descriptor-data pair." \
- "Please provide the disk and not the descriptor."
+ elif [[ "$vmdk_create_type" = "monolithicFlat" || \
+ "$vmdk_create_type" = "vmfs" ]]; then
+ # Attempt to retrieve the *-flat.vmdk
+ flat_fname="$(head -25 $IMAGE | grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE)"
+ flat_fname="${flat_fname#*\"}"
+ flat_fname="${flat_fname%?}"
+            if [[ -z "$flat_fname" ]]; then
+ flat_fname="$IMAGE_NAME-flat.vmdk"
+ fi
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_url="${image_url:0:$path_len}$flat_fname"
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the *-flat.vmdk: $flat_url"
+ if [[ $flat_url != file* ]]; then
+ if [[ ! -f $FILES/$flat_fname || \
+ "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
+ wget -c $flat_url -O $FILES/$flat_fname
+ if [[ $? -ne 0 ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ fi
+ if $flat_found; then
+ IMAGE="$FILES/${flat_fname}"
+ fi
+ else
+ IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
+ if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+ echo "Flat disk not found: $flat_url"
+ flat_found=false
+ fi
+ if ! $flat_found; then
+ IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+ fi
+ fi
+ if $flat_found; then
+ IMAGE_NAME="${flat_fname}"
+ fi
+ vmdk_disktype="preallocated"
+ elif [[ -z "$vmdk_create_type" ]]; then
+ # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
+ # to retrieve appropriate metadata
+ if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
+ warn $LINENO "Expected filename suffix: '-flat'."`
+ `" Filename provided: ${IMAGE_NAME}"
+ else
+ descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+ path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+ flat_path="${image_url:0:$path_len}"
+ descriptor_url=$flat_path$descriptor_fname
+ warn $LINENO "$descriptor_data_pair_msg"`
+ `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+ if [[ $flat_path != file* ]]; then
+ if [[ ! -f $FILES/$descriptor_fname || \
+ "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+ wget -c $descriptor_url -O $FILES/$descriptor_fname
+ if [[ $? -ne 0 ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ descriptor_url="$FILES/$descriptor_fname"
+ else
+ descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+ if [[ ! -f $descriptor_url || \
+ "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+ warn $LINENO "Descriptor not found $descriptor_url"
+ descriptor_found=false
+ fi
+ fi
+ if $descriptor_found; then
+ vmdk_adapter_type="$(head -25 $descriptor_url |"`
+ `"grep -a -F -m 1 'ddb.adapterType =' $descriptor_url)"
+ vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+ vmdk_adapter_type="${vmdk_adapter_type%?}"
+ fi
+ fi
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+ vmdk_disktype="preallocated"
else
- #TODO(alegendre): handle streamOptimized once supported by VMware driver.
+ #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
vmdk_disktype="preallocated"
fi
@@ -1510,11 +1588,15 @@
*) echo "Do not know what to do with $IMAGE_FNAME"; false;;
esac
+ if is_arch "ppc64"; then
+ IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi"
+ fi
+
if [ "$CONTAINER_FORMAT" = "bare" ]; then
if [ "$UNPACK" = "zcat" ]; then
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
else
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -1522,12 +1604,12 @@
# kernel for use when uploading the root filesystem.
KERNEL_ID=""; RAMDISK_ID="";
if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
fi
if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
fi
}
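A hypothetical ``IMAGE_URLS`` entry exercising the new VMDK handling, where the ``*-flat.vmdk`` data file is fetched from the same directory as the descriptor:

    # hypothetical URL; debian-2.6.32-i686-flat.vmdk is expected alongside it
    IMAGE_URLS="http://example.com/images/debian-2.6.32-i686.vmdk"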
diff --git a/lib/ceilometer b/lib/ceilometer
index 8e2970c..fac3be1 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -3,7 +3,7 @@
# To enable a minimal set of Ceilometer services, add the following to localrc:
#
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
#
# To ensure Ceilometer alarming services are enabled also, further add to the localrc:
#
@@ -145,6 +145,7 @@
screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
fi
screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
@@ -160,7 +161,7 @@
# stop_ceilometer() - Stop running processes
function stop_ceilometer() {
# Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
+ for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
screen -S $SCREEN_NAME -p $serv -X kill
done
}
diff --git a/lib/cinder b/lib/cinder
index 96d2505..9288685 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -209,6 +209,7 @@
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port
inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol
+ inicomment $CINDER_API_PASTE_INI filter:authtoken cafile
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user
inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
@@ -219,6 +220,7 @@
iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CINDER_CONF keystone_authtoken admin_user cinder
iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
diff --git a/lib/glance b/lib/glance
index f40b1a7..2e29a8f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -82,6 +82,7 @@
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
@@ -99,6 +100,7 @@
iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
diff --git a/lib/heat b/lib/heat
index 7a9ef0d..e44a618 100644
--- a/lib/heat
+++ b/lib/heat
@@ -96,6 +96,7 @@
iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $HEAT_CONF keystone_authtoken admin_user heat
iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
diff --git a/lib/ironic b/lib/ironic
index 9f86e84..099746a 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -98,6 +98,7 @@
iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
diff --git a/lib/keystone b/lib/keystone
index c1fa0af..712a509 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -4,6 +4,7 @@
# Dependencies:
#
# - ``functions`` file
+# - ``tls`` file
# - ``DEST``, ``STACK_USER``
# - ``IDENTITY_API_VERSION``
# - ``BASE_SQL_CONN``
@@ -79,6 +80,13 @@
# valid assignment backends as per dir keystone/identity/backends
KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
+# if we are running with SSL use https protocols
+if is_ssl_enabled_service "key"; then
+ KEYSTONE_AUTH_PROTOCOL="https"
+ KEYSTONE_SERVICE_PROTOCOL="https"
+fi
+
+
# Functions
# ---------
# cleanup_keystone() - Remove residual data files, anything left over from previous
@@ -143,17 +151,17 @@
if is_service_enabled ldap; then
#Set all needed ldap values
- iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
- iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org"
- iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
+ iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
+ iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
iniset $KEYSTONE_CONF ldap use_dumb_member "True"
iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
- iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
- iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,dc=openstack,dc=org"
+ iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
fi
@@ -172,6 +180,15 @@
iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service key; then
+ ensure_certificates KEYSTONE
+
+ iniset $KEYSTONE_CONF ssl enable True
+ iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT
+ iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY
+ fi
+
if is_service_enabled tls-proxy; then
# Set the service ports for a proxy to take the originals
iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT
@@ -320,6 +337,10 @@
# init_keystone() - Initialize databases, etc.
function init_keystone() {
+ if is_service_enabled ldap; then
+ init_ldap
+ fi
+
# (Re)create keystone database
recreate_database keystone utf8
@@ -386,7 +407,7 @@
fi
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/ldap b/lib/ldap
index 80992a7..e4bd416 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -9,68 +9,137 @@
XTRACE=$(set +o | grep xtrace)
set +o xtrace
+
+LDAP_DOMAIN=${LDAP_DOMAIN:-openstack.org}
+# Make an array of domain components
+DC=(${LDAP_DOMAIN/./ })
+
+# Leftmost domain component used in top-level entry
+LDAP_BASE_DC=${DC[0]}
+
+# Build the base DN
+dn=""
+for dc in ${DC[*]}; do
+ dn="$dn,dc=$dc"
+done
+LDAP_BASE_DN=${dn#,}
+
+LDAP_MANAGER_DN="${LDAP_MANAGER_DN:-cn=Manager,${LDAP_BASE_DN}}"
+LDAP_URL=${LDAP_URL:-ldap://localhost}
+
LDAP_SERVICE_NAME=slapd
+if is_ubuntu; then
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=replace
+elif is_fedora; then
+ LDAP_OLCDB_NUMBER=2
+ LDAP_ROOTPW_COMMAND=add
+elif is_suse; then
+ # SUSE has slappasswd in /usr/sbin/
+ PATH=$PATH:/usr/sbin/
+ LDAP_OLCDB_NUMBER=1
+ LDAP_ROOTPW_COMMAND=add
+ LDAP_SERVICE_NAME=ldap
+fi
+
+
# Functions
# ---------
+# Perform common variable substitutions on the data files
+# _ldap_varsubst file
+function _ldap_varsubst() {
+ local infile=$1
+ sed -e "
+ s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
+ s|\${SLAPPASS}|$SLAPPASS|
+ s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|
+ s|\${BASE_DC}|$LDAP_BASE_DC|
+ s|\${BASE_DN}|$LDAP_BASE_DN|
+ s|\${MANAGER_DN}|$LDAP_MANAGER_DN|
+ " $infile
+}
+
+# cleanup_ldap() - Remove ldap server
+function cleanup_ldap() {
+ uninstall_package $(get_packages ldap)
+ if is_ubuntu; then
+ uninstall_package slapd ldap-utils libslp1
+ sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap
+ elif is_fedora; then
+ sudo rm -rf /etc/openldap /var/lib/ldap
+ elif is_suse; then
+ sudo rm -rf /var/lib/ldap
+ fi
+}
+
+# init_ldap
+# init_ldap() - Initialize databases, etc.
+function init_ldap() {
+ local keystone_ldif
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ # Remove data but not schemas
+ clear_ldap_state
+
+ # Add our top level ldap nodes
+ if ldapsearch -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -b "$LDAP_BASE_DN" | grep -q "Success"; then
+ printf "LDAP already configured for $LDAP_BASE_DC\n"
+ else
+ printf "Configuring LDAP for $LDAP_BASE_DC\n"
+ # If BASE_DN is changed, the user may override the default file
+ if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then
+ keystone_ldif=${LDAP_BASE_DC}.ldif
+ else
+ keystone_ldif=keystone.ldif
+ fi
+ _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif}
+ if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then
+ ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif}
+ fi
+ fi
+
+    rm -rf $TMP_LDAP_DIR
+}
+
# install_ldap
# install_ldap() - Collect source and prepare
function install_ldap() {
echo "Installing LDAP inside function"
- echo "LDAP_PASSWORD is $LDAP_PASSWORD"
echo "os_VENDOR is $os_VENDOR"
- printf "installing"
+
+ TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+
+ printf "installing OpenLDAP"
if is_ubuntu; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=replace
- sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils
- #automatically starts LDAP on ubuntu so no need to call start_ldap
+ # Ubuntu automatically starts LDAP so no need to call start_ldap()
+ :
elif is_fedora; then
- LDAP_OLCDB_NUMBER=2
- LDAP_ROOTPW_COMMAND=add
start_ldap
elif is_suse; then
- LDAP_OLCDB_NUMBER=1
- LDAP_ROOTPW_COMMAND=add
- LDAP_SERVICE_NAME=ldap
- # SUSE has slappasswd in /usr/sbin/
- PATH=$PATH:/usr/sbin/
- sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $FILES/ldap/base-config.ldif
+ _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif
+ sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif
sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap
start_ldap
fi
- printf "generate password file"
- SLAPPASS=`slappasswd -s $LDAP_PASSWORD`
+ echo "LDAP_PASSWORD is $LDAP_PASSWORD"
+ SLAPPASS=$(slappasswd -s $LDAP_PASSWORD)
+ printf "LDAP secret is $SLAPPASS\n"
- printf "secret is $SLAPPASS\n"
- #create manager.ldif
- TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif`
- sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE
-
- #update ldap olcdb
- sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE
+ # Create manager.ldif and add to olcdb
+ _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif
+ sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif
# On fedora we need to manually add cosine and inetorgperson schemas
- if is_fedora || is_suse; then
+ if is_fedora; then
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif
sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
fi
- # add our top level ldap nodes
- if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then
- printf "LDAP already configured for OpenStack\n"
- if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then
- # clear LDAP state
- clear_ldap_state
- # reconfigure LDAP for OpenStack
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
- else
- printf "Configuring LDAP for OpenStack\n"
- ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif
- fi
+    rm -rf $TMP_LDAP_DIR
}
# start_ldap() - Start LDAP
@@ -78,7 +147,6 @@
sudo service $LDAP_SERVICE_NAME restart
}
-
# stop_ldap() - Stop LDAP
function stop_ldap() {
sudo service $LDAP_SERVICE_NAME stop
@@ -86,7 +154,7 @@
# clear_ldap_state() - Clear LDAP State
function clear_ldap_state() {
- ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org"
+ ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
}
# Restore xtrace
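A ``localrc`` sketch for the LDAP-backed identity setup, showing how the base DN is derived from ``LDAP_DOMAIN`` (``KEYSTONE_IDENTITY_BACKEND`` is assumed to be defined elsewhere):

    enable_service ldap
    KEYSTONE_IDENTITY_BACKEND=ldap
    LDAP_DOMAIN=example.net
    # which the library turns into:
    #   LDAP_BASE_DC=example
    #   LDAP_BASE_DN=dc=example,dc=net
    #   LDAP_MANAGER_DN=cn=Manager,dc=example,dc=net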
diff --git a/lib/marconi b/lib/marconi
new file mode 100644
index 0000000..8e0b82b
--- /dev/null
+++ b/lib/marconi
@@ -0,0 +1,171 @@
+# lib/marconi
+# Install and start **Marconi** service
+
+# To enable a minimal set of Marconi services, add the following to localrc:
+# enable_service marconi-server
+#
+# Dependencies:
+# - functions
+# - OS_AUTH_URL for auth in api
+# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
+# - STACK_USER service user
+
+# stack.sh
+# ---------
+# install_marconi
+# configure_marconi
+# init_marconi
+# start_marconi
+# stop_marconi
+# cleanup_marconi
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+MARCONI_DIR=$DEST/marconi
+MARCONICLIENT_DIR=$DEST/python-marconiclient
+MARCONI_CONF_DIR=/etc/marconi
+MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
+MARCONI_API_LOG_DIR=/var/log/marconi-api
+MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
+
+# Support potential entry-points console scripts
+MARCONI_BIN_DIR=$(get_python_exec_prefix)
+
+# Set up database backend
+MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb}
+
+
+# Set Marconi repository
+MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git}
+MARCONI_BRANCH=${MARCONI_BRANCH:-master}
+
+# Set client library repository
+MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git}
+MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master}
+
+# Functions
+# ---------
+
+# cleanup_marconi() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_marconi() {
+ mongo marconi --eval "db.dropDatabase();"
+}
+
+# configure_marconiclient() - Set config files, create data dirs, etc
+function configure_marconiclient() {
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# configure_marconi() - Set config files, create data dirs, etc
+function configure_marconi() {
+ setup_develop $MARCONI_DIR
+
+ [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR
+ sudo chown $USER $MARCONI_CONF_DIR
+
+ [ ! -d $MARCONI_API_LOG_DIR ] && sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR
+ sudo chown $USER $MARCONI_API_LOG_DIR
+
+ iniset $MARCONI_CONF DEFAULT verbose True
+ iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0'
+
+ # Install the policy file for the API server
+ cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR
+ iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json
+
+ iniset $MARCONI_CONF keystone_authtoken auth_protocol http
+ iniset $MARCONI_CONF keystone_authtoken admin_user marconi
+ iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
+
+ if [[ "$MARCONI_BACKEND" = 'mongodb' ]]; then
+ iniset $MARCONI_CONF database connection mongodb://localhost:27017/marconi
+ configure_mongodb
+ cleanup_marconi
+ fi
+}
+
+function configure_mongodb() {
+ # Set nssize to 2GB. This increases the number of namespaces supported
+    # per database.
+ sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+
+ restart_service mongod
+}
+
+# init_marconi() - Initialize etc.
+function init_marconi() {
+ # Create cache dir
+ sudo mkdir -p $MARCONI_AUTH_CACHE_DIR
+ sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR
+ rm -f $MARCONI_AUTH_CACHE_DIR/*
+}
+
+# install_marconi() - Collect source and prepare
+function install_marconi() {
+ git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH
+ setup_develop $MARCONI_DIR
+}
+
+# install_marconiclient() - Collect source and prepare
+function install_marconiclient() {
+ git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH
+ setup_develop $MARCONICLIENT_DIR
+}
+
+# start_marconi() - Start running processes, including screen
+function start_marconi() {
+ screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+}
+
+# stop_marconi() - Stop running processes
+function stop_marconi() {
+ # Kill the marconi screen windows
+ for serv in marconi-server; do
+ screen -S $SCREEN_NAME -p $serv -X kill
+ done
+}
+
+function create_marconi_accounts() {
+ SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+ MARCONI_USER=$(get_id keystone user-create --name=marconi \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=marconi@example.com)
+ keystone user-role-add --tenant-id $SERVICE_TENANT \
+ --user-id $MARCONI_USER \
+ --role-id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ MARCONI_SERVICE=$(get_id keystone service-create \
+ --name=marconi \
+ --type=queuing \
+ --description="Marconi Service")
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $MARCONI_SERVICE \
+ --publicurl "http://$SERVICE_HOST:8888" \
+ --adminurl "http://$SERVICE_HOST:8888" \
+ --internalurl "http://$SERVICE_HOST:8888"
+ fi
+
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/neutron b/lib/neutron
index 6eabef5..b05b16d 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -353,6 +353,7 @@
function create_neutron_initial_network() {
TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+ die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo"
# Create a small network
# Since neutron command is executed in admin context at this point,
@@ -367,12 +368,16 @@
sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
done
NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
sudo ifconfig $OVS_PHYSICAL_BRIDGE up
sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
else
NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+        die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID"
SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $TENANT_ID"
fi
if [[ "$Q_L3_ENABLED" == "True" ]]; then
@@ -380,14 +385,18 @@
if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
# create a tenant-owned router.
ROUTER_ID=$(neutron router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME"
else
# Plugin only supports creating a single router, which should be admin owned.
ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
fi
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create an external network, and a subnet. Configure the external network as router gw
EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+ die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP"
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
if is_service_enabled q-l3; then
@@ -397,6 +406,7 @@
sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
sudo ip link set $PUBLIC_BRIDGE up
ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'`
+ die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP
fi
if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
@@ -572,7 +582,7 @@
# Format logging
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
- setup_colorized_logging $NEUTRON_CONF DEFAULT
+ setup_colorized_logging $NEUTRON_CONF DEFAULT project_id
fi
_neutron_setup_rootwrap
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 8d2e303..b5b1873 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -34,10 +34,13 @@
ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
function populate_ml2_config() {
- OPTS=$1
- CONF=$2
- SECTION=$3
+ CONF=$1
+ SECTION=$2
+ OPTS=$3
+ if [ -z "$OPTS" ]; then
+ return
+ fi
for I in "${OPTS[@]}"; do
# Replace the first '=' with ' ' for iniset syntax
iniset $CONF $SECTION ${I/=/ }
@@ -102,19 +105,17 @@
# Since we enable the tunnel TypeDrivers, also enable a local_ip
iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
- populate_ml2_config mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
- populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
- populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS
- populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS
- populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
- populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan
- fi
+ populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
}
function has_neutron_plugin_security_group() {
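With the reordered arguments, a call and the ini entries it produces look roughly like:

    # populate_ml2_config <conf-file> <section> <opts>
    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges=1:1000
    # iniset then writes into ml2_conf.ini:
    #   [ml2_type_gre]
    #   tunnel_id_ranges = 1:1000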
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index d4050bb..bccd301 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -6,8 +6,6 @@
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
-#source $TOP_DIR/lib/neutron_plugins/ovs_base
-
function neutron_plugin_create_nova_conf() {
:
}
@@ -23,11 +21,17 @@
Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
+ PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
+ PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
+ PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
}
function neutron_plugin_configure_service() {
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP
iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector password $PLUMGRID_PASSWORD
+ iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT
}
function neutron_plugin_configure_debug_command() {
diff --git a/lib/nova b/lib/nova
index 6ab2000..5fd0beb 100644
--- a/lib/nova
+++ b/lib/nova
@@ -225,6 +225,7 @@
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host
inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name
+ inicomment $NOVA_API_PASTE_INI filter:authtoken cafile
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user
inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password
fi
@@ -399,6 +400,7 @@
iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
iniset $NOVA_CONF keystone_authtoken admin_user nova
iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
fi
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
index e967622..7713a78 100644
--- a/lib/savanna-dashboard
+++ b/lib/savanna-dashboard
@@ -29,7 +29,7 @@
SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard
+SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
# Functions
diff --git a/lib/swift b/lib/swift
index 40722ab..5ff6055 100644
--- a/lib/swift
+++ b/lib/swift
@@ -316,6 +316,7 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
@@ -339,6 +340,7 @@
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+cafile = ${KEYSTONE_SSL_CA}
auth_token = ${SERVICE_TOKEN}
admin_token = ${SERVICE_TOKEN}
@@ -524,14 +526,19 @@
fi
SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2)
+ die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
}
diff --git a/lib/tempest b/lib/tempest
index 7932fe6..5ee4e8a 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -194,7 +194,9 @@
if [ "$Q_USE_NAMESPACE" != "False" ]; then
tenant_networks_reachable=false
- ssh_connect_method="floating"
+ if ! is_service_enabled n-net; then
+ ssh_connect_method="floating"
+ fi
else
tenant_networks_reachable=true
fi
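Sketch of the resulting tempest.conf entry when Neutron namespaces are in use and ``n-net`` is disabled (section placement assumed):

    [compute]
    ssh_connect_method = floating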
diff --git a/lib/tls b/lib/tls
index a1a7fdd..6134fa1 100644
--- a/lib/tls
+++ b/lib/tls
@@ -22,7 +22,8 @@
# - make_int_ca
# - new_cert $INT_CA_DIR int-server "abc"
# - start_tls_proxy HOST_IP 5000 localhost 5000
-
+# - ensure_certificates
+# - is_ssl_enabled_service
# Defaults
# --------
@@ -309,6 +310,53 @@
}
+# Certificate Input Configuration
+# ===============================
+
+# check to see if the service(s) specified are to be SSL enabled.
+#
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e. it returns on the first match.
+#
+# Uses global ``SSL_ENABLED_SERVICES``
+function is_ssl_enabled_service() {
+ services=$@
+ for service in ${services}; do
+ [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ done
+ return 1
+}
+
+
+# Ensure that the certificates for a service are in place. This function does
+# not check that a service is SSL enabled; that check should already have
+# been done.
+#
+# The function expects to find a certificate, key and CA certificate in the
+# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
+# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
+# KEYSTONE_SSL_CA. If it does not find these certificates the program will
+# quit.
+function ensure_certificates() {
+ local service=$1
+
+ local cert_var="${service}_SSL_CERT"
+ local key_var="${service}_SSL_KEY"
+ local ca_var="${service}_SSL_CA"
+
+ local cert=${!cert_var}
+ local key=${!key_var}
+ local ca=${!ca_var}
+
+ if [[ !($cert && $key && $ca) ]]; then
+ die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
+ "variable to enable SSL for ${service}"
+ fi
+
+ cat $ca >> $SSL_BUNDLE_FILE
+}
+
+
# Proxy Functions
# ===============
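A ``localrc`` sketch tying the two helpers together for Keystone, following the ``{service}_SSL_*`` convention described above (paths are examples):

    SSL_ENABLED_SERVICES="key"
    KEYSTONE_SSL_CERT=/etc/ssl/certs/keystone.pem
    KEYSTONE_SSL_KEY=/etc/ssl/private/keystone.key
    KEYSTONE_SSL_CA=/etc/ssl/certs/keystone-ca.pem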
diff --git a/lib/trove b/lib/trove
index c40006b..6d5a56e 100644
--- a/lib/trove
+++ b/lib/trove
@@ -29,10 +29,20 @@
TROVECLIENT_DIR=$DEST/python-troveclient
TROVE_CONF_DIR=/etc/trove
TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
-TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_BIN_DIR=/usr/local/bin
+# setup_trove_logging() - Adds logging configuration to conf files
+function setup_trove_logging() {
+ local CONF=$1
+ iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+ iniset $CONF DEFAULT use_syslog $SYSLOG
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ # Add color to logging output
+ setup_colorized_logging $CONF DEFAULT tenant user
+ fi
+}
+
# create_trove_accounts() - Set up common required trove accounts
# Tenant User Roles
@@ -102,6 +112,7 @@
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT
iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove
iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD
@@ -121,8 +132,13 @@
iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove
sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+ setup_trove_logging $TROVE_CONF_DIR/trove.conf
+ setup_trove_logging $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
+ TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
+
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove`
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
@@ -130,6 +146,7 @@
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+ setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
fi
# (Re)create trove conductor conf file if needed
@@ -141,6 +158,7 @@
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONF_DIR/trove-conductor.conf DEFAULT control_exchange trove
+ setup_trove_logging $TROVE_CONF_DIR/trove-conductor.conf
fi
}
diff --git a/openrc b/openrc
index 804bb3f..784b00e 100644
--- a/openrc
+++ b/openrc
@@ -58,6 +58,7 @@
HOST_IP=${HOST_IP:-127.0.0.1}
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
# Some exercises call glance directly. On a single-node installation, Glance
# should be listening on HOST_IP. If its running elsewhere, it can be set here
@@ -71,10 +72,10 @@
# the user/tenant has access to - including nova, glance, keystone, swift, ...
# We currently recommend using the 2.0 *identity api*.
#
-export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
+export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
# Set the pointer to our CA certificate chain. Harmless if TLS is not used.
-export OS_CACERT=$INT_CA_DIR/ca-chain.pem
+export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
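With ``OS_CACERT`` pointing at the bundle, a client can verify the TLS endpoint, e.g.:

    source openrc admin admin
    curl --cacert $OS_CACERT $OS_AUTH_URL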
diff --git a/stack.sh b/stack.sh
index 47d93bd..af01faa 100755
--- a/stack.sh
+++ b/stack.sh
@@ -290,6 +290,10 @@
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+# Reset the bundle of CA certificates
+SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
+rm -f $SSL_BUNDLE_FILE
+
# Configure Projects
# ==================
@@ -747,6 +751,7 @@
if [[ $TRACK_DEPENDS = True ]]; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
+ echo "Detect some changes for installed packages of pip, in depend tracking mode"
cat $DEST/requires.diff
fi
echo "Ran stack.sh in depend tracking mode, bailing out now"
@@ -798,6 +803,17 @@
restart_rpc_backend
+# Export Certificate Authority Bundle
+# ---------------------------------
+
+# If certificates were used and written to the SSL bundle file then these
+# should be exported so clients can validate their connections.
+
+if [ -f $SSL_BUNDLE_FILE ]; then
+ export OS_CACERT=$SSL_BUNDLE_FILE
+fi
+
+
# Configure database
# ------------------
@@ -1064,7 +1080,9 @@
# Create an access key and secret key for nova ec2 register image
if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+ die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova"
NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
+ die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME"
CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
@@ -1145,6 +1163,7 @@
start_trove
fi
+
# Create account rc files
# =======================
@@ -1153,7 +1172,13 @@
# which is helpful in image bundle steps.
if is_service_enabled nova && is_service_enabled key; then
- $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+ USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+
+ if [ -f $SSL_BUNDLE_FILE ]; then
+ USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
+ fi
+
+ $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS
fi
@@ -1229,7 +1254,7 @@
CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
- SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do
+ SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
echo $i=${!i} >>$TOP_DIR/.stackenv
done
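The loop relies on bash indirect expansion: ${!i} expands to the value of the variable whose name is stored in i, so each entry in the list is written to .stackenv as NAME=value. A standalone illustration:

    KEYSTONE_AUTH_PROTOCOL=https
    for i in KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
        echo "$i=${!i}"
    done
    # KEYSTONE_AUTH_PROTOCOL=https
    # OS_CACERT=                (empty when the variable is unset)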
@@ -1252,6 +1277,13 @@
done
fi
+# Local Configuration
+# ===================
+
+# Apply configuration from local.conf if it exists for the post-extra phase
+# Phase: post-extra
+merge_config_group $TOP_DIR/local.conf post-extra
+
# Run local script
# ================
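The new merge_config_group call hooks up the post-extra meta-section described in the README: settings placed under that phase in local.conf are applied only after the extra.d scripts have run. A plausible local.conf fragment (the target file and option are purely illustrative):

    [[post-extra|$TEMPEST_CONFIG]]
    [compute]
    build_timeout = 300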
diff --git a/stackrc b/stackrc
index 6adb676..410f9d8 100644
--- a/stackrc
+++ b/stackrc
@@ -62,7 +62,7 @@
# Base GIT Repo URL
# Another option is http://review.openstack.org/p
-GIT_BASE=${GIT_BASE:-https://github.com}
+GIT_BASE=${GIT_BASE:-git://git.openstack.org}
# metering service
CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
@@ -178,11 +178,11 @@
BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
+NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
NOVNC_BRANCH=${NOVNC_BRANCH:-master}
# ryu service
-RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
+RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git}
RYU_BRANCH=${RYU_BRANCH:-master}
# a websockets/html5 or flash powered SPICE console for vm instances
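GIT_BASE now defaults to git.openstack.org, while noVNC and Ryu stay pinned to their upstream GitHub locations since they are not OpenStack projects mirrored there. The old behaviour remains a one-line override in localrc (or the local|localrc section of local.conf):

    GIT_BASE=https://github.com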
diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh
index 1758e7d..6c527f5 100755
--- a/tools/build_tempest.sh
+++ b/tools/build_tempest.sh
@@ -2,7 +2,7 @@
#
# **build_tempest.sh**
-# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git
+# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git
function usage {
echo "$0 - Check out and prepare a Tempest repo"
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 8383fe7..5f4c486 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -43,6 +43,7 @@
--os-tenant-name <tenant_name>
--os-tenant-id <tenant_id>
--os-auth-url <auth_url>
+--os-cacert <cert file>
--target-dir <target_directory>
--skip-tenant <tenant-name>
--debug
@@ -53,7 +54,7 @@
EOF
}
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@")
then
#parse error
display_help
@@ -80,6 +81,7 @@
--os-tenant-id) export OS_TENANT_ID=$2; shift ;;
--skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
--os-auth-url) export OS_AUTH_URL=$2; shift ;;
+ --os-cacert) export OS_CACERT=$2; shift ;;
--target-dir) ACCOUNT_DIR=$2; shift ;;
--debug) set -o xtrace ;;
-u) MODE=${MODE:-one}; USER_NAME=$2; shift ;;
@@ -201,6 +203,7 @@
# Openstack Tenant ID = $tenant_id
export OS_TENANT_NAME="$tenant_name"
export OS_AUTH_URL="$OS_AUTH_URL"
+export OS_CACERT="$OS_CACERT"
export EC2_CERT="$ec2_cert"
export EC2_PRIVATE_KEY="$ec2_private_key"
export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
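With the new flag, stack.sh can pass the CA bundle through so every generated account rc file exports a working OS_CACERT. Run by hand, the equivalent of the stack.sh invocation above would be something like (paths illustrative):

    ./tools/create_userrc.sh -PA --target-dir accrc \
        --os-cacert /opt/stack/data/ca-bundle.pem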
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 6b9b25e..a65a77e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -67,7 +67,7 @@
curl -O $PIP_GET_PIP_URL; \
)
fi
- sudo python $FILES/get-pip.py
+ sudo -E python $FILES/get-pip.py
}
function install_pip_tarball() {
@@ -75,7 +75,7 @@
curl -O $PIP_TAR_URL; \
tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
cd pip-$INSTALL_PIP_VERSION; \
- sudo python setup.py install 1>/dev/null; \
+ sudo -E python setup.py install 1>/dev/null; \
)
}
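sudo -E asks sudo to preserve the caller's environment, so settings such as proxy variables reach the privileged pip/setup.py run instead of being dropped by env_reset (whether this is honoured still depends on the sudoers policy). For example (the proxy address is illustrative):

    export https_proxy=http://proxy.example.com:3128
    sudo -E python get-pip.py    # proxy variables survive into the root environment
    sudo python get-pip.py       # without -E they would normally be stripped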
diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh
index b49ce9f..a7e635c 100755
--- a/tools/jenkins/adapters/euca.sh
+++ b/tools/jenkins/adapters/euca.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./euca.sh'
diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh
index a97f935..8da1eeb 100755
--- a/tools/jenkins/adapters/floating_ips.sh
+++ b/tools/jenkins/adapters/floating_ips.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./floating_ips.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
index ec29209..0a0b6c0 100755
--- a/tools/jenkins/adapters/volumes.sh
+++ b/tools/jenkins/adapters/volumes.sh
@@ -5,4 +5,5 @@
TOP_DIR=$(cd ../../.. && pwd)
HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh'
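die_if_not_set is the guard used throughout DevStack: if the named variable is unset or empty it prints the message with the offending line number and aborts, so the ssh commands in these adapters never run against an empty HEAD_IP. A simplified sketch of the behaviour (not the exact implementation from functions):

    function die_if_not_set {
        local lineno=$1 var=$2; shift 2
        if [ -z "${!var}" ]; then
            echo "ERROR [line $lineno]: $*" >&2
            exit 1
        fi
    }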
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index d0cdf17..7272fe2 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -93,13 +93,34 @@
tar xf /tmp/devstack.tar -C $STAGING_DIR/opt/stack/devstack
cd $TOP_DIR
-# Run devstack on launch
-cat <<EOF >$STAGING_DIR/etc/rc.local
-# network restart required for getting the right gateway
-/etc/init.d/networking restart
-chown -R $STACK_USER /opt/stack
-su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER
-exit 0
+# Create an upstart job (task) for devstack, which can interact with the console
+cat >$STAGING_DIR/etc/init/devstack.conf << EOF
+start on stopped rc RUNLEVEL=[2345]
+
+console output
+task
+
+pre-start script
+ rm -f /var/run/devstack.succeeded
+end script
+
+script
+ initctl stop hvc0 || true
+
+ # Read any leftover characters from standard input
+ while read -n 1 -s -t 0.1 -r ignored; do
+ true
+ done
+
+ clear
+
+ chown -R $STACK_USER /opt/stack
+
+ if su -c "/opt/stack/run.sh" $STACK_USER; then
+ touch /var/run/devstack.succeeded
+ fi
+ initctl start hvc0 > /dev/null 2>&1
+end script
EOF
# Configure the hostname
@@ -138,8 +159,9 @@
# Configure run.sh
cat <<EOF >$STAGING_DIR/opt/stack/run.sh
#!/bin/bash
+set -eux
cd /opt/stack/devstack
-killall screen
-VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh
+./unstack.sh || true
+./stack.sh
EOF
chmod 755 $STAGING_DIR/opt/stack/run.sh
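Because DevStack now runs as an upstart task instead of from rc.local, its state can be inspected, and a fresh run started, with the normal upstart tooling from inside the domU; for example (exact commands depend on the Ubuntu release, and 'start devstack' is an assumption about re-running the task):

    sudo service devstack status                      # running / stop waiting
    test -e /var/run/devstack.succeeded && echo "stack.sh completed"
    sudo start devstack                               # kick off unstack.sh + stack.sh again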
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 6ce334b..41b184c 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -367,25 +367,20 @@
if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then
set +x
- echo "VM Launched - Waiting for startup script"
- # wait for log to appear
- while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do
+ echo "VM Launched - Waiting for devstack to start"
+ while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
done
- echo -n "Running"
- while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ]
- do
+ echo -n "devstack is running"
+ while ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
sleep 10
echo -n "."
done
echo "done!"
set -x
- # output the run.sh.log
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log'
-
- # Fail if the expected text is not found
- ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in'
+ # Fail if devstack did not succeed
+ ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'test -e /var/run/devstack.succeeded'
set +x
echo "################################################################################"
@@ -399,11 +394,12 @@
echo ""
echo "All Finished!"
echo "Now, you can monitor the progress of the stack.sh installation by "
- echo "tailing /opt/stack/run.sh.log from within your domU."
+ echo "looking at the console of your domU / checking the log files."
echo ""
echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
- echo "and then do: 'tail -f /opt/stack/run.sh.log'"
+ echo "and then do: 'sudo service devstack status' to check if devstack is still running."
+ echo "Check that /var/run/devstack.succeeded exists"
echo ""
- echo "When the script completes, you can then visit the OpenStack Dashboard"
+ echo "When devstack completes, you can visit the OpenStack Dashboard"
echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
fi
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 5347238..0ae2cb7 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -111,8 +111,8 @@
function test_zip_snapshot_location {
diff \
- <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \
- <(echo "https://github.com/openstack/nova/zipball/master")
+ <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \
+ <(echo "git://git.openstack.org/openstack/nova/zipball/master")
}
function test_create_directory_for_kernels {