Merge "Support passing extra args to network create if NETWORK_CREATE_ARGS is defined"
diff --git a/.gitignore b/.gitignore
index e482090..c8d2560 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,4 @@
*.log
src
localrc
+local.sh
diff --git a/AUTHORS b/AUTHORS
index 561826c..8645615 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,3 +1,4 @@
+Aaron Lee <aaron.lee@rackspace.com>
Adam Gandelman <adamg@canonical.com>
Andy Smith <github@anarkystic.com>
Anthony Young <sleepsonthefloor@gmail.com>
@@ -9,6 +10,7 @@
Eddie Hebert <edhebert@gmail.com>
Eoghan Glynn <eglynn@redhat.com>
Gabriel Hurley <gabriel@strikeawe.com>
+Hengqing Hu <hudayou@hotmail.com>
Jake Dahn <admin@jakedahn.com>
James E. Blair <james.blair@rackspace.com>
Jason Cannavale <jason.cannavale@rackspace.com>
@@ -16,6 +18,7 @@
Jesse Andrews <anotherjesse@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
Justin Shepherd <galstrom21@gmail.com>
+Ken Pepple <ken.pepple@rabbityard.com>
Kiall Mac Innes <kiall@managedit.ie>
Russell Bryant <rbryant@redhat.com>
Scott Moser <smoser@ubuntu.com>
@@ -23,3 +26,5 @@
Tres Henry <tres@treshenry.net>
Vishvananda Ishaya <vishvananda@gmail.com>
Yun Mao <yunmao@gmail.com>
+Yong Sheng Gong <gongysh@cn.ibm.com>
+Zhongyue Luo <lzyeval@gmail.com>
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 0000000..7262cff
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,162 @@
+Contributing to DevStack
+========================
+
+
+General
+-------
+
+DevStack is written in POSIX shell script. This choice was made because
+it best illustrates the configuration steps that this implementation takes
+in setting up and interacting with OpenStack components. DevStack specifies
+Bash and is compatible with Bash 3.
+
+DevStack's official repository is located on GitHub at
+https://github.com/openstack-dev/devstack.git. Besides the master branch that
+tracks the OpenStack trunk branches, a separate branch is maintained for all
+OpenStack releases starting with Diablo (stable/diablo).
+
+The primary script in DevStack is ``stack.sh``, which performs the bulk of the
+work for DevStack's use cases. There is a subscript ``functions`` that contains
+generally useful shell functions and is used by a number of the scripts in
+DevStack.
+
+A number of additional scripts can be found in the ``tools`` directory that may
+be useful in setting up special-case uses of DevStack. These include: bare metal
+deployment, ramdisk deployment and Jenkins integration.
+
+
+Scripts
+-------
+
+DevStack scripts should generally begin by calling ``env(1)`` in the shebang line::
+
+ #!/usr/bin/env bash
+
+Sometimes the script needs to know the location of the DevStack install directory.
+``TOP_DIR`` should always point there, even if the script itself is located in
+a subdirectory::
+
+ # Keep track of the current devstack directory.
+ TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+Many scripts will utilize shared functions from the ``functions`` file. There are
+also rc files (``stackrc`` and ``openrc``) that are often included to set the primary
+configuration of the user environment::
+
+ # Keep track of the current devstack directory.
+ TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+ # Import common functions
+ source $TOP_DIR/functions
+
+ # Import configuration
+ source $TOP_DIR/openrc
+
+``stack.sh`` is a rather large monolithic script that flows through from beginning
+to end. There is a proposal to segment it to put the OpenStack projects
+into their own sub-scripts to better document the projects as a unit rather than
+have it scattered throughout ``stack.sh``. Someday.
+
+
+Documentation
+-------------
+
+The official DevStack repo on GitHub does not include the gh-pages branch
+that GitHub uses to create static web sites. That branch is maintained in the
+`CloudBuilders DevStack repo`__ mirror that supports the
+http://devstack.org site. That site, along with the DevStack scripts
+themselves, is the primary DevStack documentation.
+
+__ repo_
+.. _repo: https://github.com/cloudbuilders/devstack
+
+All of the scripts are processed with shocco_ to render them with the comments
+as text describing the script below. For this reason we tend to be a little
+verbose in the comments *above* the code they pertain to. Shocco also supports
+Markdown formatting in the comments; use it sparingly. Specifically, ``stack.sh``
+uses Markdown headers to divide the script into logical sections.
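+
+A minimal sketch of this style, as used throughout the exercise scripts::
+
+    # Settings
+    # ========
+
+    # The comment above the code describes the step that follows
+    source $TOP_DIR/openrc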
+
+.. _shocco: http://rtomayko.github.com/shocco/
+
+
+Exercises
+---------
+
+The scripts in the exercises directory are meant to 1) perform basic operational
+checks on certain aspects of OpenStack; and 2) document the use of the
+OpenStack command-line clients.
+
+In addition to the guidelines above, exercise scripts MUST follow the structure
+outlined here. ``swift.sh`` is perhaps the clearest example of these guidelines.
+These scripts are executed serially by ``exercise.sh`` in testing situations.
+
+* Begin and end with a banner that stands out in a sea of script logs to aid
+ in debugging failures, particularly in automated testing situations. If the
+ end banner is not displayed, the script ended prematurely and can be assumed
+ to have failed.
+
+ ::
+
+ echo "**************************************************"
+ echo "Begin DevStack Exercise: $0"
+ echo "**************************************************"
+ ...
+ set +o xtrace
+ echo "**************************************************"
+ echo "End DevStack Exercise: $0"
+ echo "**************************************************"
+
+* The scripts will generally have the shell ``xtrace`` attribute set to display
+ the actual commands being executed, and the ``errexit`` attribute set to exit
+ the script on non-zero exit codes::
+
+ # This script exits on an error so that errors don't compound and you see
+    # only the first error that occurred.
+ set -o errexit
+
+ # Print the commands being run so that we can see the command that triggers
+    # an error. It is also useful for following along as the install occurs.
+ set -o xtrace
+
+* Settings and configuration are stored in ``exerciserc``, which must be
+ sourced after ``openrc`` or ``stackrc``::
+
+ # Import exercise configuration
+ source $TOP_DIR/exerciserc
+
+* There are a couple of helper functions in the common ``functions`` sub-script
+  that check for non-zero exit codes or unset environment variables, then
+  print a message and exit the script. These should be called after most client
+  commands that are not otherwise checked, to short-circuit long timeouts
+  (instance boot failure, for example)::
+
+ swift post $CONTAINER
+ die_if_error "Failure creating container $CONTAINER"
+
+ FLOATING_IP=`euca-allocate-address | cut -f2`
+ die_if_not_set FLOATING_IP "Failure allocating floating IP"
+
+* If you want an exercise to be skipped when, for example, a service it
+  requires is not enabled, exit the exercise with the special exit code 55
+  and it will be detected as skipped.
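+
+  For example, ``exercises/swift.sh`` skips itself when Swift is not
+  enabled::
+
+    # Exit code 55 marks this exercise as skipped
+    is_service_enabled swift || exit 55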
+
+* The exercise scripts should only use the various OpenStack client binaries to
+ interact with OpenStack. This specifically excludes any ``*-manage`` tools
+ as those assume direct access to configuration and databases, as well as direct
+ database access from the exercise itself.
+
+* If specific configuration needs to be present for the exercise to complete,
+ it should be staged in ``stack.sh``, or called from ``stack.sh`` (see
+ ``files/keystone_data.sh`` for an example of this).
+
+* The ``OS_*`` environment variables should be the only ones used for all
+  authentication to OpenStack clients, as documented in the CLIAuth_ wiki page.
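+
+  A typical environment, with illustrative values, looks like::
+
+    export OS_TENANT_NAME=demo
+    export OS_USERNAME=demo
+    export OS_PASSWORD=secret
+    export OS_AUTH_URL=http://127.0.0.1:5000/v2.0/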
+
+.. _CLIAuth: http://wiki.openstack.org/CLIAuth
+
+* The exercise MUST clean up after itself if successful. If it is not successful,
+ it is assumed that state will be left behind; this allows a chance for developers
+ to look around and attempt to debug the problem. The exercise SHOULD clean up
+  or gracefully handle possible artifacts left over from previous runs if executed
+ again. It is acceptable to require a reboot or even a re-install of DevStack
+ to restore a clean test environment.
diff --git a/README.md b/README.md
index a185f34..5c32893 100644
--- a/README.md
+++ b/README.md
@@ -1,25 +1,36 @@
-Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
+DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
# Goals
-* To quickly build dev OpenStack environments in a clean oneiric environment
+* To quickly build dev OpenStack environments on a clean Oneiric or Precise system
* To describe working configurations of OpenStack (which code branches work together? what do config files look like for those branches?)
* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once
* To make it easy to prototype cross-project features
+* To sanity-check OpenStack builds (used in gating commits to the primary repos)
Read more at http://devstack.org (built from the gh-pages branch)
-IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started.
+IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration. We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started.
+
+# DevStack on XenServer
+
+If you would like to use XenServer as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
# Versions
-The devstack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[mil
-estone]. For example, you can do the following to create a diablo OpenStack cloud:
+The DevStack master branch generally points to trunk versions of OpenStack components. For older, stable versions, look for branches named stable/[release] in the DevStack repo. For example, you can do the following to create a Diablo OpenStack cloud:
git checkout stable/diablo
./stack.sh
-# To start a dev cloud (Installing in a dedicated, disposable vm is safer than installing on your dev machine!):
+You can also pick specific OpenStack project releases by setting the appropriate `*_BRANCH` variables in `localrc` (look in `stackrc` for the default set). Usually just before a release there will be milestone-proposed branches that need to be tested:
+
+ GLANCE_REPO=https://github.com/openstack/glance.git
+ GLANCE_BRANCH=milestone-proposed
+
+# Start A Dev Cloud
+
+Installing in a dedicated disposable vm is safer than installing on your dev machine! To start a dev cloud:
./stack.sh
@@ -34,9 +45,32 @@
. openrc
# list instances
nova list
+
+If the EC2 API is your cup-o-tea, you can create credentials and use euca2ools:
+
+ # source eucarc to generate EC2 credentials and set up the environment
+ . eucarc
# list instances using ec2 api
euca-describe-instances
# Customizing
-You can override environment variables used in stack.sh by creating file name 'localrc'. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
+You can override environment variables used in `stack.sh` by creating a file named `localrc`. It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
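+
+A minimal `localrc` might look like this (the values are illustrative):
+
+    HOST_IP=192.168.1.10
+    ADMIN_PASSWORD=secret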
+
+# Swift
+
+Swift is not installed by default; you can enable it easily by adding this to your `localrc`:
+
+    ENABLED_SERVICES="$ENABLED_SERVICES,swift"
+
+If you want a minimal Swift install with only Swift and Keystone, you can have this instead in your `localrc`:
+
+ ENABLED_SERVICES="key,mysql,swift"
+
+If you use Swift with Keystone, Swift will authenticate against it; make sure your clients use the Keystone URL for authentication.
+
+Swift will act as an S3 endpoint for Keystone, effectively replacing `nova-objectstore`.
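+
+You can check the S3 endpoint that Keystone advertises with the Keystone client (this is how `eucarc` looks it up):
+
+    keystone catalog --service s3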
+
+Only the Swift proxy server is launched in the screen session; all other services are started in the background and managed by the `swift-init` tool.
+
+By default Swift configures 3 replicas (and one spare), which can be I/O intensive on a small VM. If you only want to do some quick testing of the API, you can use a single replica by customizing the `SWIFT_REPLICAS` variable in your `localrc`, for example:
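+
+    SWIFT_REPLICAS=1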
diff --git a/eucarc b/eucarc
new file mode 100644
index 0000000..2b0f7dd
--- /dev/null
+++ b/eucarc
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+#
+# source eucarc [username] [tenantname]
+#
+# Create EC2 credentials for the current user as defined by OS_TENANT_NAME:OS_USERNAME
+# The optional arguments override the username and tenant passed to openrc
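+#
+# A hypothetical invocation (the user and tenant names are illustrative):
+#   source eucarc demo demo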
+
+if [[ -n "$1" ]]; then
+ USERNAME=$1
+fi
+if [[ -n "$2" ]]; then
+ TENANT=$2
+fi
+
+# Find the other rc files
+RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+
+# Get user configuration
+source $RC_DIR/openrc
+
+# Set the ec2 url so euca2ools works
+export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
+
+# Create EC2 credentials for the current user
+CREDS=$(keystone ec2-credentials-create)
+export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
+export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
+
+# Euca2ools Certificate stuff for uploading bundles
+# See exercises/bundle.sh to see how to get certs using nova cli
+NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR}
+export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }')
+export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
+export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
+export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
+export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem
+export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
+alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user ${EC2_USER_ID} --ec2cert ${NOVA_CERT}"
+alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+
diff --git a/exercise.sh b/exercise.sh
index dd45c5a..15f264f 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -1,6 +1,13 @@
#!/usr/bin/env bash
-source ./stackrc
+# **exercise.sh**
+
+# Keep track of the current devstack directory.
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
# Run everything in the exercises/ directory that isn't explicitly disabled
# comma separated list of script basenames to skip
@@ -21,11 +28,14 @@
if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
skips="$skips $script"
else
- echo =========================
+ echo "====================================================================="
echo Running $script
- echo =========================
+ echo "====================================================================="
$EXERCISE_DIR/$script.sh
- if [[ $? -ne 0 ]] ; then
+ exitcode=$?
+ if [[ $exitcode == 55 ]]; then
+ skips="$skips $script"
+ elif [[ $exitcode -ne 0 ]] ; then
failures="$failures $script"
else
passes="$passes $script"
@@ -34,8 +44,7 @@
done
# output status of exercise run
-echo =========================
-echo =========================
+echo "====================================================================="
for script in $skips; do
echo SKIP $script
done
@@ -45,6 +54,7 @@
for script in $failures; do
echo FAILED $script
done
+echo "====================================================================="
if [ -n "$failures" ] ; then
exit 1
diff --git a/exerciserc b/exerciserc
new file mode 100644
index 0000000..b41714d
--- /dev/null
+++ b/exerciserc
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+#
+# source exerciserc
+#
+# Configure the DevStack exercise scripts
+# For best results, source this _after_ stackrc/localrc as it will set
+# values only if they are not already set.
+
+# Max time to wait while vm goes from build to active state
+export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time to wait for proper IP association and dis-association.
+export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+
+# Max time till the vm is bootable
+export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+
+# Max time from run instance command until it is running
+export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
+
+# Max time to wait for a vm to terminate
+export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
new file mode 100755
index 0000000..38fac12
--- /dev/null
+++ b/exercises/aggregates.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+# **aggregates.sh**
+
+# This script demonstrates how to use host aggregates:
+# * Create an aggregate
+# * Update aggregate details
+# * Test aggregate metadata
+# * Test aggregate delete
+# * TODO(johngar) - test adding a host (ideally with two hosts)
+
+echo "**************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "**************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# run test as the admin user
+_OLD_USERNAME=$OS_USERNAME
+OS_USERNAME=admin
+
+
+# Create an aggregate
+# ===================
+
+AGGREGATE_NAME=test_aggregate_$RANDOM
+AGGREGATE_A_ZONE=nova
+
+exit_if_aggregate_present() {
+ aggregate_name=$1
+
+ if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then
+ echo "SUCCESS $aggregate_name not present"
+ else
+ echo "ERROR found aggregate: $aggregate_name"
+ exit -1
+ fi
+}
+
+exit_if_aggregate_present $AGGREGATE_NAME
+
+AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1`
+
+# check aggregate created
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+
+
+# Ensure creating a duplicate fails
+# =================================
+
+if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
+ echo "ERROR could create duplicate aggregate"
+ exit -1
+fi
+
+
+# Test aggregate-update (and aggregate-details)
+# =============================================
+AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
+nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
+
+
+# Test aggregate-set-metadata
+# ===========================
+META_DATA_1_KEY=asdf
+META_DATA_2_KEY=foo
+META_DATA_3_KEY=bar
+
+#ensure no metadata is set
+nova aggregate-details $AGGREGATE_ID | grep {}
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep 123
+
+nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
+
+nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
+
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+
+nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
+nova aggregate-details $AGGREGATE_ID | grep {}
+
+
+# Test aggregate-add/remove-host
+# ==============================
+if [ "$VIRT_DRIVER" == "xenserver" ]; then
+ echo "TODO(johngarbutt) add tests for add/remove host from aggregate"
+fi
+
+
+# Test aggregate-delete
+# =====================
+nova aggregate-delete $AGGREGATE_ID
+exit_if_aggregate_present $AGGREGATE_NAME
+
+
+# Test complete
+# =============
+OS_USERNAME=$_OLD_USERNAME
+echo "AGGREGATE TEST PASSED"
+
+set +o xtrace
+echo "**************************************************"
+echo "End DevStack Exercise: $0"
+echo "**************************************************"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
new file mode 100755
index 0000000..c707b47
--- /dev/null
+++ b/exercises/boot_from_volume.sh
@@ -0,0 +1,263 @@
+#!/usr/bin/env bash
+
+# **boot_from_volume.sh**
+
+# This script demonstrates how to boot from a volume. It does the following:
+# * Create a 'builder' instance
+# * Attach a volume to the instance
+# * Format and install an os onto the volume
+# * Detach volume from builder, and then boot volume-backed instance
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Boot this image, use first AMI image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
+# Instance type
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
+# Default floating IP pool name
+DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova}
+
+
+# Launching servers
+# =================
+
+# Grab the id of the image to launch
+IMAGE=`glance -f index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1`
+die_if_not_set IMAGE "Failure getting image"
+
+# Instance and volume names
+INSTANCE_NAME=${INSTANCE_NAME:-test_instance}
+VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance}
+VOL_NAME=${VOL_NAME:-test_volume}
+
+# Clean-up from previous runs
+nova delete $VOL_INSTANCE_NAME || true
+nova delete $INSTANCE_NAME || true
+
+# Wait till server is gone
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then
+ echo "server didn't terminate!"
+ exit 1
+fi
+
+# Configure Security Groups
+SECGROUP=${SECGROUP:-test_secgroup}
+nova secgroup-delete $SECGROUP || true
+nova secgroup-create $SECGROUP "$SECGROUP description"
+nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
+
+# Determine instance type
+INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+if [[ -z "$INSTANCE_TYPE" ]]; then
+ # grab the first flavor in the list to launch if default doesn't exist
+ INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+fi
+
+# Setup Keypair
+KEY_NAME=test_key
+KEY_FILE=key.pem
+nova keypair-delete $KEY_NAME || true
+nova keypair-add $KEY_NAME > $KEY_FILE
+chmod 600 $KEY_FILE
+
+# Boot our instance
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | get_field 2`
+die_if_not_set VM_UUID "Failure launching $INSTANCE_NAME"
+
+# check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+ echo "server didn't become active!"
+ exit 1
+fi
+
+# Delete the old volume
+nova volume-delete $VOL_NAME || true
+
+# Free all floating IPs - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers
+if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then
+ nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true
+fi
+
+# Allocate floating ip
+FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1`
+
+# Make sure the ip gets allocated
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
+ echo "Floating IP not allocated"
+ exit 1
+fi
+
+# Add floating ip to our server
+nova add-floating-ip $VM_UUID $FLOATING_IP
+
+# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+ echo "Couldn't ping server with floating ip"
+ exit 1
+fi
+
+# Create our volume
+nova volume-create --display_name=$VOL_NAME 1
+
+# Wait for volume to activate
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+ echo "Volume $VOL_NAME not created"
+ exit 1
+fi
+
+# FIXME (anthony) - python-novaclient should accept a volume_name for the attachment param?
+DEVICE=/dev/vdb
+VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1`
+nova volume-attach $INSTANCE_NAME $VOLUME_ID $DEVICE
+
+# Wait till volume is attached
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not attached"
+ exit 1
+fi
+
+# The following script builds our bootable volume.
+# To do this, ssh to the builder instance, mount volume, and build a volume-backed image.
+STAGING_DIR=/tmp/stage
+CIRROS_DIR=/tmp/cirros
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+set -o errexit
+set -o xtrace
+sudo mkdir -p $STAGING_DIR
+sudo mkfs.ext3 -b 1024 $DEVICE 1048576
+sudo mount $DEVICE $STAGING_DIR
+# The following lines create a writable empty file so that we can scp
+# the actual file
+sudo touch $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz
+sudo chown cirros $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz
+EOF
+
+# Download cirros
+if [ ! -e cirros-0.3.0-x86_64-rootfs.img.gz ]; then
+ wget http://images.ansolabs.com/cirros-0.3.0-x86_64-rootfs.img.gz
+fi
+
+# Copy cirros onto the volume
+scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz cirros@$FLOATING_IP:$STAGING_DIR
+
+# Unpack cirros into volume
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+set -o errexit
+set -o xtrace
+cd $STAGING_DIR
+sudo mkdir -p $CIRROS_DIR
+sudo gunzip cirros-0.3.0-x86_64-rootfs.img.gz
+sudo mount cirros-0.3.0-x86_64-rootfs.img $CIRROS_DIR
+
+# Copy cirros into our volume
+sudo cp -pr $CIRROS_DIR/* $STAGING_DIR/
+
+cd
+sync
+sudo umount $CIRROS_DIR
+# The following typically fails. Don't know why.
+sudo umount $STAGING_DIR || true
+EOF
+
+# Detach the volume from the builder instance
+nova volume-detach $INSTANCE_NAME $VOLUME_ID
+
+# Boot instance from volume! This is done with the --block_device_mapping param.
+# The format of mapping is:
+# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
+# Leaving the middle two fields blank appears to do-the-right-thing
+VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2`
+die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME"
+
+# Check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+ echo "server didn't become active!"
+ exit 1
+fi
+
+# Remove floating ip from our server
+nova remove-floating-ip $VM_UUID $FLOATING_IP
+
+# Gratuitous sleep, probably hiding a race condition :/
+sleep 1
+
+# Add floating ip to our server
+nova add-floating-ip $VOL_VM_UUID $FLOATING_IP
+
+# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+ echo "Couldn't ping volume-backed server with floating ip"
+ exit 1
+fi
+
+# Make sure our volume-backed instance launched
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+echo "success!"
+EOF
+
+# Delete volume backed instance
+nova delete $VOL_INSTANCE_NAME || \
+ die "Failure deleting instance volume $VOL_INSTANCE_NAME"
+
+# Wait till our volume is no longer in-use
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME did not become available"
+ exit 1
+fi
+
+# Delete the volume
+nova volume-delete $VOL_NAME || \
+    die "Failure deleting volume $VOL_NAME"
+
+# Delete instance
+nova delete $INSTANCE_NAME || \
+ die "Failure deleting instance $INSTANCE_NAME"
+
+# Wait for termination
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then
+ echo "server didn't terminate!"
+ exit 1
+fi
+
+# De-allocate the floating ip
+nova floating-ip-delete $FLOATING_IP || \
+ die "Failure deleting floating IP $FLOATING_IP"
+
+# Delete a secgroup
+nova secgroup-delete $SECGROUP || \
+ die "Failure deleting security group $SECGROUP"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index d5c78af..c607c94 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -1,8 +1,13 @@
#!/usr/bin/env bash
+# **bundle.sh**
+
# we will use the ``euca2ools`` cli tool that wraps the python boto
-# library to test ec2 compatibility
-#
+# library to test ec2 bundle upload compatibility
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occured.
@@ -15,19 +20,27 @@
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import EC2 configuration
+source $TOP_DIR/eucarc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
# Remove old certificates
-rm -f cacert.pem
-rm -f cert.pem
-rm -f pk.pem
+rm -f $TOP_DIR/cacert.pem
+rm -f $TOP_DIR/cert.pem
+rm -f $TOP_DIR/pk.pem
# Get Certificates
-nova x509-get-root-cert
-nova x509-create-cert
-popd
+nova x509-get-root-cert $TOP_DIR/cacert.pem
+nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem
# Max time to wait for image to be registered
REGISTER_TIMEOUT=${REGISTER_TIMEOUT:-15}
@@ -35,17 +48,23 @@
BUCKET=testbucket
IMAGE=bundle.img
truncate -s 5M /tmp/$IMAGE
-euca-bundle-image -i /tmp/$IMAGE
+euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
-euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml
AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
+die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
# Wait for the image to become available
-if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep '$AMI' | grep 'available'; do sleep 1; done"; then
+if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
exit 1
fi
# Clean up
-euca-deregister $AMI
+euca-deregister $AMI || die "Failure deregistering $AMI"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
new file mode 100755
index 0000000..66fddcf
--- /dev/null
+++ b/exercises/client-args.sh
@@ -0,0 +1,142 @@
+#!/usr/bin/env bash
+
+# Test OpenStack client authentication arguments handling
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Unset all of the known NOVA_ vars
+unset NOVA_API_KEY
+unset NOVA_ENDPOINT_NAME
+unset NOVA_PASSWORD
+unset NOVA_PROJECT_ID
+unset NOVA_REGION_NAME
+unset NOVA_URL
+unset NOVA_USERNAME
+unset NOVA_VERSION
+
+# Save the known variables for later
+export x_TENANT_NAME=$OS_TENANT_NAME
+export x_USERNAME=$OS_USERNAME
+export x_PASSWORD=$OS_PASSWORD
+export x_AUTH_URL=$OS_AUTH_URL
+
+# Unset the usual variables to force argument processing
+unset OS_TENANT_NAME
+unset OS_USERNAME
+unset OS_PASSWORD
+unset OS_AUTH_URL
+
+# Common authentication args
+TENANT_ARG="--os_tenant_name=$x_TENANT_NAME"
+ARGS="--os_username=$x_USERNAME --os_password=$x_PASSWORD --os_auth_url=$x_AUTH_URL"
+
+# Set global return
+RETURN=0
+
+# Keystone client
+# ---------------
+if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
+ if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then
+ STATUS_KEYSTONE="Skipped"
+ else
+ echo -e "\nTest Keystone"
+ if keystone $TENANT_ARG $ARGS catalog --service identity; then
+ STATUS_KEYSTONE="Succeeded"
+ else
+ STATUS_KEYSTONE="Failed"
+ RETURN=1
+ fi
+ fi
+fi
+
+# Nova client
+# -----------
+
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
+ if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then
+ STATUS_NOVA="Skipped"
+ STATUS_EC2="Skipped"
+ else
+ # Test OSAPI
+ echo -e "\nTest Nova"
+ if nova $TENANT_ARG $ARGS flavor-list; then
+ STATUS_NOVA="Succeeded"
+ else
+ STATUS_NOVA="Failed"
+ RETURN=1
+ fi
+ fi
+fi
+
+# Glance client
+# -------------
+
+if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
+ if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then
+ STATUS_GLANCE="Skipped"
+ else
+ echo -e "\nTest Glance"
+ if glance $TENANT_ARG $ARGS index; then
+ STATUS_GLANCE="Succeeded"
+ else
+ STATUS_GLANCE="Failed"
+ RETURN=1
+ fi
+ fi
+fi
+
+# Swift client
+# ------------
+
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+ if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
+ STATUS_SWIFT="Skipped"
+ else
+ echo -e "\nTest Swift"
+ if swift $ARGS stat; then
+ STATUS_SWIFT="Succeeded"
+ else
+ STATUS_SWIFT="Failed"
+ RETURN=1
+ fi
+ fi
+fi
+
+# Results
+# -------
+
+function report() {
+ if [[ -n "$2" ]]; then
+ echo "$1: $2"
+ fi
+}
+
+echo -e "\n"
+report "Keystone" $STATUS_KEYSTONE
+report "Nova" $STATUS_NOVA
+report "Glance" $STATUS_GLANCE
+report "Swift" $STATUS_SWIFT
+
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
+exit $RETURN
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index a15a5c0..af2c4c2 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -2,16 +2,28 @@
# Test OpenStack client enviroment variable handling
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
# Verify client workage
VERIFY=${1:-""}
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd) >/dev/null
-source ./openrc
-popd >/dev/null
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
# Unset all of the known NOVA_ vars
unset NOVA_API_KEY
@@ -23,19 +35,10 @@
unset NOVA_USERNAME
unset NOVA_VERSION
-# Make sure we have the vars we are expecting
-function is_set() {
- local var=\$"$1"
- eval echo $1=$var
- if eval "[ -z $var ]"; then
- return 1
- fi
- return 0
-}
-
for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
is_set $i
if [[ $? -ne 0 ]]; then
+ echo "$i expected to be set"
ABORT=1
fi
done
@@ -52,24 +55,13 @@
if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then
STATUS_KEYSTONE="Skipped"
else
- # We need to run the keystone test as admin since there doesn't
- # seem to be anything to test the cli vars that runs as a user
- # tenant-list should do that, it isn't implemented (yet)
- xOS_TENANT_NAME=$OS_TENANT_NAME
- xOS_USERNAME=$OS_USERNAME
- export OS_USERNAME=admin
- export OS_TENANT_NAME=admin
-
echo -e "\nTest Keystone"
- if keystone service-list; then
+ if keystone catalog --service identity; then
STATUS_KEYSTONE="Succeeded"
else
STATUS_KEYSTONE="Failed"
RETURN=1
fi
-
- OS_TENANT_NAME=$xOS_TENANT_NAME
- OS_USERNAME=$xOS_USERNAME
fi
fi
@@ -79,7 +71,9 @@
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then
STATUS_NOVA="Skipped"
+ STATUS_EC2="Skipped"
else
+ # Test OSAPI
echo -e "\nTest Nova"
if nova flavor-list; then
STATUS_NOVA="Succeeded"
@@ -87,6 +81,21 @@
STATUS_NOVA="Failed"
RETURN=1
fi
+
+ # Test EC2 API
+ echo -e "\nTest EC2"
+ # Get EC2 creds
+ source $TOP_DIR/eucarc
+
+ if euca-describe-images; then
+ STATUS_EC2="Succeeded"
+ else
+ STATUS_EC2="Failed"
+ RETURN=1
+ fi
+
+ # Clean up side effects
+ unset NOVA_VERSION
fi
fi
@@ -136,7 +145,12 @@
echo -e "\n"
report "Keystone" $STATUS_KEYSTONE
report "Nova" $STATUS_NOVA
+report "EC2" $STATUS_EC2
report "Glance" $STATUS_GLANCE
report "Swift" $STATUS_SWIFT
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
exit $RETURN
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 86cd673..76e5202 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -1,8 +1,13 @@
#!/usr/bin/env bash
+# **euca.sh**
+
# we will use the ``euca2ools`` cli tool that wraps the python boto
# library to test ec2 compatibility
-#
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occured.
@@ -12,26 +17,30 @@
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace
+
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
-popd
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+# Import common functions
+source $TOP_DIR/functions
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+# Import EC2 configuration
+source $TOP_DIR/eucarc
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+# Import exercise configuration
+source $TOP_DIR/exerciserc
# Instance type to create
DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
+# Launching a server
+# ==================
+
# Find a machine image to boot
IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
@@ -49,6 +58,7 @@
# Launch it
INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
+die_if_not_set INSTANCE "Failure launching instance"
# Assure it has booted within a reasonable time
if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
@@ -58,12 +68,15 @@
# Allocate floating address
FLOATING_IP=`euca-allocate-address | cut -f2`
+die_if_not_set FLOATING_IP "Failure allocating floating IP"
# Associate floating address
-euca-associate-address -i $INSTANCE $FLOATING_IP
+euca-associate-address -i $INSTANCE $FLOATING_IP || \
+ die "Failure associating address $FLOATING_IP to $INSTANCE"
# Authorize pinging
-euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+ die "Failure authorizing rule in $SECGROUP"
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
@@ -72,10 +85,12 @@
fi
# Revoke pinging
-euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+ die "Failure revoking rule in $SECGROUP"
# Release floating address
-euca-disassociate-address $FLOATING_IP
+euca-disassociate-address $FLOATING_IP || \
+ die "Failure disassociating address $FLOATING_IP"
# Wait just a tick for everything above to complete so release doesn't fail
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
@@ -84,7 +99,8 @@
fi
# Release floating address
-euca-release-address $FLOATING_IP
+euca-release-address $FLOATING_IP || \
+ die "Failure releasing address $FLOATING_IP"
# Wait just a tick for everything above to complete so terminate doesn't fail
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
@@ -93,7 +109,8 @@
fi
# Terminate instance
-euca-terminate-instances $INSTANCE
+euca-terminate-instances $INSTANCE || \
+ die "Failure terminating instance $INSTANCE"
# Assure it has terminated within a reasonable time
if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
@@ -102,4 +119,10 @@
fi
# Delete group
-euca-delete-group $SECGROUP
+euca-delete-group $SECGROUP || \
+ die "Failure deleting security group $SECGROUP"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index b559965..9974b4b 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -1,11 +1,13 @@
#!/usr/bin/env bash
-# **exercise.sh** - using the cloud can be fun
+# **floating_ips.sh** - using the cloud can be fun
# we will use the ``nova`` cli tool provided by the ``python-novaclient``
-# package
-#
+# package to work out the instance connectivity
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occured.
@@ -19,19 +21,18 @@
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
-popd
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+# Import common functions
+source $TOP_DIR/functions
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+# Import configuration
+source $TOP_DIR/openrc
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+# Import exercise configuration
+source $TOP_DIR/exerciserc
# Instance type to create
DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
@@ -48,6 +49,7 @@
# Additional floating IP pool and range
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+
# Launching a server
# ==================
@@ -87,15 +89,16 @@
# List of instance types:
nova flavor-list
-INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1`
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
- INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+ INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1`
fi
-NAME="myserver"
+NAME="ex-float"
-VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2`
+die_if_not_set VM_UUID "Failure launching $NAME"
# Testing
# =======
@@ -114,7 +117,8 @@
fi
# get the IP of the server
-IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
+IP=`nova show $VM_UUID | grep "private network" | get_field 2`
+die_if_not_set IP "Failure retrieving IP address"
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
@@ -147,7 +151,8 @@
nova secgroup-list-rules $SECGROUP
# allocate a floating ip from default pool
-FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2`
+FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1`
+die_if_not_set FLOATING_IP "Failure creating floating IP"
# list floating addresses
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
@@ -156,7 +161,8 @@
fi
# add floating ip to our server
-nova add-floating-ip $VM_UUID $FLOATING_IP
+nova add-floating-ip $VM_UUID $FLOATING_IP || \
+ die "Failure adding floating IP $FLOATING_IP to $NAME"
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
@@ -165,7 +171,8 @@
fi
# Allocate an IP from second floating pool
-TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | cut -d '|' -f2`
+TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1`
+die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
# list floating addresses
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
@@ -174,7 +181,7 @@
fi
# dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP"
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" ]; then
@@ -187,13 +194,13 @@
fi
# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP
+nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP"
# Delete second floating IP
-nova floating-ip-delete $TEST_FLOATING_IP
+nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP"
# shutdown the server
-nova delete $VM_UUID
+nova delete $VM_UUID || die "Failure deleting instance $NAME"
# make sure the VM shuts down within a reasonable time
if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
@@ -202,4 +209,9 @@
fi
# Delete a secgroup
-nova secgroup-delete $SECGROUP
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 3a57744..732445d 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -1,7 +1,13 @@
#!/usr/bin/env bash
+# **swift.sh**
+
# Test swift via the command line tools that ship with it.
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
# This script exits on an error so that errors don't compound and you see
# only the first error that occured.
set -o errexit
@@ -14,31 +20,46 @@
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
-popd
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Container name
+CONTAINER=ex-swift
+
+# If Swift is not enabled we exit with exit code 55, which means the
+# exercise is skipped.
+is_service_enabled swift || exit 55
# Testing Swift
# =============
-# FIXME(chmou): when review https://review.openstack.org/#change,3712
-# is merged we would be able to use the common openstack options and
-# remove the trailing slash to v2.0 auth url.
-#
# Check if we have to swift via keystone
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD stat
+swift stat || die "Failure getting status"
# We start by creating a test container
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD post testcontainer
+swift post $CONTAINER || die "Failure creating container $CONTAINER"
# add some files into it.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"
# list them
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD list testcontainer
+swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"
# And we may want to delete them now that we have tested that
# everything works.
-swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0/ -U admin -K $ADMIN_PASSWORD delete testcontainer
+swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 622fb18..1abbecc 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -1,7 +1,13 @@
#!/usr/bin/env bash
+# **volumes.sh**
+
# Test nova volumes with the nova command from python-novaclient
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
# This script exits on an error so that errors don't compound and you see
# only the first error that occured.
set -o errexit
@@ -14,19 +20,18 @@
# Settings
# ========
-# Use openrc + stackrc + localrc for settings
-pushd $(cd $(dirname "$0")/.. && pwd)
-source ./openrc
-popd
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+# Import common functions
+source $TOP_DIR/functions
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+# Import configuration
+source $TOP_DIR/openrc
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+# Import exercise configuration
+source $TOP_DIR/exerciserc
# Instance type to create
DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
@@ -34,6 +39,7 @@
# Boot this image, use first AMi image if unset
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
# Launching a server
# ==================
@@ -55,21 +61,6 @@
# determinine instance type
# -------------------------
-# Helper function to grab a numbered field from python novaclient cli result
-# Fields are numbered starting with 1
-# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
-function get_field () {
- while read data
- do
- if [ "$1" -lt 0 ]; then
- field="(\$(NF$1))"
- else
- field="\$$(($1 + 1))"
- fi
- echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
- done
-}
-
# List of instance types:
nova flavor-list
@@ -79,9 +70,11 @@
INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1`
fi
-NAME="myserver"
+NAME="ex-vol"
VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2`
+die_if_not_set VM_UUID "Failure launching $NAME"
+
# Testing
# =======
@@ -101,6 +94,7 @@
# get the IP of the server
IP=`nova show $VM_UUID | grep "private network" | get_field 2`
+die_if_not_set IP "Failure retrieving IP address"
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
@@ -130,6 +124,10 @@
# Create a new volume
nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
+if [[ $? != 0 ]]; then
+ echo "Failure creating volume $VOL_NAME"
+ exit 1
+fi
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
exit 1
@@ -137,34 +135,42 @@
# Get volume ID
VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1`
+die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
# Attach to server
DEVICE=/dev/vdb
-nova volume-attach $VM_UUID $VOL_ID $DEVICE
+nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
+ die "Failure attaching volume $VOL_NAME to $NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
echo "Volume $VOL_NAME not attached to $NAME"
exit 1
fi
VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1`
+die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
echo "Volume not attached to correct instance"
exit 1
fi
# Detach volume
-nova volume-detach $VM_UUID $VOL_ID
+nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not detached from $NAME"
exit 1
fi
# Delete volume
-nova volume-delete $VOL_ID
+nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then
echo "Volume $VOL_NAME not deleted"
exit 1
fi
# shutdown the server
-nova delete $NAME
+nova delete $NAME || die "Failure deleting instance $NAME"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/files/apts/glance b/files/apts/glance
index 71230c4..17c84ad 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,7 +1,7 @@
python-eventlet
python-routes
python-greenlet
-python-argparse
+python-argparse # dist:oneiric
python-sqlalchemy
python-wsgiref
python-pastedeploy
diff --git a/files/apts/horizon b/files/apts/horizon
index 1e0b0e6..b00d8c0 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -22,5 +22,4 @@
python-django-mailer
python-django-nose
python-django-registration
-python-cloudfiles
python-migrate
diff --git a/files/apts/keystone b/files/apts/keystone
index 94479c9..ce536bf 100644
--- a/files/apts/keystone
+++ b/files/apts/keystone
@@ -7,6 +7,7 @@
sqlite3
python-pysqlite2
python-sqlalchemy
+python-mysqldb
python-webob
python-greenlet
python-routes
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index b9b1844..31618ab 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -3,28 +3,34 @@
catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:$(public_port)s/v2.0
catalog.RegionOne.identity.adminURL = http://%SERVICE_HOST%:$(admin_port)s/v2.0
catalog.RegionOne.identity.internalURL = http://%SERVICE_HOST%:$(public_port)s/v2.0
-catalog.RegionOne.identity.name = 'Identity Service'
+catalog.RegionOne.identity.name = Identity Service
catalog.RegionOne.compute.publicURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
catalog.RegionOne.compute.adminURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
catalog.RegionOne.compute.internalURL = http://%SERVICE_HOST%:8774/v2/$(tenant_id)s
-catalog.RegionOne.compute.name = 'Compute Service'
+catalog.RegionOne.compute.name = Compute Service
catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
-catalog.RegionOne.volume.name = 'Volume Service'
+catalog.RegionOne.volume.name = Volume Service
catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud
catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin
catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud
-catalog.RegionOne.ec2.name = 'EC2 Service'
+catalog.RegionOne.ec2.name = EC2 Service
+
+
+catalog.RegionOne.s3.publicURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
+catalog.RegionOne.s3.adminURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
+catalog.RegionOne.s3.internalURL = http://%SERVICE_HOST%:%S3_SERVICE_PORT%
+catalog.RegionOne.s3.name = S3 Service
catalog.RegionOne.image.publicURL = http://%SERVICE_HOST%:9292/v1
catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292/v1
catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292/v1
-catalog.RegionOne.image.name = 'Image Service'
+catalog.RegionOne.image.name = Image Service
diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini
index b8832ad..5cfd22f 100644
--- a/files/glance-api-paste.ini
+++ b/files/glance-api-paste.ini
@@ -1,7 +1,7 @@
[pipeline:glance-api]
#pipeline = versionnegotiation context apiv1app
# NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken auth-context apiv1app
+pipeline = versionnegotiation authtoken context apiv1app
# To enable Image Cache Management API replace pipeline with below:
# pipeline = versionnegotiation context imagecache apiv1app
@@ -30,15 +30,10 @@
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_host = %KEYSTONE_SERVICE_HOST%
-service_port = %KEYSTONE_SERVICE_PORT%
-service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
auth_host = %KEYSTONE_AUTH_HOST%
auth_port = %KEYSTONE_AUTH_PORT%
auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_token = %SERVICE_TOKEN%
-
-[filter:auth-context]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini
index f4130ec..b792aa8 100644
--- a/files/glance-registry-paste.ini
+++ b/files/glance-registry-paste.ini
@@ -1,7 +1,7 @@
[pipeline:glance-registry]
#pipeline = context registryapp
# NOTE: use the following pipeline for keystone
-pipeline = authtoken auth-context context registryapp
+pipeline = authtoken context registryapp
[app:registryapp]
paste.app_factory = glance.common.wsgi:app_factory
@@ -14,16 +14,10 @@
[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_host = %KEYSTONE_SERVICE_HOST%
-service_port = %KEYSTONE_SERVICE_PORT%
-service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
auth_host = %KEYSTONE_AUTH_HOST%
auth_port = %KEYSTONE_AUTH_PORT%
auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_token = %SERVICE_TOKEN%
-
-[filter:auth-context]
-context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
diff --git a/files/horizon_settings.py b/files/horizon_settings.py
index 2d1d1f8..1a6c17a 100644
--- a/files/horizon_settings.py
+++ b/files/horizon_settings.py
@@ -40,6 +40,12 @@
'user_home': 'openstack_dashboard.views.user_home',
}
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True
+}
+
OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
# FIXME: this is only needed until keystone fixes its GET /tenants call
diff --git a/files/keystone.conf b/files/keystone.conf
index 76c618a..1a924ed 100644
--- a/files/keystone.conf
+++ b/files/keystone.conf
@@ -1,4 +1,5 @@
[DEFAULT]
+bind_host = 0.0.0.0
public_port = 5000
admin_port = 35357
admin_token = %SERVICE_TOKEN%
@@ -34,7 +35,7 @@
driver = keystone.token.backends.kvs.Token
[policy]
-driver = keystone.policy.backends.simple.SimpleMatch
+driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
@@ -48,6 +49,9 @@
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
+[filter:xml_body]
+paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
+
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
@@ -67,10 +71,10 @@
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
-pipeline = token_auth admin_token_auth json_body debug ec2_extension s3_extension public_service
+pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
[pipeline:admin_api]
-pipeline = token_auth admin_token_auth json_body debug ec2_extension crud_extension admin_service
+pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
@@ -79,10 +83,10 @@
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
-pipeline = public_version_service
+pipeline = xml_body public_version_service
[pipeline:admin_version_api]
-pipeline = admin_version_service
+pipeline = xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 3f4841f..a49eb42 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -1,137 +1,118 @@
#!/bin/bash
-# Tenants
+#
+# Initial data for Keystone using python-keystoneclient
+#
+# Tenant User Roles
+# ------------------------------------------------------------------
+# admin admin admin
+# service glance admin
+# service nova admin, [ResellerAdmin (swift only)]
+# service quantum admin # if enabled
+# service swift admin # if enabled
+# demo admin admin
+# demo demo Member, anotherrole
+# invisible_to_admin demo Member
+#
+# Variables set before calling this script:
+# SERVICE_TOKEN - aka admin_token in keystone.conf
+# SERVICE_ENDPOINT - local Keystone admin endpoint
+# SERVICE_TENANT_NAME - name of tenant containing service accounts
+# ENABLED_SERVICES - stack.sh's list of services to start
+# DEVSTACK_DIR - Top-level DevStack directory
+
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
+SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
export SERVICE_TOKEN=$SERVICE_TOKEN
export SERVICE_ENDPOINT=$SERVICE_ENDPOINT
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
function get_id () {
- echo `$@ | grep ' id ' | awk '{print $4}'`
+ echo `$@ | awk '/ id / { print $4 }'`
}
-# Detect if the keystone cli binary has the command names changed
-# in https://review.openstack.org/4375
-# FIXME(dtroyer): Remove the keystone client command checking
-# after a suitable transition period. add-user-role
-# and ec2-create-credentials were renamed
-if keystone help | grep -q user-role-add; then
- KEYSTONE_COMMAND_4375=1
-fi
-
-ADMIN_TENANT=`get_id keystone tenant-create --name=admin`
-DEMO_TENANT=`get_id keystone tenant-create --name=demo`
-INVIS_TENANT=`get_id keystone tenant-create --name=invisible_to_admin`
+# Tenants
+ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
+SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
+DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
+INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin)
# Users
-ADMIN_USER=`get_id keystone user-create \
- --name=admin \
- --pass="$ADMIN_PASSWORD" \
- --email=admin@example.com`
-DEMO_USER=`get_id keystone user-create \
- --name=demo \
- --pass="$ADMIN_PASSWORD" \
- --email=admin@example.com`
+ADMIN_USER=$(get_id keystone user-create --name=admin \
+ --pass="$ADMIN_PASSWORD" \
+ --email=admin@example.com)
+DEMO_USER=$(get_id keystone user-create --name=demo \
+ --pass="$ADMIN_PASSWORD" \
+ --email=demo@example.com)
+
# Roles
-ADMIN_ROLE=`get_id keystone role-create --name=admin`
-MEMBER_ROLE=`get_id keystone role-create --name=Member`
-KEYSTONEADMIN_ROLE=`get_id keystone role-create --name=KeystoneAdmin`
-KEYSTONESERVICE_ROLE=`get_id keystone role-create --name=KeystoneServiceAdmin`
-SYSADMIN_ROLE=`get_id keystone role-create --name=sysadmin`
-NETADMIN_ROLE=`get_id keystone role-create --name=netadmin`
+ADMIN_ROLE=$(get_id keystone role-create --name=admin)
+KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
+KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
+# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
+# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
+ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole)
-if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then
- # Add Roles to Users in Tenants
- keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT
- keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT
- keystone user-role-add --user $DEMO_USER --role $SYSADMIN_ROLE --tenant_id $DEMO_TENANT
- keystone user-role-add --user $DEMO_USER --role $NETADMIN_ROLE --tenant_id $DEMO_TENANT
- keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT
- keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT
+# Add Roles to Users in Tenants
+keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $ADMIN_TENANT
+keystone user-role-add --user $ADMIN_USER --role $ADMIN_ROLE --tenant_id $DEMO_TENANT
+keystone user-role-add --user $DEMO_USER --role $ANOTHER_ROLE --tenant_id $DEMO_TENANT
- # TODO(termie): these two might be dubious
- keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT
- keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT
-else
- ### compat
- # Add Roles to Users in Tenants
- keystone add-user-role $ADMIN_USER $ADMIN_ROLE $ADMIN_TENANT
- keystone add-user-role $DEMO_USER $MEMBER_ROLE $DEMO_TENANT
- keystone add-user-role $DEMO_USER $SYSADMIN_ROLE $DEMO_TENANT
- keystone add-user-role $DEMO_USER $NETADMIN_ROLE $DEMO_TENANT
- keystone add-user-role $DEMO_USER $MEMBER_ROLE $INVIS_TENANT
- keystone add-user-role $ADMIN_USER $ADMIN_ROLE $DEMO_TENANT
+# TODO(termie): these two might be dubious
+keystone user-role-add --user $ADMIN_USER --role $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT
+keystone user-role-add --user $ADMIN_USER --role $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT
- # TODO(termie): these two might be dubious
- keystone add-user-role $ADMIN_USER $KEYSTONEADMIN_ROLE $ADMIN_TENANT
- keystone add-user-role $ADMIN_USER $KEYSTONESERVICE_ROLE $ADMIN_TENANT
- ###
-fi
-# Services
-keystone service-create \
- --name=nova \
- --type=compute \
- --description="Nova Compute Service"
+# The Member role is used by Horizon and Swift so we need to keep it:
+MEMBER_ROLE=$(get_id keystone role-create --name=Member)
+keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $DEMO_TENANT
+keystone user-role-add --user $DEMO_USER --role $MEMBER_ROLE --tenant_id $INVIS_TENANT
-keystone service-create \
- --name=ec2 \
- --type=ec2 \
- --description="EC2 Compatibility Layer"
-keystone service-create \
- --name=glance \
- --type=image \
- --description="Glance Image Service"
+# Configure service users/roles
+NOVA_USER=$(get_id keystone user-create --name=nova \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=nova@example.com)
+keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user $NOVA_USER \
+ --role $ADMIN_ROLE
-keystone service-create \
- --name=keystone \
- --type=identity \
- --description="Keystone Identity Service"
-
-if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
- keystone service-create \
- --name="nova-volume" \
- --type=volume \
- --description="Nova Volume Service"
-fi
+GLANCE_USER=$(get_id keystone user-create --name=glance \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=glance@example.com)
+keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user $GLANCE_USER \
+ --role $ADMIN_ROLE
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
- keystone service-create \
- --name=swift \
- --type="object-store" \
- --description="Swift Service"
+ SWIFT_USER=$(get_id keystone user-create --name=swift \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=swift@example.com)
+ keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user $SWIFT_USER \
+ --role $ADMIN_ROLE
+ # Nova needs ResellerAdmin role to download images when accessing
+ # swift through the s3 api. The admin role in swift allows a user
+ # to act as an admin for their tenant, but ResellerAdmin is needed
+ # for a user to act as any tenant. The name of this role is also
+ # configurable in swift-proxy.conf
+ RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+ keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user $NOVA_USER \
+ --role $RESELLER_ROLE
fi
+
if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
- keystone service-create \
- --name=quantum \
- --type=network \
- --description="Quantum Service"
+ QUANTUM_USER=$(get_id keystone user-create --name=quantum \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=quantum@example.com)
+ keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user $QUANTUM_USER \
+ --role $ADMIN_ROLE
fi
-
-# create ec2 creds and parse the secret and access key returned
-if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then
- RESULT=`keystone ec2-credentials-create --tenant_id=$ADMIN_TENANT --user=$ADMIN_USER`
-else
- RESULT=`keystone ec2-create-credentials --tenant_id=$ADMIN_TENANT --user_id=$ADMIN_USER`
-fi
-ADMIN_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'`
-ADMIN_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'`
-
-
-if [[ -n "$KEYSTONE_COMMAND_4375" ]]; then
- RESULT=`keystone ec2-credentials-create --tenant_id=$DEMO_TENANT --user=$DEMO_USER`
-else
- RESULT=`keystone ec2-create-credentials --tenant_id=$DEMO_TENANT --user_id=$DEMO_USER`
-fi
-DEMO_ACCESS=`echo "$RESULT" | grep access | awk '{print $4}'`
-DEMO_SECRET=`echo "$RESULT" | grep secret | awk '{print $4}'`
-
-# write the secret and access to ec2rc
-cat > $DEVSTACK_DIR/ec2rc <<EOF
-ADMIN_ACCESS=$ADMIN_ACCESS
-ADMIN_SECRET=$ADMIN_SECRET
-DEMO_ACCESS=$DEMO_ACCESS
-DEMO_SECRET=$DEMO_SECRET
-EOF
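``get_id`` drives most of the rewritten script: it runs the client command it is given and pulls the value out of the ``id`` row of the prettytable output (``awk`` field 4, since the table borders pad the row). A sketch with illustrative values::

    # keystone tenant-create prints a table such as:
    #   +----------+----------------------------------+
    #   | Property |              Value               |
    #   +----------+----------------------------------+
    #   |    id    | 1b5c7fe53bf74754bbc34965f3cec2e9 |
    #   +----------+----------------------------------+
    DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
    echo $DEMO_TENANT    # 1b5c7fe53bf74754bbc34965f3cec2e9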
diff --git a/files/pips/horizon b/files/pips/horizon
index 44bf6db..f15602e 100644
--- a/files/pips/horizon
+++ b/files/pips/horizon
@@ -1,2 +1,3 @@
django-nose-selenium
pycrypto==2.3
+python-cloudfiles
diff --git a/files/sudo/nova b/files/sudo/nova
deleted file mode 100644
index 3231e2d..0000000
--- a/files/sudo/nova
+++ /dev/null
@@ -1,49 +0,0 @@
-Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
- /bin/chown /var/lib/nova/tmp/*/root/.ssh, \
- /bin/chown, \
- /bin/chmod, \
- /bin/dd, \
- /sbin/ifconfig, \
- /sbin/ip, \
- /sbin/route, \
- /sbin/iptables, \
- /sbin/iptables-save, \
- /sbin/iptables-restore, \
- /sbin/ip6tables-save, \
- /sbin/ip6tables-restore, \
- /sbin/kpartx, \
- /sbin/losetup, \
- /sbin/lvcreate, \
- /sbin/lvdisplay, \
- /sbin/lvremove, \
- /bin/mkdir, \
- /bin/mount, \
- /sbin/pvcreate, \
- /usr/bin/tee, \
- /sbin/tune2fs, \
- /bin/umount, \
- /sbin/vgcreate, \
- /usr/bin/virsh, \
- /usr/bin/qemu-nbd, \
- /usr/sbin/brctl, \
- /sbin/brctl, \
- /usr/sbin/radvd, \
- /usr/sbin/vblade-persist, \
- /sbin/pvcreate, \
- /sbin/aoe-discover, \
- /sbin/vgcreate, \
- /bin/aoe-stat, \
- /bin/kill, \
- /sbin/vconfig, \
- /usr/sbin/ietadm, \
- /sbin/vgs, \
- /sbin/iscsiadm, \
- /usr/bin/socat, \
- /sbin/parted, \
- /usr/sbin/dnsmasq, \
- /usr/bin/ovs-vsctl, \
- /usr/bin/ovs-ofctl, \
- /usr/sbin/arping
-
-%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS
-
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index d6db117..1627af0 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -19,6 +19,8 @@
paste.filter_factory = keystone.middleware.swift_auth:filter_factory
operator_roles = Member,admin
+# NOTE(chmou): s3token middleware is not updated yet to use only
+# username and password.
[filter:s3token]
paste.filter_factory = keystone.middleware.s3_token:filter_factory
service_port = %KEYSTONE_SERVICE_PORT%
@@ -29,16 +31,15 @@
auth_token = %SERVICE_TOKEN%
admin_token = %SERVICE_TOKEN%
-[filter:tokenauth]
+[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_port = %KEYSTONE_SERVICE_PORT%
-service_host = %KEYSTONE_SERVICE_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
auth_host = %KEYSTONE_AUTH_HOST%
+auth_port = %KEYSTONE_AUTH_PORT%
auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_token = %SERVICE_TOKEN%
-admin_token = %SERVICE_TOKEN%
-cache = swift.cache
+auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
+admin_tenant_name = %SERVICE_TENANT_NAME%
+admin_user = %SERVICE_USERNAME%
+admin_password = %SERVICE_PASSWORD%
[filter:swift3]
use = egg:swift#swift3
diff --git a/functions b/functions
index 01c4758..75c20d7 100644
--- a/functions
+++ b/functions
@@ -1,10 +1,14 @@
# functions - Common functions used by DevStack components
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
# apt-get wrapper to set arguments correctly
# apt_get package [package ...]
function apt_get() {
- [[ "$OFFLINE" = "True" ]] && return
+ [[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo DEBIAN_FRONTEND=noninteractive \
@@ -22,6 +26,50 @@
}
+# Prints "message" and exits
+# die "message"
+function die() {
+ local exitcode=$?
+ set +o xtrace
+ echo $@
+ exit $exitcode
+}
+
+
+# Prints "message" and exits if the named environment variable is unset
+# or empty, or if the exit code of the previous command is non-zero
+# NOTE: env-var is the variable name without a '$'
+# die_if_not_set env-var "message"
+function die_if_not_set() {
+ (
+ local exitcode=$?
+ set +o xtrace
+ local evar=$1; shift
+ if ! is_set $evar || [ $exitcode != 0 ]; then
+ set +o xtrace
+ echo $@
+ exit -1
+ fi
+ )
+}
+
+
+# Grab a numbered field from python prettytable output
+# Fields are numbered starting with 1
+# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
+# get_field field-number
+function get_field() {
+ while read data; do
+ if [ "$1" -lt 0 ]; then
+ field="(\$(NF$1))"
+ else
+ field="\$$(($1 + 1))"
+ fi
+ echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
+ done
+}
+
+
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
@@ -67,10 +115,44 @@
}
+# is_service_enabled() checks if the service(s) specified as arguments are
+# enabled by the user in **ENABLED_SERVICES**.
+#
+# If multiple services are specified as arguments the test acts as a
+# boolean OR, returning true if any one of them is enabled.
+#
+# There are special cases for some 'catch-all' services::
+#   **nova** returns true if any enabled service starts with **n-**
+#   **glance** returns true if any enabled service starts with **g-**
+#   **quantum** returns true if any enabled service starts with **q-**
+function is_service_enabled() {
+ services=$@
+ for service in ${services}; do
+ [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
+ [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
+ [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+ done
+ return 1
+}
+
+
+# Test if the named environment variable is set and not zero length
+# is_set env-var
+function is_set() {
+ local var=\$"$1"
+ if eval "[ -z $var ]"; then
+ return 1
+ fi
+ return 0
+}
+
+
# pip install wrapper to set cache and proxy environment variables
# pip_install package [package ...]
function pip_install {
- [[ "$OFFLINE" = "True" ]] && return
+ [[ "$OFFLINE" = "True" || -z "$@" ]] && return
sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
@@ -89,3 +171,6 @@
[[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
echo "$default"
}
+
+# Restore xtrace
+$XTRACE
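Two of the new helpers deserve a usage note. ``get_field`` reads prettytable rows on stdin and prints one column: field 1 is the first data column (the ``awk`` field numbers are shifted by one because the leading ``|`` yields an empty first field), and negative numbers count from the right. ``is_service_enabled`` is the same test ``stack.sh`` used internally, now shared. For illustration::

    # Print the ID column of a matching volume row
    nova volume-list | grep ex-vol | get_field 1

    # Print the last column (e.g. the attached instance UUID)
    nova volume-list | grep ex-vol | get_field -1

    # Guard a block on a service being enabled
    if is_service_enabled swift; then
        echo "swift is in ENABLED_SERVICES"
    fi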
diff --git a/openrc b/openrc
index 9b3d7ba..be7850b 100644
--- a/openrc
+++ b/openrc
@@ -1,7 +1,42 @@
#!/usr/bin/env bash
+#
+# source openrc [username] [tenantname]
+#
+# Configure a set of credentials for $TENANT/$USERNAME:
+# Set OS_TENANT_NAME to override the default tenant 'demo'
+# Set OS_USERNAME to override the default user name 'demo'
+# Set ADMIN_PASSWORD to set the password for 'admin' and 'demo'
+
+# NOTE: support for the old NOVA_* novaclient environment variables has
+# been removed.
+
+if [[ -n "$1" ]]; then
+ OS_USERNAME=$1
+fi
+if [[ -n "$2" ]]; then
+ OS_TENANT_NAME=$2
+fi
+
+# Find the other rc files
+RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
# Load local configuration
-source ./stackrc
+source $RC_DIR/stackrc
+
+# The introduction of Keystone to the OpenStack ecosystem has standardized the
+# term **tenant** as the entity that owns resources. In some places references
+# still exist to the original Nova term **project** for this use. Also,
+# **tenant_name** is preferred to **tenant_id**.
+export OS_TENANT_NAME=${OS_TENANT_NAME:-demo}
+
+# In addition to the owning entity (tenant), nova stores the entity performing
+# the action as the **user**.
+export OS_USERNAME=${OS_USERNAME:-demo}
+
+# With Keystone you pass the keystone password instead of an api key.
+# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEY
+# or NOVA_PASSWORD.
+export OS_PASSWORD=${ADMIN_PASSWORD:-secrete}
# Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint,
# which is convenient for some localrc configurations.
@@ -12,83 +47,21 @@
# should be listening on HOST_IP. If its running elsewhere, it can be set here
GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
-# novaclient now supports the new OS_* configuration variables in addition to
-# the older NOVA_* variables. Set them both for now...
-
-# Nova original used project_id as the *account* that owned resources (servers,
-# ip address, ...) With the addition of Keystone we have standardized on the
-# term **tenant** as the entity that owns the resources. **novaclient** still
-# uses the old deprecated terms project_id. Note that this field should now be
-# set to tenant_name, not tenant_id.
-export NOVA_PROJECT_ID=${TENANT:-demo}
-export OS_TENANT_NAME=${NOVA_PROJECT_ID}
-
-# In addition to the owning entity (tenant), nova stores the entity performing
-# the action as the **user**.
-export NOVA_USERNAME=${USERNAME:-demo}
-export OS_USERNAME=${NOVA_USERNAME}
-
-# With Keystone you pass the keystone password instead of an api key.
-# Recent versions of novaclient use NOVA_PASSWORD instead of NOVA_API_KEY
-# The most recent versions of novaclient use OS_PASSWORD in addition to NOVA_PASSWORD
-export NOVA_PASSWORD=${ADMIN_PASSWORD:-secrete}
-export OS_PASSWORD=${NOVA_PASSWORD}
-
-# With the addition of Keystone, to use an openstack cloud you should
-# authenticate against keystone, which returns a **Token** and **Service
-# Catalog**. The catalog contains the endpoint for all services the user/tenant
-# has access to - including nova, glance, keystone, swift, ... We currently
-# recommend using the 2.0 *auth api*.
+# Authenticating against an OpenStack cloud using Keystone returns a **Token**
+# and **Service Catalog**. The catalog contains the endpoints for all services
+# the user/tenant has access to - including nova, glance, keystone, swift, ...
+# We currently recommend using the 2.0 *identity api*.
#
-# *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0. We
+# *NOTE*: Using the 2.0 *identity api* does not mean that the compute api is 2.0. We
# will use the 1.1 *compute api*
-export NOVA_URL=${NOVA_URL:-http://$SERVICE_HOST:5000/v2.0}
-export OS_AUTH_URL=${NOVA_URL}
+export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0
# Currently novaclient needs you to specify the *compute api* version. This
# needs to match the config of your catalog returned by Keystone.
export NOVA_VERSION=${NOVA_VERSION:-1.1}
-
-# FIXME - why does this need to be specified?
-export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne}
-
-# Set the ec2 url so euca2ools works
-export EC2_URL=${EC2_URL:-http://$SERVICE_HOST:8773/services/Cloud}
-
-# Access key is set in the initial keystone data to be the same as username
-export EC2_ACCESS_KEY=${DEMO_ACCESS}
-
-# Secret key is set in the initial keystone data to the admin password
-export EC2_SECRET_KEY=${DEMO_SECRET}
-
-# Euca2ools Certificate stuff for uploading bundles
-# See exercises/bundle.sh to see how to get certs using nova cli
-NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
- NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
-NOVA_KEY_DIR=${NOVARC%/*}
-export S3_URL=http://$SERVICE_HOST:3333
-export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
-export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
-export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
-export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem
-export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
-alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
-alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+# In the future this will change names:
+export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
# set log level to DEBUG (helps debug issues)
+# export KEYSTONECLIENT_DEBUG=1
# export NOVACLIENT_DEBUG=1
-
-# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
-
-# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time from run instance command until it is running
-export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
-
-# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
-
-# Max time to wait for a vm to terminate
-export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
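With the new positional arguments, switching credential sets is a one-liner::

    # Default demo user in the demo tenant
    source openrc

    # Admin user in the admin tenant, as samples/local.sh does below
    source openrc admin admin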
diff --git a/samples/local.sh b/samples/local.sh
new file mode 100755
index 0000000..83637f9
--- /dev/null
+++ b/samples/local.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+
+# Sample ``local.sh`` for user-configurable tasks to run automatically
+# at the successful conclusion of ``stack.sh``.
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to
+# work properly.
+
+# This is a collection of some of the things we have found to be useful to run
+# after stack.sh to tweak the OpenStack configuration that DevStack produces.
+# These should be considered as samples and are unsupported DevStack code.
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Use openrc + stackrc + localrc for settings
+source $TOP_DIR/stackrc
+
+# Destination path for installation ``DEST``
+DEST=${DEST:-/opt/stack}
+
+
+# Import ssh keys
+# ---------------
+
+# Import keys from the current user into the default OpenStack user (usually
+# ``demo``)
+
+# Get OpenStack auth
+source $TOP_DIR/openrc
+
+# Add first keypair found in localhost:$HOME/.ssh
+for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
+ if [[ -f $i ]]; then
+ nova keypair-add --pub_key=$i `hostname`
+ break
+ fi
+done
+
+
+# Create A Flavor
+# ---------------
+
+# Get OpenStack admin auth
+source $TOP_DIR/openrc admin admin
+
+# Name of new flavor
+# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
+MI_NAME=m1.micro
+
+# Create micro flavor if not present
+if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
+ nova flavor-create $MI_NAME 6 128 0 1
+fi
+# Other Uses
+# ----------
+
+# Add tcp/22 to default security group
+
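The last item is left as a comment in the sample; a sketch of what that rule might look like with the standard novaclient syntax (the CIDR and ports shown are illustrative)::

    # Allow inbound ssh from anywhere in the default security group
    nova secgroup-add-rule default tcp 22 22 0.0.0.0/0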
diff --git a/samples/localrc b/samples/localrc
new file mode 100644
index 0000000..4fb093d
--- /dev/null
+++ b/samples/localrc
@@ -0,0 +1,77 @@
+# Sample ``localrc`` for user-configurable variables in ``stack.sh``
+
+# NOTE: Copy this file to the root ``devstack`` directory for it to work properly.
+
+# ``localrc`` is a user-maintained settings file that is sourced at the end of
+# ``stackrc``. This gives it the ability to override any variables set in ``stackrc``.
+# Also, most of the settings in ``stack.sh`` are written to only be set if no
+# value has already been set; this lets ``localrc`` effectively override the
+# default values.
+
+# This is a collection of some of the settings we have found to be useful
+# in our DevStack development environments. Additional settings are described
+# in http://devstack.org/localrc.html
+# These should be considered as samples and are unsupported DevStack code.
+
+
+# Minimal Contents
+# ----------------
+
+# While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
+# there are a few minimal variables set:
+
+# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
+# values for them by ``stack.sh``.
+ADMIN_PASSWORD=nomoresecrete
+MYSQL_PASSWORD=stackdb
+RABBIT_PASSWORD=stackqueue
+SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+# HOST_IP should be set manually for best results. It is auto-detected during the
+# first run of ``stack.sh`` but often is indeterminate on later runs due to the IP
+# being moved from an Ethernet interface to a bridge on the host. Setting it here
+# also makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``.
+# ``HOST_IP`` is not set by default.
+HOST_IP=w.x.y.z
+
+
+# Set DevStack Install Directory
+# ------------------------------
+
+# The DevStack install directory is set by the ``DEST`` variable. By setting it
+# early in ``localrc`` you can reference it in later variables. The default value
+# is ``/opt/stack``. It can be useful to set it even though it is not changed from
+# the default value.
+DEST=/opt/stack
+
+
+# Using milestone-proposed branches
+# ---------------------------------
+
+# Uncomment these to grab the milestone-proposed branches from the repos:
+#GLANCE_BRANCH=milestone-proposed
+#HORIZON_BRANCH=milestone-proposed
+#KEYSTONE_BRANCH=milestone-proposed
+#KEYSTONECLIENT_BRANCH=milestone-proposed
+#NOVA_BRANCH=milestone-proposed
+#NOVACLIENT_BRANCH=milestone-proposed
+#SWIFT_BRANCH=milestone-proposed
+
+
+# Swift
+# -----
+
+# Swift is now used as the back-end for the S3-like object store. Even if
+# Nova's objectstore (``n-obj`` in ``ENABLED_SERVICES``) is enabled, it will
+# NOT run when Swift is enabled. Setting the hash value is required and you
+# will be prompted for it if Swift is enabled so just set it to something
+# already:
+SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5
+
+# For development purposes the default of 3 replicas is usually not required.
+# Set this to 1 to save some resources:
+SWIFT_REPLICAS=1
+
+# The data for Swift is stored in the source tree by default (``$DEST/swift/data``)
+# and can be moved by setting ``SWIFT_DATA_DIR``. The directory will be created
+# if it does not exist.
+SWIFT_DATA_DIR=$DEST/data
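Because ``localrc`` overrides ``stackrc``, it is also the natural place to adjust the service list itself. A hypothetical example that drops nova-objectstore in favor of Swift::

    ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-cpu,n-net,n-vol,n-sch,horizon,mysql,rabbit,swift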
diff --git a/stack.sh b/stack.sh
index eea283f..ae4ee87 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,8 +1,9 @@
#!/usr/bin/env bash
-# **stack.sh** is an opinionated openstack developer installation.
+# **stack.sh** is an opinionated OpenStack developer installation.
-# This script installs and configures *nova*, *glance*, *horizon* and *keystone*
+# This script installs and configures various combinations of *Glance*,
+# *Horizon*, *Keystone*, *Melange*, *Nova*, *Quantum* and *Swift*
# This script allows you to specify configuration options of what git
# repositories to use, enabled services, network configuration and various
@@ -17,6 +18,7 @@
# Learn more and get the most recent version at http://devstack.org
+
# Sanity Check
# ============
@@ -24,7 +26,7 @@
# installation with ``FORCE=yes ./stack``
DISTRO=$(lsb_release -c -s)
-if [[ ! ${DISTRO} =~ (oneiric) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise) ]]; then
echo "WARNING: this script has only been tested on oneiric"
if [[ "$FORCE" != "yes" ]]; then
echo "If you wish to run this script anyway run with FORCE=yes"
@@ -49,19 +51,18 @@
fi
-
# Settings
# ========
-# This script is customizable through setting environment variables. If you
-# want to override a setting you can either::
+# ``stack.sh`` is customizable through setting environment variables. If you
+# want to override a setting you can set and export it::
#
# export MYSQL_PASSWORD=anothersecret
# ./stack.sh
#
# You can also pass options on a single line ``MYSQL_PASSWORD=simple ./stack.sh``
#
-# Additionally, you can put any local variables into a ``localrc`` file, like::
+# Additionally, you can put any local variables into a ``localrc`` file::
#
# MYSQL_PASSWORD=anothersecret
# MYSQL_USER=hellaroot
@@ -69,21 +70,17 @@
# We try to have sensible defaults, so you should be able to run ``./stack.sh``
# in most cases.
#
+# DevStack distributes ``stackrc`` which contains locations for the OpenStack
+# repositories and branches to configure. ``stackrc`` sources ``localrc`` to
+# allow you to override those settings and not have your changes overwritten
+# when updating DevStack.
+
# We support HTTP and HTTPS proxy servers via the usual environment variables
-# http_proxy and https_proxy. They can be set in localrc if necessary or
+# **http_proxy** and **https_proxy**. They can be set in ``localrc`` if necessary or
# on the command line::
#
# http_proxy=http://proxy.example.com:3128/ ./stack.sh
-#
-# We source our settings from ``stackrc``. This file is distributed with devstack
-# and contains locations for what repositories to use. If you want to use other
-# repositories and branches, you can add your own settings with another file called
-# ``localrc``
-#
-# If ``localrc`` exists, then ``stackrc`` will load those settings. This is
-# useful for changing a branch or repository to test other versions. Also you
-# can store your other settings like **MYSQL_PASSWORD** or **ADMIN_PASSWORD** instead
-# of letting devstack generate random ones for you.
+
source ./stackrc
# Destination path for installation ``DEST``
@@ -99,7 +96,7 @@
# OpenStack is designed to be run as a regular user (Horizon will fail to run
# as root, since apache refused to startup serve content from root user). If
-# stack.sh is run as root, it automatically creates a stack user with
+# ``stack.sh`` is run as **root**, it automatically creates a **stack** user with
# sudo privileges and runs as that user.
if [[ $EUID -eq 0 ]]; then
@@ -135,22 +132,34 @@
fi
exit 1
else
- # Our user needs passwordless priviledges for certain commands which nova
- # uses internally.
- # Natty uec images sudoers does not have a '#includedir'. add one.
+ # We're not root, make sure sudo is available
+ dpkg -l sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo."
+
+    # UEC images' /etc/sudoers does not have a '#includedir'; add one.
sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers
+
+ # Set up devstack sudoers
TEMPFILE=`mktemp`
- cat $FILES/sudo/nova > $TEMPFILE
- sed -e "s,%USER%,$USER,g" -i $TEMPFILE
+ echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE
chmod 0440 $TEMPFILE
sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova
+ sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
+
+ # Set up the rootwrap sudoers
+ TEMPFILE=`mktemp`
+ echo "$USER ALL=(root) NOPASSWD: /usr/local/bin/nova-rootwrap" >$TEMPFILE
+ chmod 0440 $TEMPFILE
+ sudo chown root:root $TEMPFILE
+ sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
+
+ # Remove old file
+ sudo rm -f /etc/sudoers.d/stack_sh_nova
fi
-# Set True to configure stack.sh to run cleanly without Internet access.
-# stack.sh must have been previously run with Internet access to install
-# prerequisites and initialize $DEST.
+# Set True to configure ``stack.sh`` to run cleanly without Internet access.
+# ``stack.sh`` must have been previously run with Internet access to install
+# prerequisites and initialize ``$DEST``.
OFFLINE=`trueorfalse False $OFFLINE`
# Set the destination directories for openstack projects
@@ -179,25 +188,22 @@
# Default Melange Host
M_HOST=${M_HOST:-localhost}
# Melange MAC Address Range
-M_MAC_RANGE=${M_MAC_RANGE:-404040/24}
-
-# Specify which services to launch. These generally correspond to screen tabs
-ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit}
+M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-# Nova hypervisor configuration. We default to libvirt whth **kvm** but will
-# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can
+# Nova hypervisor configuration. We default to libvirt with **kvm** but will
+# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC** based system.
VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
-# nova supports pluggable schedulers. ``SimpleScheduler`` should work in most
-# cases unless you are working on multi-zone mode.
-SCHEDULER=${SCHEDULER:-nova.scheduler.simple.SimpleScheduler}
+# Nova supports pluggable schedulers. ``FilterScheduler`` should work in most
+# cases.
+SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
HOST_IP_IFACE=${HOST_IP_IFACE:-eth0}
# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable
@@ -213,7 +219,7 @@
# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
-# Configure services to syslog instead of writing to individual log files
+# Configure services to use syslog instead of writing to individual log files
SYSLOG=`trueorfalse False $SYSLOG`
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
SYSLOG_PORT=${SYSLOG_PORT:-516}
@@ -262,49 +268,39 @@
set -o xtrace
}
-# This function will check if the service(s) specified in argument is
-# enabled by the user in ENABLED_SERVICES.
-#
-# If there is multiple services specified as argument it will act as a
-# boolean OR or if any of the services specified on the command line
-# return true.
-#
-# There is a special cases for some 'catch-all' services :
-# nova would catch if any service enabled start by n-
-# glance would catch if any service enabled start by g-
-# quantum would catch if any service enabled start by q-
-function is_service_enabled() {
- services=$@
- for service in ${services}; do
- [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
- [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
- [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
- [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
- done
- return 1
-}
-
# Nova Network Configuration
# --------------------------
-# FIXME: more documentation about why these are important flags. Also
-# we should make sure we use the same variable names as the flag names.
+# FIXME: more documentation about why these are important options. Also
+# we should make sure we use the same variable names as the option names.
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ PUBLIC_INTERFACE_DEFAULT=eth3
+ # allow build_domU.sh to specify the flat network bridge via kernel args
+ FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[^.]*' /proc/cmdline | cut -d= -f 2)
+ GUEST_INTERFACE_DEFAULT=eth1
+else
+ PUBLIC_INTERFACE_DEFAULT=br100
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+ GUEST_INTERFACE_DEFAULT=eth0
+fi
+
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100}
-VLAN_INTERFACE=${VLAN_INTERFACE:-eth0}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
# Test floating pool and range are used for testing. They are defined
# here until the admin APIs can replace nova-manage
TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-# Multi-host is a mode where each compute node runs its own network node. This
+# **MULTI_HOST** is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
MULTI_HOST=${MULTI_HOST:-False}
@@ -323,7 +319,7 @@
# devices other than that node, you can set the flat interface to the same
# value as ``FLAT_NETWORK_BRIDGE``. This will stop the network hiccup from
# occurring.
-FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
@@ -357,12 +353,12 @@
# By default this script will install and configure MySQL. If you want to
# use an existing server, you can pass in the user/password/host parameters.
# You will need to send the same ``MYSQL_PASSWORD`` to every host if you are doing
-# a multi-node devstack installation.
+# a multi-node DevStack installation.
MYSQL_HOST=${MYSQL_HOST:-localhost}
MYSQL_USER=${MYSQL_USER:-root}
read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL."
-# don't specify /db in this string, so we can use it for multiple services
+# NOTE: Don't specify /db in this string so we can use it for multiple services
BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST}
# Rabbit connection info
@@ -372,6 +368,7 @@
# Glance connection info. Note the port must be specified.
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
+
# SWIFT
# -----
# TODO: implement glance support
@@ -408,23 +405,35 @@
# only some quick testing.
SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
-# We only ask for Swift Hash if we have enabled swift service.
if is_service_enabled swift; then
+ # If we are using swift, we can default the s3 port to swift instead
+ # of nova-objectstore
+ S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+ # We only ask for Swift Hash if we have enabled swift service.
# SWIFT_HASH is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
+# Set default port for nova-objectstore
+S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
+
# Keystone
# --------
# Service Token - Openstack components need to have an admin token
# to validate user tokens.
read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+# Services authenticate to Identity with servicename/SERVICE_PASSWORD
+read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
# Horizon currently truncates usernames and passwords at 20 characters
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
# Set Keystone interface configuration
+KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
@@ -432,6 +441,7 @@
KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
+
# Horizon
# -------
@@ -440,6 +450,7 @@
APACHE_USER=${APACHE_USER:-$USER}
APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER}
+
# Log files
# ---------
@@ -447,17 +458,22 @@
# Set LOGFILE to turn on logging
# We append '.xxxxxxxx' to the given name to maintain history
# where xxxxxxxx is a representation of the date the file was created
+if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
+ LOGDAYS=${LOGDAYS:-7}
+ TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+ CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
+fi
+
if [[ -n "$LOGFILE" ]]; then
# First clean up old log files. Use the user-specified LOGFILE
# as the template to search for, appending '.*' to match the date
# we added on earlier runs.
- LOGDAYS=${LOGDAYS:-7}
LOGDIR=$(dirname "$LOGFILE")
LOGNAME=$(basename "$LOGFILE")
+ mkdir -p $LOGDIR
find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \;
- TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
- LOGFILE=$LOGFILE.$(date "+$TIMESTAMP_FORMAT")
+ LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
# Redirect stdout/stderr to tee to write the log file
exec 1> >( tee "${LOGFILE}" ) 2>&1
echo "stack.sh log $LOGFILE"
@@ -465,6 +481,23 @@
ln -sf $LOGFILE $LOGDIR/$LOGNAME
fi
+# Set up logging of screen windows
+# Set SCREEN_LOGDIR to turn on logging of screen windows to the
+# directory specified in SCREEN_LOGDIR; each window is logged to the file
+# screen-$SERVICE_NAME.$TIMESTAMP.log in that dir, with a link
+# screen-$SERVICE_NAME.log pointing to the latest log file.
+# Logs are kept for as long as specified in LOGDAYS.
+if [[ -n "$SCREEN_LOGDIR" ]]; then
+
+    # Make sure the directory is created
+    if [[ -d "$SCREEN_LOGDIR" ]]; then
+        # Clean up the old logs
+ find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
+ else
+ mkdir -p $SCREEN_LOGDIR
+ fi
+fi
+
# So that errors don't compound we exit on any errors so you see only the
# first error that occurred.
trap failed ERR
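Both logging modes are driven by variables normally set in ``localrc``; for example::

    # Log stack.sh output, keeping two weeks of history
    LOGFILE=/opt/stack/logs/stack.sh.log
    LOGDAYS=14

    # Also capture each screen window to its own log file
    SCREEN_LOGDIR=/opt/stack/logs/screen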
@@ -485,25 +518,32 @@
sudo chown `whoami` $DEST
fi
+
# Install Packages
# ================
#
# Openstack uses a fair number of other projects.
-# - We are going to install packages only for the services needed.
-# - We are parsing the packages files and detecting metadatas.
-# - If there is a NOPRIME as comment mean we are not doing the install
-# just yet.
-# - If we have the meta-keyword dist:DISTRO or
-# dist:DISTRO1,DISTRO2 it will be installed only for those
-# distros (case insensitive).
+# get_packages() collects a list of package names of any type from the
+# prerequisite files in ``files/{apts|pips}``. The list is intended
+# to be passed to a package installer such as apt or pip.
+#
+# Only packages required for the services in ENABLED_SERVICES will be
+# included. Two bits of metadata are recognized in the prerequisite files:
+# - ``# NOPRIME`` defers installation to be performed later in stack.sh
+# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
+# of the package to the distros listed. The distro names are case insensitive.
function get_packages() {
- local file_to_parse="general"
+ local package_dir=$1
+ local file_to_parse
local service
- for service in ${ENABLED_SERVICES//,/ }; do
- # Allow individual services to specify dependencies
- if [[ -e $FILES/apts/${service} ]]; then
+ if [[ -z "$package_dir" ]]; then
+ echo "No package directory supplied"
+ return 1
+ fi
+ for service in general ${ENABLED_SERVICES//,/ }; do # Allow individual services to specify dependencies
+ if [[ -e ${package_dir}/${service} ]]; then
file_to_parse="${file_to_parse} $service"
fi
if [[ $service == n-* ]]; then
@@ -522,9 +562,9 @@
done
for file in ${file_to_parse}; do
- local fname=${FILES}/apts/${file}
+ local fname=${package_dir}/${file}
local OIFS line package distros distro
- [[ -e $fname ]] || { echo "missing: $fname"; exit 1 ;}
+ [[ -e $fname ]] || continue
OIFS=$IFS
IFS=$'\n'
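The metadata lives inline in the prerequisite files themselves. A hypothetical ``files/apts`` entry demonstrating both forms::

    libfoo-dev          # NOPRIME
    python-foo          # dist:oneiric,precise
    python-bar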
@@ -550,10 +590,10 @@
# install apt requirements
apt_get update
-apt_get install $(get_packages)
+apt_get install $(get_packages $FILES/apts)
# install python requirements
-pip_install `cat $FILES/pips/* | uniq`
+pip_install $(get_packages $FILES/pips | sort -u)
# compute service
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
@@ -582,14 +622,13 @@
# django powered web control panel for openstack
git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
fi
+if is_service_enabled quantum; then
+ git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH
+fi
if is_service_enabled q-svc; then
# quantum
git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
fi
-if is_service_enabled q-svc horizon; then
- git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH
-fi
-
if is_service_enabled m-svc; then
# melange
git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
@@ -599,10 +638,10 @@
git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
fi
+
# Initialization
# ==============
-
# setup our checkouts so they are installed into python path
# allowing ``import nova`` or ``import glance.client``
cd $KEYSTONECLIENT_DIR; sudo python setup.py develop
@@ -620,12 +659,12 @@
if is_service_enabled horizon; then
cd $HORIZON_DIR; sudo python setup.py develop
fi
+if is_service_enabled quantum; then
+ cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop
+fi
if is_service_enabled q-svc; then
cd $QUANTUM_DIR; sudo python setup.py develop
fi
-if is_service_enabled q-svc horizon; then
- cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop
-fi
if is_service_enabled m-svc; then
cd $MELANGE_DIR; sudo python setup.py develop
fi
@@ -633,8 +672,9 @@
cd $MELANGECLIENT_DIR; sudo python setup.py develop
fi
+
# Syslog
-# ---------
+# ------
if [[ $SYSLOG != "False" ]]; then
apt_get install -y rsyslog-relp
@@ -655,8 +695,9 @@
sudo /usr/sbin/service rsyslog restart
fi
+
# Rabbit
-# ---------
+# ------
if is_service_enabled rabbit; then
# Install and start rabbitmq-server
@@ -669,8 +710,9 @@
sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
fi
+
# Mysql
-# ---------
+# -----
if is_service_enabled mysql; then
@@ -705,9 +747,54 @@
sudo service mysql restart
fi
+# Our screenrc file builder
+function screen_rc {
+ SCREENRC=$TOP_DIR/stack-screenrc
+ if [[ ! -e $SCREENRC ]]; then
+ # Name the screen session
+ echo "sessionname stack" > $SCREENRC
+ # Set a reasonable statusbar
+ echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC
+ echo "screen -t stack bash" >> $SCREENRC
+ fi
+ # If this service doesn't already exist in the screenrc file
+ if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+ NL=`echo -ne '\015'`
+ echo "screen -t $1 bash" >> $SCREENRC
+ echo "stuff \"$2$NL\"" >> $SCREENRC
+ fi
+}
+
+# Our screen helper to launch a service in a hidden named screen
+function screen_it {
+ NL=`echo -ne '\015'`
+ if is_service_enabled $1; then
+ # Append the service to the screen rc file
+ screen_rc "$1" "$2"
+
+ screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to receive the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+ sleep 1.5
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+ screen -S stack -p $1 -X log on
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+ fi
+ screen -S stack -p $1 -X stuff "$2$NL"
+ fi
+}
+
+# create a new named screen to run processes in
+screen -d -m -S stack -t stack -s /bin/bash
+sleep 1
+# set a reasonable statusbar
+screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
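``screen_rc`` accumulates a ``stack-screenrc`` file in parallel with the live session, so the same set of windows can later be recreated with ``screen -c stack-screenrc``. The generated file ends up looking roughly like this (the service and command are illustrative; ``^M`` is the embedded carriage return)::

    sessionname stack
    hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
    screen -t stack bash
    screen -t g-reg bash
    stuff "cd /opt/stack/glance && bin/glance-registry^M"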
# Horizon
-# ---------
+# -------
# Setup the django horizon application to serve via apache/wsgi
@@ -716,9 +803,6 @@
# Install apache2, which is NOPRIME'd
apt_get install apache2 libapache2-mod-wsgi
- # Link to quantum client directory.
- rm -fr ${HORIZON_DIR}/openstack_dashboard/quantum
- ln -s ${QUANTUM_CLIENT_DIR}/quantum ${HORIZON_DIR}/openstack_dashboard/quantum
# Remove stale session database.
rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3
@@ -727,11 +811,6 @@
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
cp $FILES/horizon_settings.py $local_settings
- # Enable quantum in dashboard, if requested
- if is_service_enabled quantum; then
- sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings
- fi
-
# Initialize the horizon database (it stores sessions and notices shown to
# users). The user system is external (keystone).
cd $HORIZON_DIR
@@ -755,6 +834,11 @@
# ------
if is_service_enabled g-reg; then
+ GLANCE_CONF_DIR=/etc/glance
+ if [[ ! -d $GLANCE_CONF_DIR ]]; then
+ sudo mkdir -p $GLANCE_CONF_DIR
+ fi
+ sudo chown `whoami` $GLANCE_CONF_DIR
GLANCE_IMAGE_DIR=$DEST/glance/images
# Delete existing images
rm -rf $GLANCE_IMAGE_DIR
@@ -764,17 +848,21 @@
# (re)create glance database
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;'
function glance_config {
sudo sed -e "
+ s,%KEYSTONE_API_PORT%,$KEYSTONE_API_PORT,g;
s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g;
s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
- s,%SQL_CONN%,$BASE_SQL_CONN/glance,g;
+ s,%SQL_CONN%,$BASE_SQL_CONN/glance?charset=utf8,g;
+ s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+ s,%SERVICE_USERNAME%,glance,g;
+ s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
s,%DEST%,$DEST,g;
s,%SYSLOG%,$SYSLOG,g;
@@ -782,61 +870,137 @@
}
# Copy over our glance configurations and update them
- GLANCE_REGISTRY_CONF=$GLANCE_DIR/etc/glance-registry.conf
+ GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF
glance_config $GLANCE_REGISTRY_CONF
if [[ -e $FILES/glance-registry-paste.ini ]]; then
- GLANCE_REGISTRY_PASTE_INI=$GLANCE_DIR/etc/glance-registry-paste.ini
+ GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
glance_config $GLANCE_REGISTRY_PASTE_INI
fi
- GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
+ GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
cp $FILES/glance-api.conf $GLANCE_API_CONF
glance_config $GLANCE_API_CONF
if [[ -e $FILES/glance-api-paste.ini ]]; then
- GLANCE_API_PASTE_INI=$GLANCE_DIR/etc/glance-api-paste.ini
+ GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI
glance_config $GLANCE_API_PASTE_INI
fi
fi
+# Quantum
+# -------
+
+# Quantum service
+if is_service_enabled q-svc; then
+ QUANTUM_CONF_DIR=/etc/quantum
+ if [[ ! -d $QUANTUM_CONF_DIR ]]; then
+ sudo mkdir -p $QUANTUM_CONF_DIR
+ fi
+ sudo chown `whoami` $QUANTUM_CONF_DIR
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ # Install deps
+ # FIXME add to files/apts/quantum, but don't install if not needed!
+ kernel_version=`cat /proc/version | cut -d " " -f3`
+ apt_get install linux-headers-$kernel_version
+ apt_get install openvswitch-switch openvswitch-datapath-dkms
+ # Create database for the plugin/agent
+ if is_service_enabled mysql; then
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;'
+ else
+ echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+ exit 1
+ fi
+ QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini
+ sudo cp $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE
+ # Make sure we're using the openvswitch plugin
+ sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
+ fi
+ sudo cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf
+ screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf"
+fi
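+
+# A minimal localrc sketch for exercising the Quantum sections above (values
+# are illustrative; the service names match the is_service_enabled checks
+# in this file):
+#
+#   ENABLED_SERVICES="$ENABLED_SERVICES,quantum,q-svc,q-agt"
+#   Q_PLUGIN=openvswitch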
+
+# Quantum agent (for compute nodes)
+if is_service_enabled q-agt; then
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ # Set up integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
+ sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
+ sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
+
+ # Start up the quantum <-> openvswitch agent
+ QUANTUM_OVS_CONFIG_FILE=$QUANTUM_CONF_DIR/ovs_quantum_plugin.ini
+ sudo cp $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE
+ sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE
+ screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
+ fi
+
+fi
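+
+# To sanity-check the bridge setup once the agent is up, listing the OVS
+# bridges should show br-int (a verification sketch, assuming the default
+# OVS_BRIDGE):
+#
+#   sudo ovs-vsctl list-br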
+
+# Melange service
+if is_service_enabled m-svc; then
+ if is_service_enabled mysql; then
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;'
+ else
+ echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
+ exit 1
+ fi
+ MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
+ cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
+ sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE
+ cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
+ screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
+ echo "Waiting for melange to start..."
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
+ echo "melange-server did not start"
+ exit 1
+ fi
+ melange mac_address_range create cidr=$M_MAC_RANGE
+fi
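+
+# M_MAC_RANGE above is the MAC CIDR handed to Melange's address allocator;
+# a localrc sketch with a hypothetical value:
+#
+#   M_MAC_RANGE=FE-EE-DD-00-00-00/24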
+
+
+
# Nova
# ----
# Put config files in /etc/nova for everyone to find
-NOVA_CONF=/etc/nova
-if [[ ! -d $NOVA_CONF ]]; then
- sudo mkdir -p $NOVA_CONF
+NOVA_CONF_DIR=/etc/nova
+if [[ ! -d $NOVA_CONF_DIR ]]; then
+ sudo mkdir -p $NOVA_CONF_DIR
fi
-sudo chown `whoami` $NOVA_CONF
+sudo chown `whoami` $NOVA_CONF_DIR
if is_service_enabled n-api; then
- # We are going to use a sample http middleware configuration based on the
- # one from the keystone project to launch nova. This paste config adds
- # the configuration required for nova to validate keystone tokens.
+ # Use the sample http middleware configuration supplied in the
+ # Nova sources. This paste config adds the configuration required
+ # for Nova to validate Keystone tokens.
- # Remove legacy paste config
+ # Allow rate limiting to be turned off for testing, like for Tempest
+ # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
+ API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}
+
+ # Remove legacy paste config if present
rm -f $NOVA_DIR/bin/nova-api-paste.ini
- # First we add a some extra data to the default paste config from nova
- cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF
+ # Get the sample configuration file in place
+ cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR
- # Then we add our own service token to the configuration
- sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_CONF/api-paste.ini
-
- # Finally, we change the pipelines in nova to use keystone
- function replace_pipeline() {
- sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_CONF/api-paste.ini
- }
- replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor"
- replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor"
- # allow people to turn off rate limiting for testing, like when using tempest, by setting OSAPI_RATE_LIMIT=" "
- OSAPI_RATE_LIMIT=${OSAPI_RATE_LIMIT:-"ratelimit"}
- replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_compute_app_v2"
- replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext $OSAPI_RATE_LIMIT osapi_volume_app_v1"
+    # Rewrite the authtoken configuration for our Keystone service.
+    # This is a bit defensive to allow the sample file some variance.
+ sed -e "
+ /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME
+ /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/;
+ /admin_user/s/^.*$/admin_user = nova/;
+ /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/;
+ s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+ s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
+ " -i $NOVA_CONF_DIR/api-paste.ini
fi
# Helper to clean iptables rules
@@ -857,6 +1021,9 @@
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
apt_get install libvirt-bin
+    # Force IP forwarding on, just in case
+ sudo sysctl -w net.ipv4.ip_forward=1
+
# attempt to load modules: network block device - used to manage qcow images
sudo modprobe nbd || true
@@ -918,10 +1085,10 @@
clean_iptables
# Destroy old instances
- instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+ instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
- echo $instances | xargs -n1 virsh destroy || true
- echo $instances | xargs -n1 virsh undefine || true
+ echo $instances | xargs -n1 sudo virsh destroy || true
+ echo $instances | xargs -n1 sudo virsh undefine || true
fi
# Logout and delete iscsi sessions
@@ -938,10 +1105,16 @@
clean_iptables
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
+
+    # Force IP forwarding on, just in case
+ sudo sysctl -w net.ipv4.ip_forward=1
fi
# Storage Service
if is_service_enabled swift; then
+ # Install memcached for swift.
+ apt_get install memcached
+
# We first do a bit of setup by creating the directories and
# changing the permissions so we can run it as our user.
@@ -1004,23 +1177,29 @@
# which has some default username and password if you have
# configured keystone it will checkout the directory.
if is_service_enabled key; then
- swift_auth_server="s3token tokenauth keystone"
+ swift_auth_server="s3token authtoken keystone"
else
swift_auth_server=tempauth
fi
# We do the install of the proxy-server and swift configuration
# replacing a few directives to match our configuration.
- sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g;
- s,%USER%,$USER,g;
- s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g;
- s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g;
- s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g;
- s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g;
- s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g;
- s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
- s/%AUTH_SERVER%/${swift_auth_server}/g;" \
- $FILES/swift/proxy-server.conf | \
+ sed -e "
+ s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},g;
+ s,%USER%,$USER,g;
+ s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
+ s,%SERVICE_USERNAME%,swift,g;
+ s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
+ s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
+ s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g;
+ s,%KEYSTONE_SERVICE_PORT%,${KEYSTONE_SERVICE_PORT},g;
+ s,%KEYSTONE_SERVICE_HOST%,${KEYSTONE_SERVICE_HOST},g;
+ s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g;
+ s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g;
+ s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g;
+ s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
+ s/%AUTH_SERVER%/${swift_auth_server}/g;
+ " $FILES/swift/proxy-server.conf | \
sudo tee ${SWIFT_CONFIG_LOCATION}/proxy-server.conf
sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_LOCATION}/swift.conf
@@ -1094,13 +1273,17 @@
# We then can start rsync.
sudo /etc/init.d/rsync restart || :
- # TODO: Bring some services in foreground.
- # Launch all services.
- swift-init all start
+    # First spawn all the swift services, then kill the
+    # proxy service so we can run it in the foreground in screen.
+    # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
+    # running; ignore that, just in case.
+ swift-init all restart || true
+ swift-init proxy stop || true
unset s swift_hash swift_auth_server
fi
+
# Volume Service
# --------------
@@ -1144,63 +1327,70 @@
sudo start tgt
fi
-function add_nova_flag {
- echo "$1" >> $NOVA_CONF/nova.conf
+NOVA_CONF=nova.conf
+function add_nova_opt {
+ echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
}
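+
+# Usage sketch: each add_nova_opt call appends one ini-style line to
+# $NOVA_CONF_DIR/$NOVA_CONF, so ``add_nova_opt "verbose=True"`` lands a
+# ``verbose=True`` line under the [DEFAULT] header written first below.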
# remove legacy nova.conf
rm -f $NOVA_DIR/bin/nova.conf
# (re)create nova.conf
-rm -f $NOVA_CONF/nova.conf
-add_nova_flag "--verbose"
-add_nova_flag "--allow_admin_api"
-add_nova_flag "--scheduler_driver=$SCHEDULER"
-add_nova_flag "--dhcpbridge_flagfile=$NOVA_CONF/nova.conf"
-add_nova_flag "--fixed_range=$FIXED_RANGE"
-if is_service_enabled n-obj; then
- add_nova_flag "--s3_host=$SERVICE_HOST"
-fi
+rm -f $NOVA_CONF_DIR/$NOVA_CONF
+add_nova_opt "[DEFAULT]"
+add_nova_opt "verbose=True"
+add_nova_opt "auth_strategy=keystone"
+add_nova_opt "allow_resize_to_same_host=True"
+add_nova_opt "root_helper=sudo /usr/local/bin/nova-rootwrap"
+add_nova_opt "compute_scheduler_driver=$SCHEDULER"
+add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF"
+add_nova_opt "fixed_range=$FIXED_RANGE"
+add_nova_opt "s3_host=$SERVICE_HOST"
+add_nova_opt "s3_port=$S3_SERVICE_PORT"
if is_service_enabled quantum; then
- add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
- add_nova_flag "--quantum_connection_host=$Q_HOST"
- add_nova_flag "--quantum_connection_port=$Q_PORT"
+ add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager"
+ add_nova_opt "quantum_connection_host=$Q_HOST"
+ add_nova_opt "quantum_connection_port=$Q_PORT"
if is_service_enabled melange; then
- add_nova_flag "--quantum_ipam_lib=nova.network.quantum.melange_ipam_lib"
- add_nova_flag "--use_melange_mac_generation"
- add_nova_flag "--melange_host=$M_HOST"
- add_nova_flag "--melange_port=$M_PORT"
+ add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib"
+ add_nova_opt "use_melange_mac_generation=True"
+ add_nova_opt "melange_host=$M_HOST"
+ add_nova_opt "melange_port=$M_PORT"
fi
if is_service_enabled q-svc && [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- add_nova_flag "--libvirt_vif_type=ethernet"
- add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
- add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
- add_nova_flag "--quantum_use_dhcp"
+ add_nova_opt "libvirt_vif_type=ethernet"
+ add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
+ add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
+ add_nova_opt "quantum_use_dhcp=True"
fi
else
- add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
+ add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
fi
if is_service_enabled n-vol; then
- add_nova_flag "--volume_group=$VOLUME_GROUP"
- add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x"
+ add_nova_opt "volume_group=$VOLUME_GROUP"
+ add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%08x"
# oneiric no longer supports ietadm
- add_nova_flag "--iscsi_helper=tgtadm"
+ add_nova_opt "iscsi_helper=tgtadm"
fi
-add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
-add_nova_flag "--my_ip=$HOST_IP"
-add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
-add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
-add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova"
-add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
-add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
+add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
+add_nova_opt "my_ip=$HOST_IP"
+add_nova_opt "public_interface=$PUBLIC_INTERFACE"
+add_nova_opt "vlan_interface=$VLAN_INTERFACE"
+add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
+if [ -n "$FLAT_INTERFACE" ]; then
+ add_nova_opt "flat_interface=$FLAT_INTERFACE"
+fi
+add_nova_opt "sql_connection=$BASE_SQL_CONN/nova?charset=utf8"
+add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
+add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
if is_service_enabled n-cpu; then
NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
- add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL"
+ add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL"
XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
- add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL"
+ add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL"
fi
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
@@ -1210,65 +1400,71 @@
# Address on which instance vncservers will listen on compute hosts.
# For multi-host, this should be the management ip of the compute host.
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-add_nova_flag "--vncserver_listen=$VNCSERVER_LISTEN"
-add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
-add_nova_flag "--api_paste_config=$NOVA_CONF/api-paste.ini"
-add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
-add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
-add_nova_flag "--rabbit_host=$RABBIT_HOST"
-add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
-add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
-add_nova_flag "--force_dhcp_release"
+add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN"
+add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
+add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini"
+add_nova_opt "image_service=nova.image.glance.GlanceImageService"
+add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST"
+add_nova_opt "rabbit_host=$RABBIT_HOST"
+add_nova_opt "rabbit_password=$RABBIT_PASSWORD"
+add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
+add_nova_opt "force_dhcp_release=True"
if [ -n "$INSTANCES_PATH" ]; then
- add_nova_flag "--instances_path=$INSTANCES_PATH"
+ add_nova_opt "instances_path=$INSTANCES_PATH"
fi
if [ "$MULTI_HOST" != "False" ]; then
- add_nova_flag "--multi_host"
- add_nova_flag "--send_arp_for_ha"
+ add_nova_opt "multi_host=True"
+ add_nova_opt "send_arp_for_ha=True"
fi
if [ "$SYSLOG" != "False" ]; then
- add_nova_flag "--use_syslog"
+ add_nova_opt "use_syslog=True"
+fi
+if [ "$API_RATE_LIMIT" != "True" ]; then
+ add_nova_opt "api_rate_limit=False"
fi
-# You can define extra nova conf flags by defining the array EXTRA_FLAGS,
-# For Example: EXTRA_FLAGS=(--foo --bar=2)
-for I in "${EXTRA_FLAGS[@]}"; do
- add_nova_flag $I
+
+# Provide some transition from EXTRA_FLAGS to EXTRA_OPTS
+if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
+ EXTRA_OPTS=$EXTRA_FLAGS
+fi
+
+# You can define extra nova conf options by defining the array EXTRA_OPTS,
+# for example: EXTRA_OPTS=(foo=true bar=2)
+for I in "${EXTRA_OPTS[@]}"; do
+ # Attempt to convert flags to options
+ add_nova_opt ${I//-}
done
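+
+# Note that ``${I//-}`` strips *every* dash from an entry: a legacy
+# ``--force_dhcp_release`` converts cleanly to ``force_dhcp_release``, but a
+# hypothetical ``--foo-bar=1`` would collapse to ``foobar=1`` and would need
+# to be converted by hand.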
+
# XenServer
# ---------
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
- add_nova_flag "--connection_type=xenapi"
- add_nova_flag "--xenapi_connection_url=http://169.254.0.1"
- add_nova_flag "--xenapi_connection_username=root"
- add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD"
- add_nova_flag "--noflat_injected"
- add_nova_flag "--flat_interface=eth1"
- add_nova_flag "--flat_network_bridge=xapi1"
- add_nova_flag "--public_interface=eth3"
+ add_nova_opt "connection_type=xenapi"
+ XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
+ add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL"
+ add_nova_opt "xenapi_connection_username=root"
+ add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD"
+ add_nova_opt "flat_injected=False"
# Need to avoid crash due to new firewall support
XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
- add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER"
+ add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
else
- add_nova_flag "--connection_type=libvirt"
+ add_nova_opt "connection_type=libvirt"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- add_nova_flag "--firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
- add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE"
- if [ -n "$FLAT_INTERFACE" ]; then
- add_nova_flag "--flat_interface=$FLAT_INTERFACE"
- fi
+ add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
fi
+
# Nova Database
# ~~~~~~~~~~~~~
# All nova components talk to a central database. We will need to do this step
# only once for an entire cluster.
-if is_service_enabled mysql; then
+if is_service_enabled mysql && is_service_enabled nova; then
# (re)create nova database
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova;'
@@ -1285,54 +1481,14 @@
# so send the start command by forcing text into the window.
# Only run the services specified in ``ENABLED_SERVICES``
-# Our screenrc file builder
-function screen_rc {
- SCREENRC=$TOP_DIR/stack-screenrc
- if [[ ! -e $SCREENRC ]]; then
- # Name the screen session
- echo "sessionname stack" > $SCREENRC
- # Set a reasonable statusbar
- echo 'hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"' >> $SCREENRC
- echo "screen -t stack bash" >> $SCREENRC
- fi
- # If this service doesn't already exist in the screenrc file
- if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
- NL=`echo -ne '\015'`
- echo "screen -t $1 bash" >> $SCREENRC
- echo "stuff \"$2$NL\"" >> $SCREENRC
- fi
-}
-
-# Our screen helper to launch a service in a hidden named screen
-function screen_it {
- NL=`echo -ne '\015'`
- if is_service_enabled $1; then
- # Append the service to the screen rc file
- screen_rc "$1" "$2"
-
- screen -S stack -X screen -t $1
- # sleep to allow bash to be ready to be send the command - we are
- # creating a new window in screen and then sends characters, so if
- # bash isn't running by the time we send the command, nothing happens
- sleep 1.5
- screen -S stack -p $1 -X stuff "$2$NL"
- fi
-}
-
-# create a new named screen to run processes in
-screen -d -m -S stack -t stack -s /bin/bash
-sleep 1
-# set a reasonable statusbar
-screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
-
# launch the glance registry service
if is_service_enabled g-reg; then
- screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=etc/glance-registry.conf"
+ screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
fi
# launch the glance api and wait for it to answer before continuing
if is_service_enabled g-api; then
- screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
+ screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
echo "g-api did not start"
@@ -1343,12 +1499,12 @@
if is_service_enabled key; then
# (re)create keystone database
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
# Configure keystone.conf
KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
cp $FILES/keystone.conf $KEYSTONE_CONF
- sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF
+ sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone?charset=utf8,g" -i $KEYSTONE_CONF
sudo sed -e "s,%DEST%,$DEST,g" -i $KEYSTONE_CONF
sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_CONF
sudo sed -e "s,%KEYSTONE_DIR%,$KEYSTONE_DIR,g" -i $KEYSTONE_CONF
@@ -1361,7 +1517,7 @@
echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.object_store.name = 'Swift Service'" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
fi
# Add quantum endpoints to service catalog if quantum is enabled
@@ -1369,11 +1525,12 @@
echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.network.name = 'Quantum Service'" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
fi
sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_CATALOG
+ sudo sed -e "s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g" -i $KEYSTONE_CATALOG
if [ "$SYSLOG" != "False" ]; then
cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_DIR/etc/logging.conf
@@ -1389,7 +1546,7 @@
if is_service_enabled key; then
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/; do sleep 1; done"; then
echo "keystone did not start"
exit 1
fi
@@ -1401,9 +1558,19 @@
# keystone_data.sh creates services, admin and demo users, and roles.
SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
- ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES bash $FILES/keystone_data.sh
-fi
+ ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
+ bash $FILES/keystone_data.sh
+    # Create an access key and secret key for nova's EC2 image registration
+ if is_service_enabled swift && is_service_enabled nova; then
+ CREDS=$(keystone --os_auth_url=$SERVICE_ENDPOINT --os_username=nova --os_password=$SERVICE_PASSWORD --os_tenant_name=$SERVICE_TENANT_NAME ec2-credentials-create)
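+        # The client prints a pretty-printed table; the awk below pulls the
+        # value column (field 4) from the ``access`` and ``secret`` rows.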
+ ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
+ SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
+ add_nova_opt "s3_access_key=$ACCESS_KEY"
+ add_nova_opt "s3_secret_key=$SECRET_KEY"
+ add_nova_opt "s3_affix_tenant=True"
+ fi
+fi
# launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
@@ -1415,107 +1582,40 @@
fi
fi
-# Quantum service
-if is_service_enabled q-svc; then
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- # Install deps
- # FIXME add to files/apts/quantum, but don't install if not needed!
- apt_get install openvswitch-switch openvswitch-datapath-dkms
- # Create database for the plugin/agent
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
- else
- echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
- fi
- QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini
- # Make sure we're using the openvswitch plugin
- sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
- fi
- screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf"
-fi
-
-# Quantum agent (for compute nodes)
-if is_service_enabled q-agt; then
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- # Set up integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
- sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
- sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
- sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-
- # Start up the quantum <-> openvswitch agent
- QUANTUM_OVS_CONFIG_FILE=$QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
- sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE
- screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
- fi
-
-fi
-
-# Melange service
-if is_service_enabled m-svc; then
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange;'
- else
- echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
- fi
- MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
- cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
- sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange/g" $MELANGE_CONFIG_FILE
- cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
- screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
- echo "Waiting for melange to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
- echo "melange-server did not start"
- exit 1
- fi
- melange mac_address_range create cidr=$M_MAC_RANGE
-fi
-
# If we're using Quantum (i.e. q-svc is enabled), network creation has to
# happen after we've started the Quantum service.
-if is_service_enabled mysql; then
+if is_service_enabled mysql && is_service_enabled nova; then
# create a small network
$NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
- if is_service_enabled q-svc; then
- echo "Not creating floating IPs (not supported by QuantumManager)"
- else
- # create some floating ips
- $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+ # create some floating ips
+ $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
- # create a second pool
- $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
- fi
+ # create a second pool
+ $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
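+
+# NETWORK_CREATE_ARGS (used in the network create call above) is a localrc
+# hook for passing extra nova-manage arguments straight through, e.g. with a
+# hypothetical flag:
+#
+#   NETWORK_CREATE_ARGS="--bridge_interface=eth1"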
-
# Launching nova-compute should be as simple as running ``nova-compute`` but
# have to do a little more than that in our script. Since we add the group
# ``libvirtd`` to our user in this script, when nova-compute is run it is
# within the context of our original shell (so our groups won't be updated).
# Use 'sg' to execute nova-compute as a member of the libvirtd group.
+# We don't check is_service_enabled here as screen_it does that for us
screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert"
-screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore"
screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
-if is_service_enabled n-novnc; then
- screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_CONF/nova.conf --web ."
-fi
-if is_service_enabled n-xvnc; then
- screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_CONF/nova.conf"
-fi
-if is_service_enabled n-cauth; then
- screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
-fi
-if is_service_enabled horizon; then
- screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
-fi
+screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
+screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
+screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
+screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
+screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_LOCATION}/proxy-server.conf -v"
+
+# Start the nova-objectstore only if the swift service is not enabled;
+# when it is, Swift acts as the S3 objectstore.
+is_service_enabled swift || \
+ screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore"
# Install Images
# ==============
@@ -1525,7 +1625,7 @@
# The default image is a small ***TTY*** testing image, which lets you login
# the username/password of root/password.
#
-# TTY also uses cloud-init, supporting login via keypair and sending scripts as
+# TTY also uses ``cloud-init``, supporting login via keypair and sending scripts as
# userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init
#
# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
@@ -1542,17 +1642,8 @@
TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
# Option to upload legacy ami-tty, which works with xenserver
- if [ $UPLOAD_LEGACY_TTY ]; then
- if [ ! -f $FILES/tty.tgz ]; then
- wget -c http://images.ansolabs.com/tty.tgz -O $FILES/tty.tgz
- fi
-
- tar -zxf $FILES/tty.tgz -C $FILES/images
- RVAL=`glance add -A $TOKEN name="tty-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/aki-tty/image`
- KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
- RVAL=`glance add -A $TOKEN name="tty-ramdisk" is_public=true container_format=ari disk_format=ari < $FILES/images/ari-tty/image`
- RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
- glance add -A $TOKEN name="tty" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < $FILES/images/ami-tty/image
+ if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+ IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz"
fi
for image_url in ${IMAGE_URLS//,/ }; do
@@ -1564,6 +1655,8 @@
KERNEL=""
RAMDISK=""
+ DISK_FORMAT=""
+ CONTAINER_FORMAT=""
case "$IMAGE_FNAME" in
*.tar.gz|*.tgz)
# Extract ami and aki files
@@ -1574,47 +1667,74 @@
rm -Rf "$xdir";
mkdir "$xdir"
tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
- KERNEL=$(for f in "$xdir/"*-vmlinuz*; do
+ KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
- RAMDISK=$(for f in "$xdir/"*-initrd*; do
+ RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
- IMAGE=$(for f in "$xdir/"*.img; do
+ IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
[ -f "$f" ] && echo "$f" && break; done; true)
- [ -n "$IMAGE_NAME" ]
- IMAGE_NAME=$(basename "$IMAGE" ".img")
+ if [[ -z "$IMAGE_NAME" ]]; then
+ IMAGE_NAME=$(basename "$IMAGE" ".img")
+ fi
;;
*.img)
IMAGE="$FILES/$IMAGE_FNAME";
IMAGE_NAME=$(basename "$IMAGE" ".img")
+ DISK_FORMAT=raw
+ CONTAINER_FORMAT=bare
;;
*.img.gz)
IMAGE="$FILES/${IMAGE_FNAME}"
IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+ DISK_FORMAT=raw
+ CONTAINER_FORMAT=bare
+ ;;
+ *.qcow2)
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+ DISK_FORMAT=qcow2
+ CONTAINER_FORMAT=bare
;;
*) echo "Do not know what to do with $IMAGE_FNAME"; false;;
esac
- # Use glance client to add the kernel the root filesystem.
- # We parse the results of the first upload to get the glance ID of the
- # kernel for use when uploading the root filesystem.
- KERNEL_ID=""; RAMDISK_ID="";
- if [ -n "$KERNEL" ]; then
- RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
- KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+ if [ "$CONTAINER_FORMAT" = "bare" ]; then
+ glance add --silent-upload -A $TOKEN name="$IMAGE_NAME" is_public=true container_format=$CONTAINER_FORMAT disk_format=$DISK_FORMAT < <(zcat --force "${IMAGE}")
+ else
+ # Use glance client to add the kernel the root filesystem.
+ # We parse the results of the first upload to get the glance ID of the
+ # kernel for use when uploading the root filesystem.
+ KERNEL_ID=""; RAMDISK_ID="";
+ if [ -n "$KERNEL" ]; then
+ RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
+ KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+ fi
+ if [ -n "$RAMDISK" ]; then
+ RVAL=`glance add --silent-upload -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
+ RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+ fi
+ glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
fi
- if [ -n "$RAMDISK" ]; then
- RVAL=`glance add -A $TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
- RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
- fi
- glance add -A $TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
done
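+
+    # Note: ``zcat --force`` above decompresses gzipped images and passes
+    # plain files through untouched, so one upload path handles both .img
+    # and .img.gz sources.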
fi
+
+# Run local script
+# ================
+
+# Run ``local.sh`` if it exists to perform user-managed tasks
+if [[ -x $TOP_DIR/local.sh ]]; then
+ echo "Running user script $TOP_DIR/local.sh"
+ $TOP_DIR/local.sh
+fi
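+
+# A local.sh sketch (hypothetical contents; any executable script works):
+#
+#   #!/usr/bin/env bash
+#   # post-stack personalization, e.g. import a keypair for the demo user
+#   source ./openrc
+#   nova keypair-add mykey > mykey.pem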
+
+
# Fin
# ===
set +o xtrace
+
# Using the cloud
# ===============
@@ -1622,22 +1742,27 @@
echo ""
echo ""
-# If you installed the horizon on this server, then you should be able
+# If you installed Horizon on this server, you should be able
# to access the site using your browser.
if is_service_enabled horizon; then
- echo "horizon is now available at http://$SERVICE_HOST/"
+ echo "Horizon is now available at http://$SERVICE_HOST/"
fi
-# If keystone is present, you can point nova cli to this server
+# If Keystone is present, you can point the ``nova`` cli at this server
if is_service_enabled key; then
- echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
- echo "examples on using novaclient command line is in exercise.sh"
- echo "the default users are: admin and demo"
- echo "the password: $ADMIN_PASSWORD"
+ echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/"
+ echo "Examples on using novaclient command line is in exercise.sh"
+ echo "The default users are: admin and demo"
+ echo "The password: $ADMIN_PASSWORD"
fi
# Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address
echo "This is your host ip: $HOST_IP"
+# Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS``
+if [[ -n "$EXTRA_FLAGS" ]]; then
+ echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS"
+fi
+
# Indicate how long this took to run (bash maintained variable 'SECONDS')
echo "stack.sh completed in $SECONDS seconds."
diff --git a/stackrc b/stackrc
index a20426b..d0fa1c2 100644
--- a/stackrc
+++ b/stackrc
@@ -1,3 +1,14 @@
+# Find the other rc files
+RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+
+# Specify which services to launch. These generally correspond to
+# screen tabs. If you would like to add other services that are not
+# enabled by default, append them to the ENABLED_SERVICES variable in
+# your localrc. For example, to run swift alongside the other services,
+# add this to your localrc:
+# ENABLED_SERVICES="$ENABLED_SERVICES,swift"
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit
+
# compute service
NOVA_REPO=https://github.com/openstack/nova.git
NOVA_BRANCH=master
@@ -76,12 +87,7 @@
IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";;
esac
-# use stored ec2 env variables
-if [ -f ./ec2rc ]; then
- source ./ec2rc
-fi
-
# allow local overrides of env variables
-if [ -f ./localrc ]; then
- source ./localrc
+if [ -f $RC_DIR/localrc ]; then
+ source $RC_DIR/localrc
fi
diff --git a/tests/functions.sh b/tests/functions.sh
new file mode 100755
index 0000000..69e8c0a
--- /dev/null
+++ b/tests/functions.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+# Tests for DevStack functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import common functions
+source $TOP/functions
+
+# Import configuration
+source $TOP/openrc
+
+
+echo "Testing die_if_not_set()"
+
+bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'"
+if [[ $? != 0 ]]; then
+ echo "die_if_not_set [X='Y' true] Failed"
+else
+ echo 'OK'
+fi
+
+bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'"
+if [[ $? = 0 ]]; then
+ echo "die_if_not_set [X='' true] Failed"
+fi
+
+bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'"
+if [[ $? != 0 ]]; then
+ echo "die_if_not_set [X='Y' false] Failed"
+else
+ echo 'OK'
+fi
+
+bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'"
+if [[ $? = 0 ]]; then
+ echo "die_if_not_set [X='' false] Failed"
+fi
+
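+# Note: backticks inside the double-quoted command strings above are expanded
+# by *this* shell before ``bash -cx`` runs, so the child shell receives X
+# already substituted. To defer the substitution to the child shell,
+# single-quote the command string, e.g. (a sketch):
+#
+#   bash -cx 'source functions; X=$(echo Y && true); die_if_not_set X "not OK"'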
diff --git a/tools/build_uec.sh b/tools/build_uec.sh
index ed5a017..35a4d6d 100755
--- a/tools/build_uec.sh
+++ b/tools/build_uec.sh
@@ -147,7 +147,7 @@
<interface type='network'>
<source network='$NET_NAME'/>
</interface>
-
+
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='$vm_dir/console.log'/>
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index f6ef0d3..9b25b7e 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -133,7 +133,7 @@
sed -e "
/^api_key=/s|=.*\$|=$ADMIN_PASSWORD|;
- /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/tokens/|;
+ /^auth_url=/s|=.*\$|=${OS_AUTH_URL%/}/|;
/^host=/s|=.*\$|=$HOST_IP|;
/^image_ref=/s|=.*\$|=$IMAGE_UUID|;
/^password=/s|=.*\$|=$ADMIN_PASSWORD|;
diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh
index 727b42a..d9a160a 100755
--- a/tools/jenkins/configurations/kvm.sh
+++ b/tools/jenkins/configurations/kvm.sh
@@ -48,6 +48,7 @@
MYSQL_PASSWORD=chicken
RABBIT_PASSWORD=chicken
SERVICE_TOKEN=chicken
+SERVICE_PASSWORD=chicken
ADMIN_PASSWORD=chicken
USERNAME=admin
TENANT=admin
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
index d0fa6af..49a57f0 100644
--- a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
+++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
@@ -78,7 +78,7 @@
. localrc
# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network
-ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'"
+ssh root@$XEN_IP "cd devstack && . localrc && cd tools/jenkins && ./run_test.sh $EXECUTOR_NUMBER $ADAPTER '$RC'"
</command>
</hudson.tasks.Shell>
</builders>
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index 1d71a4a..ea943e1 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -5,8 +5,8 @@
def print_usage():
- print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
- % sys.argv[0]
+ print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
+ % sys.argv[0])
sys.exit()
diff --git a/tools/rfc.sh b/tools/rfc.sh
index 0bc1531..d4dc597 100755
--- a/tools/rfc.sh
+++ b/tools/rfc.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
+# Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
# This initial version of this file was taken from the source tree
# of GlusterFS. It was not directly attributed, but is assumed to be
# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3
diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_and_pips_for_uec.sh
index ec7e916..23a28de 100755
--- a/tools/warm_apts_and_pips_for_uec.sh
+++ b/tools/warm_apts_and_pips_for_uec.sh
@@ -30,7 +30,7 @@
if [ ! -d files/apts ]; then
echo "Please run this script from devstack/tools/"
exit 1
-fi
+fi
# Mount the image
STAGING_DIR=/tmp/`echo $1 | sed "s/\//_/g"`.stage
diff --git a/tools/xen/README.md b/tools/xen/README.md
index a3398a7..d487a99 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -4,12 +4,19 @@
a XenServer 5.6 + Openstack development environment. This file gives
some pointers on how to get started.
+XenServer is a Type 1 hypervisor, so it needs to be installed on bare metal.
+The OpenStack services are configured to run within a "privileged" virtual
+machine on the XenServer host (called the OS domU). The VM uses the XAPI
+toolstack to communicate with the host.
+
Step 1: Install Xenserver
------------------------
-Install XenServer 5.6 on a clean box. You can get XenServer by signing
+Install XenServer 5.6+ on a clean box. You can get XenServer by signing
up for an account on citrix.com, and then visiting:
https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148
+For details on installation, see: http://wiki.openstack.org/XenServer/Install
+
Here are some sample Xenserver network settings for when you are just
getting started (I use settings like this with a lappy + cheap wifi router):
@@ -18,16 +25,25 @@
* XenServer Gateway: 192.168.1.1
* XenServer DNS: 192.168.1.1
+Note:
+-----
+It is advisable (and, due to space limitations, necessary if you are using
+XenServer 6.0) to create the above-mentioned OS domU on a separate dev machine.
+To do this, you will need to run Step 2 on the dev machine (if required) as
+well as on the XenServer host. Steps 3 and 4 should be run on the dev machine.
+This process requires you to be root on the dev machine.
+
Step 2: Prepare DOM0
-------------------
-At this point, your server is missing some critical software that you will
+At this point, your host is missing some critical software that you will
need to run devstack (like git). Do this to install required software:
- wget --no-check-certificate https://github.com/cloudbuilders/devstack/raw/xen/tools/xen/prepare_dom0.sh
+ wget --no-check-certificate https://raw.github.com/openstack-dev/devstack/master/tools/xen/prepare_dom0.sh
chmod 755 prepare_dom0.sh
./prepare_dom0.sh
-This script will also clone devstack in /root/devstack
+This step will also clone devstack into $DEVSTACKSRCROOT/devstack
+(DEVSTACKSRCROOT defaults to /root).
Step 3: Configure your localrc
-----------------------------
@@ -35,7 +51,7 @@
the XENAPI_PASSWORD must be your dom0 root password.
Of course, use real passwords if this machine is exposed.
- cat > /root/devstack/localrc <<EOF
+ cat > $DEVSTACKSRCROOT/devstack/localrc <<EOF
MYSQL_PASSWORD=my_super_secret
SERVICE_TOKEN=my_super_secret
ADMIN_PASSWORD=my_super_secret
@@ -52,16 +68,20 @@
MULTI_HOST=1
# Give extra time for boot
ACTIVE_TIMEOUT=45
+ # Interface on which you would like to access services
+ HOST_IP_IFACE=ethX
EOF
Step 4: Run ./build_xva.sh
--------------------------
-This script prpares your nova xva image. This script can be run on a separate machine
-and copied to dom0. If you run this on a different machine, copy the resulting xva
-file to tools/xen/xvas/[GUEST_NAME].xva (by default tools/xen/xvas/ALLINONE.xva)
+This script prepares your nova xva image. If you run this on a different machine,
+copy the resulting xva file to tools/xen/xvas/[GUEST_NAME].xva
+(by default tools/xen/xvas/ALLINONE.xva) on the XenServer host.
-It is likely that for XS6 you will need to build_xva.sh on a separate machine due
-to dom0 space constraints.
+    cd $DEVSTACKSRCROOT/devstack/tools/xen
+    ./build_xva.sh
+
+You will also need to copy your localrc to the XenServer host.
Step 5: Run ./build_domU.sh
--------------------------
@@ -72,6 +92,9 @@
* Creates and installs a OpenStack all-in-one domU in an HA-FlatDHCP configuration
* A script to create a multi-domU (ie. head node separated from compute) configuration is coming soon!
+    cd $DEVSTACKSRCROOT/devstack/tools/xen
+    ./build_domU.sh
+
Step 6: Do cloudy stuff!
--------------------------
* Play with horizon
diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh
index 455ad26..5fa7aa8 100755
--- a/tools/xen/build_domU.sh
+++ b/tools/xen/build_domU.sh
@@ -10,7 +10,10 @@
# This directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
-# Source params - override xenrc params in your localrc to suite your taste
+# Source lower level functions
+. $TOP_DIR/../../functions
+
+# Source params - override xenrc params in your localrc to suit your taste
source xenrc
# Echo commands
@@ -134,17 +137,8 @@
SR_UUID=`xe sr-list --minimal name-label="Local storage"`
xe sr-param-set uuid=$SR_UUID other-config:i18n-key=local-storage
-# Clean nova if desired
-if [ "$CLEAN" = "1" ]; then
- rm -rf $TOP_DIR/nova
-fi
-
# Checkout nova
-if [ ! -d $TOP_DIR/nova ]; then
- env GIT_SSL_NO_VERIFY=true git clone $NOVA_REPO
- cd $TOP_DIR/nova
- git checkout $NOVA_BRANCH
-fi
+git_clone $NOVA_REPO $TOP_DIR/nova $NOVA_BRANCH
# Install plugins
cp -pr $TOP_DIR/nova/plugins/xenserver/xenapi/etc/xapi.d /etc/
@@ -182,7 +176,11 @@
if [ -z $PUB_BR ]; then
PUB_BR=$(xe network-list --minimal uuid=$PUB_NET params=bridge)
fi
-$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w
+$TOP_DIR/scripts/install-os-vpx.sh -f $XVA -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -w -k "flat_network_bridge=${VM_BR}"
+
+if [ $PUB_IP == "dhcp" ]; then
+ PUB_IP=$(xe vm-list --minimal name-label=$GUEST_NAME params=networks | sed -ne 's,^.*3/ip: \([0-9.]*\).*$,\1,p')
+fi
# If we have copied our ssh credentials, use ssh to monitor while the installation runs
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index 4eb4b91..c235485 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -187,4 +187,5 @@
UID=0 $SCRIPT_DIR/mkxva -o $XVA -t xva -x $OVA $STAGING_DIR $VDI_MB /tmp/
fi
-echo "Built $XVA. If your dom0 is on a different machine, copy this to [devstackdir]/tools/xen/$XVA"
+echo "Built $(basename $XVA). If your dom0 is on a different machine, copy this to [devstackdir]/tools/xen/$(basename $XVA)"
+echo "Also copy your localrc to [devstackdir]"
diff --git a/tools/xen/prepare_dom0.sh b/tools/xen/prepare_dom0.sh
index d28a07f..71e9d6d 100755
--- a/tools/xen/prepare_dom0.sh
+++ b/tools/xen/prepare_dom0.sh
@@ -21,7 +21,7 @@
ln -s /usr/bin/vim /bin/vi
fi
-# Install git
+# Install git
if ! which git; then
DEST=/tmp/
GITDIR=$DEST/git-1.7.7
@@ -34,3 +34,8 @@
make install
fi
+# Clone devstack
+DEVSTACK=${DEVSTACKROOT:-"/root/devstack"}
+if [ ! -d $DEVSTACK ]; then
+ git clone git://github.com/openstack-dev/devstack.git $DEVSTACK
+fi
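+
+# Usage sketch: the clone location can be overridden via DEVSTACKROOT, e.g.:
+#
+#   DEVSTACKROOT=/opt/devstack ./prepare_dom0.sh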
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index f0dc3c2..d45c370 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -88,7 +88,7 @@
get_params()
{
- while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION;
+ while getopts "hicwbf:d:v:m:p:k:r:l:" OPTION;
do
case $OPTION in
h) usage
@@ -246,7 +246,7 @@
# This installs the interface for public traffic, only if a bridge is specified
-# The interface is not configured at this stage, but it will be, once the admin
+# The interface is not configured at this stage, but it will be, once the admin
# tasks are complete for the services of this VPX
create_public_vif()
{
diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva
index dcdee61..a316da2 100755
--- a/tools/xen/scripts/mkxva
+++ b/tools/xen/scripts/mkxva
@@ -177,7 +177,7 @@
/sbin/mkfs.ext3 -I 128 -m0 -F "$partition"
/sbin/e2label "$partition" vpxroot
make_fs_inner "$staging" "$partition" ""
-
+
# Now run grub on the image we've created
CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX")
@@ -203,7 +203,7 @@
$SUDO umount "$CLEAN_MOUNTPOINT"
CLEAN_MOUNTPOINT=
-
+
# Grub expects a disk with name /dev/xxxx with a first partition
# named /dev/xxxx1, so we give it what it wants using symlinks
# Note: /dev is linked to the real /dev of the build machine, so
@@ -214,14 +214,14 @@
rm -f "$disk_part1_name"
ln -s "$CLEAN_LOSETUP" "$disk_name"
ln -s "$partition" "$disk_part1_name"
-
+
# Feed commands into the grub shell to setup the disk
grub --no-curses --device-map=/dev/null <<EOF
device (hd0) $disk_name
setup (hd0) (hd0,0)
quit
EOF
-
+
# Cleanup
rm -f "$disk_name"
rm -f "$disk_part1_name"
@@ -253,7 +253,7 @@
local n_bytes=$(stat --printf=%s "$diskimg")
local n_meg=$((($n_bytes+$((1024*1024 -1)))/$((1024*1024))))
local i=0
- while [ $i -lt $n_meg ] ; do
+ while [ $i -lt $n_meg ] ; do
if [ $rio -eq 0 ] ; then
local file="$outputdir"/chunk-$(printf "%08d" $i)
dd if="$diskimg" of="$file" skip=$i bs=1M count=1 2>/dev/null
@@ -359,7 +359,7 @@
# cleanup
-if [ -z "${DO_NOT_CLEANUP:-}" ] ; then
+if [ -z "${DO_NOT_CLEANUP:-}" ] ; then
rm -rf "$XVA_TARBALL_STAGING"
rm -f "$FS_TMPFILE"
fi
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 73f9c02..58fda31 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -36,7 +36,7 @@
MGT_DEV=${MGT_DEV:-eth0}
# XVA Directory
-XVA_DIR=${XVA_DIR:-xvas}
+XVA_DIR=${XVA_DIR:-`pwd`/xvas}
# Path to xva file
XVA=${XVA:-$XVA_DIR/$GUEST_NAME.xva }