Merge remote-tracking branch 'origin/ci-tests' into ci-tests
diff --git a/exercise.sh b/exercise.sh
index 77d3a3b..cca9a13 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -165,7 +165,7 @@
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver"]; then
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
# test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
print "Security group failure - ping should not be allowed!"
diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini
new file mode 100644
index 0000000..2c642f8
--- /dev/null
+++ b/files/nova-api-paste.ini
@@ -0,0 +1,127 @@
+#######
+# EC2 #
+#######
+
+[composite:ec2]
+use = egg:Paste#urlmap
+/: ec2versions
+/services/Cloud: ec2cloud
+/services/Admin: ec2admin
+/latest: ec2metadata
+/2007-01-19: ec2metadata
+/2007-03-01: ec2metadata
+/2007-08-29: ec2metadata
+/2007-10-10: ec2metadata
+/2007-12-15: ec2metadata
+/2008-02-01: ec2metadata
+/2008-09-01: ec2metadata
+/2009-04-04: ec2metadata
+/1.0: ec2metadata
+
+[pipeline:ec2cloud]
+pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
+
+[pipeline:ec2admin]
+pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
+
+[pipeline:ec2metadata]
+pipeline = logrequest ec2md
+
+[pipeline:ec2versions]
+pipeline = logrequest ec2ver
+
+[filter:logrequest]
+paste.filter_factory = nova.api.ec2:RequestLogging.factory
+
+[filter:ec2lockout]
+paste.filter_factory = nova.api.ec2:Lockout.factory
+
+[filter:totoken]
+paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
+
+[filter:ec2noauth]
+paste.filter_factory = nova.api.ec2:NoAuth.factory
+
+[filter:authenticate]
+paste.filter_factory = nova.api.ec2:Authenticate.factory
+
+[filter:cloudrequest]
+controller = nova.api.ec2.cloud.CloudController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:adminrequest]
+controller = nova.api.ec2.admin.AdminController
+paste.filter_factory = nova.api.ec2:Requestify.factory
+
+[filter:authorizer]
+paste.filter_factory = nova.api.ec2:Authorizer.factory
+
+[app:ec2executor]
+paste.app_factory = nova.api.ec2:Executor.factory
+
+[app:ec2ver]
+paste.app_factory = nova.api.ec2:Versions.factory
+
+[app:ec2md]
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
+
+#############
+# Openstack #
+#############
+
+[composite:osapi]
+use = egg:Paste#urlmap
+/: osversions
+/v1.0: openstackapi10
+/v1.1: openstackapi11
+
+[pipeline:openstackapi10]
+pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
+
+[pipeline:openstackapi11]
+pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
+
+[filter:faultwrap]
+paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+
+[filter:auth]
+paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+
+[filter:noauth]
+paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+
+[filter:ratelimit]
+paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+
+[filter:extensions]
+paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+
+[app:osapiapp10]
+paste.app_factory = nova.api.openstack:APIRouterV10.factory
+
+[app:osapiapp11]
+paste.app_factory = nova.api.openstack:APIRouterV11.factory
+
+[pipeline:osversions]
+pipeline = faultwrap osversionapp
+
+[app:osversionapp]
+paste.app_factory = nova.api.openstack.versions:Versions.factory
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_protocol = http
+service_host = 127.0.0.1
+service_port = 5000
+auth_host = 127.0.0.1
+auth_port = 35357
+auth_protocol = http
+auth_uri = http://127.0.0.1:5000/
+admin_token = %SERVICE_TOKEN%
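
The %SERVICE_TOKEN% placeholder above is not usable as-is; the stack.sh hunk
below substitutes the real service token with sed. Once nova-api is running
with this paste config, the EC2 urlmap can be sanity-checked from the host,
assuming nova's default EC2 port of 8773:

    # Lists the version strings served by the [composite:ec2] urlmap
    curl -s http://127.0.0.1:8773/
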
diff --git a/stack.sh b/stack.sh
index 6b3e09c..841cbb4 100755
--- a/stack.sh
+++ b/stack.sh
@@ -103,8 +103,7 @@
# since this script runs as a normal user, we need to give that user
# the ability to run sudo
- apt_get update
- apt_get install sudo
+ dpkg -l sudo || apt_get update && apt_get install sudo
if ! getent passwd stack >/dev/null; then
echo "Creating a user called stack"
@@ -121,7 +120,7 @@
echo "Copying files to stack user"
STACK_DIR="$DEST/${PWD##*/}"
cp -r -f "$PWD" "$STACK_DIR"
- chown -R $USER "$STACK_DIR"
+ chown -R stack "$STACK_DIR"
if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
else
@@ -233,7 +232,7 @@
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=${MULTI_HOST:-0}
+MULTI_HOST=${MULTI_HOST:-False}
# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
# variable but make sure that the interface doesn't already have an
@@ -326,7 +325,7 @@
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
-
+
# Keystone
# --------
@@ -565,13 +564,12 @@
# ----
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
- # We are going to use the sample http middleware configuration from the
- # keystone project to launch nova. This paste config adds the configuration
- # required for nova to validate keystone tokens - except we need to switch
- # the config to use our service token instead (instead of the invalid token
- # 999888777666).
- cp $KEYSTONE_DIR/examples/paste/nova-api-paste.ini $NOVA_DIR/bin
- sed -e "s,999888777666,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+ # We are going to use a sample http middleware configuration based on the
+ # one from the keystone project to launch nova. This paste config adds
+ # the configuration required for nova to validate keystone tokens. We add
+ # our own service token to the configuration.
+ cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+ sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi
if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
@@ -653,13 +651,13 @@
USER_GROUP=$(id -g)
sudo mkdir -p ${SWIFT_DATA_LOCATION}/drives
sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_LOCATION}/drives
-
+
# We then create a loopback disk and format it to XFS.
if [[ ! -e ${SWIFT_DATA_LOCATION}/drives/images/swift.img ]];then
mkdir -p ${SWIFT_DATA_LOCATION}/drives/images
sudo touch ${SWIFT_DATA_LOCATION}/drives/images/swift.img
sudo chown $USER: ${SWIFT_DATA_LOCATION}/drives/images/swift.img
-
+
dd if=/dev/zero of=${SWIFT_DATA_LOCATION}/drives/images/swift.img \
bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
mkfs.xfs -f -i size=1024 ${SWIFT_DATA_LOCATION}/drives/images/swift.img
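
The dd invocation above writes no data at all: count=0 combined with seek
simply extends the file to SWIFT_LOOPBACK_DISK_SIZE, yielding a sparse file
that consumes almost no real disk until blocks are written. Easy to confirm
with illustrative sizes:

    dd if=/dev/zero of=swift.img bs=1024 count=0 seek=1000000
    ls -ls swift.img                   # first column: blocks allocated, ~0
    du -h --apparent-size swift.img    # ~1G apparent size
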
@@ -676,9 +674,9 @@
# We then create links to that mounted location so swift knows
# where to go.
for x in {1..4}; do sudo ln -sf ${SWIFT_DATA_LOCATION}/drives/sdb1/$x ${SWIFT_DATA_LOCATION}/$x; done
-
+
# We now have to emulate a few different servers into one, so we
- # create all the directories needed for swift
+ # create all the directories needed for swift
tmpd=""
for d in ${SWIFT_DATA_LOCATION}/drives/sdb1/{1..4} \
${SWIFT_CONFIG_LOCATION}/{object,container,account}-server \
@@ -694,7 +692,7 @@
# swift-init has a bug using /etc/swift; until bug #885595 is fixed
# we have to create a link
sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
-
+
# Swift uses rsync to synchronize between all the different
# partitions (which makes more sense when you have a multi-node
# setup); we configure it with our version of rsync.
@@ -730,7 +728,7 @@
local bind_port=$2
local log_facility=$3
local node_number
-
+
for node_number in {1..4};do
node_path=${SWIFT_DATA_LOCATION}/${node_number}
sed -e "s,%SWIFT_CONFIG_LOCATION%,${SWIFT_CONFIG_LOCATION},;s,%USER%,$USER,;s,%NODE_PATH%,${node_path},;s,%BIND_PORT%,${bind_port},;s,%LOG_FACILITY%,${log_facility}," \
@@ -757,14 +755,14 @@
# We then can start rsync.
sudo /etc/init.d/rsync restart || :
-
+
# Create our ring for the object/container/account.
/usr/local/bin/swift-remakerings
# And now we launch swift-startmain to get our cluster running
# ready to be tested.
/usr/local/bin/swift-startmain || :
-
+
unset s swift_hash swift_auth_server tmpd
fi
@@ -831,12 +829,12 @@
if [ -n "$INSTANCES_PATH" ]; then
add_nova_flag "--instances_path=$INSTANCES_PATH"
fi
-if [ -n "$MULTI_HOST" ]; then
- add_nova_flag "--multi_host=$MULTI_HOST"
- add_nova_flag "--send_arp_for_ha=1"
+if [ "$MULTI_HOST" != "False" ]; then
+ add_nova_flag "--multi_host"
+ add_nova_flag "--send_arp_for_ha"
fi
if [ "$SYSLOG" != "False" ]; then
- add_nova_flag "--use_syslog=1"
+ add_nova_flag "--use_syslog"
fi
# XenServer
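
The switch above from "--multi_host=$MULTI_HOST" and "--send_arp_for_ha=1" to
bare flags reflects how nova's gflags-based boolean options behave: the flag's
presence alone enables it. A minimal sketch of the pattern, assuming (as
elsewhere in stack.sh) that add_nova_flag appends each argument to the
generated flagfile:

    NOVA_CONF=$NOVA_DIR/bin/nova.conf      # assumed flagfile location
    function add_nova_flag {
        echo "$1" >> $NOVA_CONF
    }
    add_nova_flag "--multi_host"           # boolean gflag: presence means true
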
@@ -912,6 +910,10 @@
NL=`echo -ne '\015'`
if [[ "$ENABLED_SERVICES" =~ "$1" ]]; then
screen -S stack -X screen -t $1
+        # sleep to allow bash to be ready to receive the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+ sleep 1
screen -S stack -p $1 -X stuff "$2$NL"
fi
}
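
The race the new sleep works around: "screen -X screen -t $1" returns as soon
as the window exists, not when the shell inside it is ready, so characters
stuffed immediately afterwards can be dropped. The full pattern, with
illustrative window and command names:

    screen -S stack -X screen -t n-api                # new window titled n-api
    sleep 1                                           # give bash time to start
    screen -S stack -p n-api -X stuff $'nova-api\r'   # type the command + CR
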
diff --git a/tools/build_uec.sh b/tools/build_uec.sh
new file mode 100755
index 0000000..6bab526
--- /dev/null
+++ b/tools/build_uec.sh
@@ -0,0 +1,248 @@
+#!/usr/bin/env bash
+
+# Make sure that we have the proper version of ubuntu (only works on natty/oneiric)
+if ! egrep -q "oneiric|natty" /etc/lsb-release; then
+ echo "This script only works with ubuntu oneiric and natty"
+ exit 1
+fi
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+cd $TOP_DIR
+
+# Source params
+source ./stackrc
+
+# Ubuntu distro to install
+DIST_NAME=${DIST_NAME:-oneiric}
+
+# Configure how large the VM should be
+GUEST_SIZE=${GUEST_SIZE:-10G}
+
+# exit on error to stop unexpected errors
+set -o errexit
+set -o xtrace
+
+# Abort if localrc is not set
+if [ ! -e $TOP_DIR/localrc ]; then
+ echo "You must have a localrc with ALL necessary passwords defined before proceeding."
+ echo "See stack.sh for required passwords."
+ exit 1
+fi
+
+# Install deps if needed
+DEPS="kvm libvirt-bin kpartx"
+dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
+
+# Where to store files and instances
+WORK_DIR=${WORK_DIR:-/opt/kvmstack}
+
+# Where to store images
+image_dir=$WORK_DIR/images/$DIST_NAME
+mkdir -p $image_dir
+
+# Original version of built image
+uec_url=http://uec-images.ubuntu.com/$DIST_NAME/current/$DIST_NAME-server-cloudimg-amd64.tar.gz
+tarball=$image_dir/$(basename $uec_url)
+
+# download the base uec image if we haven't already
+if [ ! -f $tarball ]; then
+ curl $uec_url -o $tarball
+ (cd $image_dir && tar -Sxvzf $tarball)
+ resize-part-image $image_dir/*.img $GUEST_SIZE $image_dir/disk
+ cp $image_dir/*-vmlinuz-virtual $image_dir/kernel
+fi
+
+
+# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
+ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
+
+# Name of our instance, used by libvirt
+GUEST_NAME=${GUEST_NAME:-devstack}
+
+# Mop up after previous runs
+virsh destroy $GUEST_NAME || true
+
+# Where this vm is stored
+vm_dir=$WORK_DIR/instances/$GUEST_NAME
+
+# Create vm dir and remove old disk
+mkdir -p $vm_dir
+rm -f $vm_dir/disk
+
+# Create a copy of the base image
+qemu-img create -f qcow2 -b $image_dir/disk $vm_dir/disk
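+# The qcow2 overlay records only this guest's writes; the base image under
+# $image_dir stays pristine between runs. To confirm the relationship
+# (illustrative only, not needed for the build):
+#   qemu-img info $vm_dir/disk   # reports "backing file: $image_dir/disk"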
+
+# Back to devstack
+cd $TOP_DIR
+
+GUEST_NETWORK=${GUEST_NETWORK:-1}
+GUEST_RECREATE_NET=${GUEST_RECREATE_NET:-yes}
+GUEST_IP=${GUEST_IP:-192.168.$GUEST_NETWORK.50}
+GUEST_CIDR=${GUEST_CIDR:-$GUEST_IP/24}
+GUEST_NETMASK=${GUEST_NETMASK:-255.255.255.0}
+GUEST_GATEWAY=${GUEST_GATEWAY:-192.168.$GUEST_NETWORK.1}
+GUEST_MAC=${GUEST_MAC:-"02:16:3e:07:69:`printf '%02X' $GUEST_NETWORK`"}
+GUEST_RAM=${GUEST_RAM:-1524288}
+GUEST_CORES=${GUEST_CORES:-1}
+
+# libvirt.xml configuration
+NET_XML=$vm_dir/net.xml
+cat > $NET_XML <<EOF
+<network>
+ <name>devstack-$GUEST_NETWORK</name>
+ <bridge name="stackbr%d" />
+ <forward/>
+ <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
+ <dhcp>
+ <range start='192.168.$GUEST_NETWORK.2' end='192.168.$GUEST_NETWORK.127' />
+ </dhcp>
+ </ip>
+</network>
+EOF
+
+if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
+ virsh net-destroy devstack-$GUEST_NETWORK || true
+ # destroying the network isn't enough to delete the leases
+ rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
+ virsh net-create $vm_dir/net.xml
+fi
+
+# libvirt.xml configuration
+LIBVIRT_XML=$vm_dir/libvirt.xml
+cat > $LIBVIRT_XML <<EOF
+<domain type='kvm'>
+ <name>$GUEST_NAME</name>
+ <memory>$GUEST_RAM</memory>
+ <os>
+ <type>hvm</type>
+ <kernel>$image_dir/kernel</kernel>
+ <cmdline>root=/dev/vda ro console=ttyS0 init=/usr/lib/cloud-init/uncloud-init ds=nocloud-net;s=http://192.168.$GUEST_NETWORK.1:4567/ ubuntu-pass=ubuntu</cmdline>
+ </os>
+ <features>
+ <acpi/>
+ </features>
+ <clock offset='utc'/>
+ <vcpu>$GUEST_CORES</vcpu>
+ <devices>
+ <disk type='file'>
+ <driver type='qcow2'/>
+ <source file='$vm_dir/disk'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+
+ <interface type='network'>
+ <source network='devstack-$GUEST_NETWORK'/>
+ </interface>
+
+    <!-- The order is significant here: the file-backed serial device must be defined first -->
+ <serial type="file">
+ <source path='$vm_dir/console.log'/>
+ <target port='1'/>
+ </serial>
+
+ <console type='pty' tty='/dev/pts/2'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </console>
+
+ <serial type='pty'>
+ <source path='/dev/pts/2'/>
+ <target port='0'/>
+ </serial>
+
+ <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='0.0.0.0'/>
+ </devices>
+</domain>
+EOF
+
+
+rm -rf $vm_dir/uec
+cp -r $TOOLS_DIR/uec $vm_dir/uec
+
+# set metadata
+cat > $vm_dir/uec/meta-data<<EOF
+hostname: $GUEST_NAME
+instance-id: i-hop
+instance-type: m1.ignore
+local-hostname: $GUEST_NAME.local
+EOF
+
+# set user-data (executed by cloud-init on first boot)
+cat > $vm_dir/uec/user-data<<EOF
+#!/bin/bash
+# hostname needs to resolve for rabbit
+sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
+apt-get update
+apt-get install git sudo -y
+git clone https://github.com/cloudbuilders/devstack.git
+cd devstack
+git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
+git fetch
+git checkout `git rev-parse HEAD`
+cat > localrc <<LOCAL_EOF
+ROOTSLEEP=0
+`cat $TOP_DIR/localrc`
+LOCAL_EOF
+./stack.sh
+EOF
+
+# (re)start a metadata service
+(
+ pid=`lsof -iTCP@192.168.$GUEST_NETWORK.1:4567 -n | awk '{print $2}' | tail -1`
+ [ -z "$pid" ] || kill -9 $pid
+)
+cd $vm_dir/uec
+python meta.py 192.168.$GUEST_NETWORK.1:4567 &
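+# cloud-init's nocloud-net datasource (see the kernel cmdline in libvirt.xml
+# above) fetches meta-data and user-data relative to the seed URL, which is
+# exactly what meta.py serves from $vm_dir/uec. A host-side sanity check
+# (illustrative only):
+#   curl -s http://192.168.$GUEST_NETWORK.1:4567/meta-data
+#   curl -s http://192.168.$GUEST_NETWORK.1:4567/user-data | head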
+
+# Create the instance
+virsh create $vm_dir/libvirt.xml
+
+# Tail the console log till we are done
+WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
+if [ "$WAIT_TILL_LAUNCH" = "1" ]; then
+ set +o xtrace
+  # Done creating the VM, let's tail the log
+ echo
+ echo "============================================================="
+ echo " -- YAY! --"
+ echo "============================================================="
+ echo
+ echo "We're done launching the vm, about to start tailing the"
+ echo "stack.sh log. It will take a second or two to start."
+ echo
+ echo "Just CTRL-C at any time to stop tailing."
+
+ while [ ! -e "$vm_dir/console.log" ]; do
+ sleep 1
+ done
+
+ tail -F $vm_dir/console.log &
+
+ TAIL_PID=$!
+
+ function kill_tail() {
+ kill $TAIL_PID
+ exit 1
+ }
+
+ # Let Ctrl-c kill tail and exit
+ trap kill_tail SIGINT
+
+  echo "Waiting for stack.sh to finish..."
+ while ! egrep -q '^stack.sh (completed|failed)' $vm_dir/console.log ; do
+ sleep 1
+ done
+
+ set -o xtrace
+
+ kill $TAIL_PID
+
+ if ! grep -q "^stack.sh completed in" $vm_dir/console.log; then
+ exit 1
+ fi
+ echo ""
+ echo "Finished - Zip-a-dee Doo-dah!"
+fi
diff --git a/tools/uec/meta.py b/tools/uec/meta.py
new file mode 100644
index 0000000..5b845d8
--- /dev/null
+++ b/tools/uec/meta.py
@@ -0,0 +1,29 @@
+import sys
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+
+def main(host, port, HandlerClass = SimpleHTTPRequestHandler,
+ ServerClass = HTTPServer, protocol="HTTP/1.0"):
+    """Simple HTTP server that listens on a given address:port."""
+
+ server_address = (host, port)
+
+ HandlerClass.protocol_version = protocol
+ httpd = ServerClass(server_address, HandlerClass)
+
+ sa = httpd.socket.getsockname()
+ print "Serving HTTP on", sa[0], "port", sa[1], "..."
+ httpd.serve_forever()
+
+if __name__ == '__main__':
+ if sys.argv[1:]:
+ address = sys.argv[1]
+ else:
+ address = '0.0.0.0'
+ if ':' in address:
+ host, port = address.split(':')
+ else:
+ host = address
+ port = 8080
+
+ main(host, int(port))
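
Because meta.py delegates to SimpleHTTPRequestHandler, it serves whatever
files sit in its current working directory - which is why build_uec.sh cd's
into $vm_dir/uec before launching it. Standalone usage, with the script's
default paths:

    cd /opt/kvmstack/instances/devstack/uec    # contains meta-data, user-data
    python meta.py 192.168.1.1:4567
    # then from another shell: curl http://192.168.1.1:4567/meta-data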