Merge "if using lxc, use cirros rootfs image"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index f999609..67150e4 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -12,7 +12,6 @@
# an error. It is also useful for following allowing as the install occurs.
set -o xtrace
-
# Settings
# ========
@@ -21,16 +20,52 @@
source ./openrc
popd
-# find a machine image to boot
+# Find a machine image to boot
IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
-# launch it
-INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
+# Define secgroup
+SECGROUP=euca_secgroup
-# assure it has booted within a reasonable time
+# Add a secgroup
+euca-add-group -d description $SECGROUP
+
+# Launch it
+INSTANCE=`euca-run-instances -g $SECGROUP -t m1.tiny $IMAGE | grep INSTANCE | cut -f2`
+
+# Assure it has booted within a reasonable time
if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
echo "server didn't become active within $RUNNING_TIMEOUT seconds"
exit 1
fi
+# Allocate floating address
+FLOATING_IP=`euca-allocate-address | cut -f2`
+
+# Associate floating address
+euca-associate-address -i $INSTANCE $FLOATING_IP
+
+
+# Authorize pinging
+euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+ echo "Couldn't ping server"
+ exit 1
+fi
+
+# Revoke pinging
+euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+
+# Delete group
+euca-delete-group $SECGROUP
+
+# Disassociate floating address
+euca-disassociate-address $FLOATING_IP
+
+# Release floating address
+euca-release-address $FLOATING_IP
+
+# Terminate instance
euca-terminate-instances $INSTANCE
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
new file mode 100755
index 0000000..fe06b6e
--- /dev/null
+++ b/exercises/volumes.sh
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+
+# Test nova volumes with the nova command from python-novaclient
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# Get a token for clients that don't support service catalog
+# ==========================================================
+
+# manually create a token by querying keystone (sending JSON data). Keystone
+# returns a token and catalog of endpoints. We use python to parse the token
+# and save it.
+
+TOKEN=`curl -s -d "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
+
+# Launching a server
+# ==================
+
+# List servers for tenant:
+nova list
+
+# Images
+# ------
+
+# Nova has a **deprecated** way of listing images.
+nova image-list
+
+# But we recommend using glance directly
+glance -A $TOKEN index
+
+# Let's grab the id of the first AMI image to launch
+IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1`
+
+# determine instance type
+# -------------------------
+
+# List of instance types:
+nova flavor-list
+
+INSTANCE_NAME=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+INSTANCE_TYPE=`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2`
+if [[ -z "`nova flavor-list | grep $INSTANCE_NAME | cut -d"|" -f2`" ]]; then
+ # and grab the first flavor in the list to launch
+ INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+fi
+
+NAME="myserver"
+
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+
+# Testing
+# =======
+
+# First check if it spins up (becomes active and responds to ping on
+# internal ip). If you run this script from a nova node, you should
+# bypass security groups and have direct access to the server.
+
+# Waiting for boot
+# ----------------
+
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+
+# check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $BOOT_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+ echo "server didn't become active!"
+ exit 1
+fi
+
+# get the IP of the server
+IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
+#VM_UUID=`nova list | grep $NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'`
+
+# for single node deployments, we can ping private ips
+MULTI_HOST=${MULTI_HOST:-0}
+if [ "$MULTI_HOST" = "0" ]; then
+ # sometimes the first ping fails (10 seconds isn't enough time for the VM's
+ # network to respond?), so let's ping for a default of 15 seconds with a
+ # timeout of a second for each ping.
+ if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
+ echo "Couldn't ping server"
+ exit 1
+ fi
+else
+ # On a multi-host system, without vm net access, do a sleep to wait for the boot
+ sleep $BOOT_TIMEOUT
+fi
+
+# Volumes
+# -------
+
+VOL_NAME="myvol-$(openssl rand -hex 4)"
+
+# Verify it doesn't exist
+if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f3 | sed 's/ //g'`" ]]; then
+ echo "Volume $VOL_NAME already exists"
+ exit 1
+fi
+
+# Create a new volume
+nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+ echo "Volume $VOL_NAME not created"
+ exit 1
+fi
+
+# Get volume ID
+VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'`
+
+# Attach to server
+DEVICE=/dev/vdb
+nova volume-attach $VM_UUID $VOL_ID $DEVICE
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
+ echo "Volume $VOL_NAME not attached to $NAME"
+ exit 1
+fi
+
+VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f6 | sed 's/ //g'`
+if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
+ echo "Volume not attached to correct instance"
+ exit 1
+fi
+
+# Detach volume
+nova volume-detach $VM_UUID $VOL_ID
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+ echo "Volume $VOL_NAME not detached from $NAME"
+ exit 1
+fi
+
+# Delete volume
+nova volume-delete $VOL_ID
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then
+ echo "Volume $VOL_NAME not deleted"
+ exit 1
+fi
+
+# shutdown the server
+nova delete $NAME
diff --git a/files/apts/nova b/files/apts/nova
index 4c91470..bc0c23b 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -40,3 +40,4 @@
python-m2crypto
python-boto
python-kombu
+python-feedparser
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index be2d576..6d298d2 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -1,52 +1,54 @@
#!/bin/bash
BIN_DIR=${BIN_DIR:-.}
# Tenants
-$BIN_DIR/keystone-manage $* tenant add admin
-$BIN_DIR/keystone-manage $* tenant add demo
-$BIN_DIR/keystone-manage $* tenant add invisible_to_admin
+$BIN_DIR/keystone-manage tenant add admin
+$BIN_DIR/keystone-manage tenant add demo
+$BIN_DIR/keystone-manage tenant add invisible_to_admin
# Users
-$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD%
-$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD%
+$BIN_DIR/keystone-manage user add admin %ADMIN_PASSWORD%
+$BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD%
# Roles
-$BIN_DIR/keystone-manage $* role add Admin
-$BIN_DIR/keystone-manage $* role add Member
-$BIN_DIR/keystone-manage $* role add KeystoneAdmin
-$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin
-$BIN_DIR/keystone-manage $* role add sysadmin
-$BIN_DIR/keystone-manage $* role add netadmin
-$BIN_DIR/keystone-manage $* role grant Admin admin admin
-$BIN_DIR/keystone-manage $* role grant Member demo demo
-$BIN_DIR/keystone-manage $* role grant sysadmin demo demo
-$BIN_DIR/keystone-manage $* role grant netadmin demo demo
-$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin
-$BIN_DIR/keystone-manage $* role grant Admin admin demo
-$BIN_DIR/keystone-manage $* role grant Admin admin
-$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin
-$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
+$BIN_DIR/keystone-manage role add Admin
+$BIN_DIR/keystone-manage role add Member
+$BIN_DIR/keystone-manage role add KeystoneAdmin
+$BIN_DIR/keystone-manage role add KeystoneServiceAdmin
+$BIN_DIR/keystone-manage role add sysadmin
+$BIN_DIR/keystone-manage role add netadmin
+$BIN_DIR/keystone-manage role grant Admin admin admin
+$BIN_DIR/keystone-manage role grant Member demo demo
+$BIN_DIR/keystone-manage role grant sysadmin demo demo
+$BIN_DIR/keystone-manage role grant netadmin demo demo
+$BIN_DIR/keystone-manage role grant Member demo invisible_to_admin
+$BIN_DIR/keystone-manage role grant Admin admin demo
+$BIN_DIR/keystone-manage role grant Admin admin
+$BIN_DIR/keystone-manage role grant KeystoneAdmin admin
+$BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin
# Services
-$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
-$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
-$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
+$BIN_DIR/keystone-manage service add nova compute "Nova Compute Service"
+$BIN_DIR/keystone-manage service add ec2 ec2 "EC2 Compatability Layer"
+$BIN_DIR/keystone-manage service add glance image "Glance Image Service"
+$BIN_DIR/keystone-manage service add keystone identity "Keystone Identity Service"
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
- $BIN_DIR/keystone-manage $* service add swift object-store "Swift Service"
+ $BIN_DIR/keystone-manage service add swift object-store "Swift Service"
fi
#endpointTemplates
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
+$BIN_DIR/keystone-manage endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
+$BIN_DIR/keystone-manage endpointTemplates add RegionOne ec2 http://%HOST_IP%:8773/services/Cloud http://%HOST_IP%:8773/services/Admin http://%HOST_IP%:8773/services/Cloud 1 1
+$BIN_DIR/keystone-manage endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
+$BIN_DIR/keystone-manage endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
- $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
+ $BIN_DIR/keystone-manage endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
fi
# Tokens
-$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
+$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
# EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD
# but keystone doesn't parse them - it is just a blob from keystone's
# point of view
-$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
-$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"
+$BIN_DIR/keystone-manage credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
+$BIN_DIR/keystone-manage credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"
diff --git a/files/pips/horizon b/files/pips/horizon
index f35a01d..893efb7 100644
--- a/files/pips/horizon
+++ b/files/pips/horizon
@@ -1,6 +1,4 @@
django-nose-selenium
pycrypto==2.3
--e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx
-e git+https://github.com/jacobian/openstack.compute.git#egg=openstack
--e git+https://github.com/4P/python-keystoneclient.git#egg=python-keystoneclient
diff --git a/stack.sh b/stack.sh
index f978e44..19543ca 100755
--- a/stack.sh
+++ b/stack.sh
@@ -82,12 +82,20 @@
# apt-get wrapper to just get arguments set correctly
function apt_get() {
+ [[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[ "$(id -u)" = "0" ] && sudo="env"
$sudo DEBIAN_FRONTEND=noninteractive apt-get \
--option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
+# Check to see if we are already running a stack.sh
+if screen -ls | egrep -q "[0-9].stack"; then
+ echo "You are already running a stack.sh session."
+ echo "To rejoin this session type 'screen -x stack'."
+ echo "To destroy this session, kill the running screen."
+ exit 1
+fi
# OpenStack is designed to be run as a regular user (Horizon will fail to run
# as root, since apache refused to startup serve content from root user). If
@@ -140,12 +148,30 @@
sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova
fi
+# Normalize config values to True or False
+# VAR=`trueorfalse default-value test-value`
+function trueorfalse() {
+ local default=$1
+ local testval=$2
+
+ [[ -z "$testval" ]] && { echo "$default"; return; }
+ [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
+ [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
+ echo "$default"
+}
+
+# Set True to configure stack.sh to run cleanly without Internet access.
+# stack.sh must have been previously run with Internet access to install
+# prerequisites and initialize $DEST.
+OFFLINE=`trueorfalse False $OFFLINE`
+
# Set the destination directories for openstack projects
NOVA_DIR=$DEST/nova
HORIZON_DIR=$DEST/horizon
GLANCE_DIR=$DEST/glance
KEYSTONE_DIR=$DEST/keystone
NOVACLIENT_DIR=$DEST/python-novaclient
+KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
OPENSTACKX_DIR=$DEST/openstackx
NOVNC_DIR=$DEST/noVNC
SWIFT_DIR=$DEST/swift
@@ -165,6 +191,7 @@
# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
# Nova hypervisor configuration. We default to libvirt whth **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. Stack.sh can
@@ -188,18 +215,6 @@
fi
fi
-# Normalize config values to True or False
-# VAR=`trueorfalse default-value test-value`
-function trueorfalse() {
- local default=$1
- local testval=$2
-
- [[ -z "$testval" ]] && { echo "$default"; return; }
- [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
- [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
- echo "$default"
-}
-
# Configure services to syslog instead of writing to individual log files
SYSLOG=`trueorfalse False $SYSLOG`
SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
@@ -452,17 +467,23 @@
done
}
+function pip_install {
+ [[ "$OFFLINE" = "True" ]] && return
+ sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors $@
+}
+
# install apt requirements
apt_get update
apt_get install $(get_packages)
# install python requirements
-sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*`
+pip_install `cat $FILES/pips/* | uniq`
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
function git_clone {
+ [[ "$OFFLINE" = "True" ]] && return
GIT_REMOTE=$1
GIT_DEST=$2
@@ -531,6 +552,7 @@
if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
# django powered web control panel for openstack
git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
+ git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
fi
if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then
# openstackx is a collection of extensions to openstack.compute & nova
@@ -568,6 +590,7 @@
cd $OPENSTACKX_DIR; sudo python setup.py develop
fi
if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
+ cd $KEYSTONECLIENT_DIR; sudo python setup.py develop
cd $HORIZON_DIR/horizon; sudo python setup.py develop
cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
fi
@@ -733,6 +756,18 @@
sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
fi
+# Helper to clean iptables rules
+function clean_iptables() {
+ # Delete rules
+ sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
+ # Delete nat rules
+ sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
+ # Delete chains
+ sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
+ # Delete nat chains
+ sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
+}
+
if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
# Virtualization Configuration
@@ -796,13 +831,24 @@
fi
fi
+ # Clean iptables from previous runs
+ clean_iptables
+
+ # Destroy old instances
+ instances=`virsh list | grep $INSTANCE_NAME_PREFIX | cut -d " " -f3`
+ if [ ! $instances = "" ]; then
+ echo $instances | xargs -n1 virsh destroy
+ echo $instances | xargs -n1 virsh undefine
+ fi
+
# Clean out the instances directory.
sudo rm -rf $NOVA_DIR/instances/*
fi
if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
- # delete traces of nova networks from prior runs
+ # Delete traces of nova networks from prior runs
sudo killall dnsmasq || true
+ clean_iptables
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
fi
@@ -1012,6 +1058,7 @@
add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova"
add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
+add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then
add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions"
add_nova_flag "--osapi_extension=extensions.admin.Admin"
diff --git a/stackrc b/stackrc
index 5be96fc..9bc3be6 100644
--- a/stackrc
+++ b/stackrc
@@ -30,6 +30,10 @@
NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git
NOVACLIENT_BRANCH=master
+# python keystone client library that horizon uses
+KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient
+KEYSTONECLIENT_BRANCH=master
+
# openstackx is a collection of extensions to openstack.compute & nova
# that is *deprecated*. The code is being moved into python-novaclient & nova.
OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
diff --git a/tools/jenkins/adapters/swift.sh b/tools/jenkins/adapters/swift.sh
new file mode 100755
index 0000000..c1362ee
--- /dev/null
+++ b/tools/jenkins/adapters/swift.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd ../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./swift.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
new file mode 100755
index 0000000..ec29209
--- /dev/null
+++ b/tools/jenkins/adapters/volumes.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd ../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises && ./volumes.sh'