Merge "Splits out build config params in Tempest."
diff --git a/AUTHORS b/AUTHORS
index 6141d67..4f771ce 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -29,11 +29,13 @@
Justin Shepherd <galstrom21@gmail.com>
Ken Pepple <ken.pepple@rabbityard.com>
Kiall Mac Innes <kiall@managedit.ie>
+Osamu Habuka <xiu.yushen@gmail.com>
Russell Bryant <rbryant@redhat.com>
Scott Moser <smoser@ubuntu.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <xtoddx@gmail.com>
Tres Henry <tres@treshenry.net>
+Vincent Untz <vuntz@suse.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Yun Mao <yunmao@gmail.com>
Yong Sheng Gong <gongysh@cn.ibm.com>
diff --git a/HACKING.rst b/HACKING.rst
index 7262cff..e8f90c7 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -53,9 +53,23 @@
source $TOP_DIR/openrc
``stack.sh`` is a rather large monolithic script that flows through from beginning
-to end. There is a proposal to segment it to put the OpenStack projects
-into their own sub-scripts to better document the projects as a unit rather than
-have it scattered throughout ``stack.sh``. Someday.
+to end. The process of breaking it down into project-level sub-scripts has begun
+with the introduction of ``lib/cinder`` and ``lib/ceilometer``.
+
+These library sub-scripts have a number of fixed entry points, some of which may
+just be stubs. These entry points will be called by ``stack.sh`` in the
+following order::
+
+ install_XXXX
+ configure_XXXX
+ init_XXXX
+ start_XXXX
+ stop_XXXX
+ cleanup_XXXX
+
+There is a sub-script template in ``lib/templates`` to be used in creating new
+service sub-scripts. The comments in ``<>`` are meta comments describing
+how to use the template and should be removed.
Documentation
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 38fac12..8a4f9c1 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -7,14 +7,15 @@
# * Updating Aggregate details
# * Testing Aggregate metadata
# * Testing Aggregate delete
-# * TODO(johngar) - test adding a host (idealy with two hosts)
+# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
+# * Testing add/remove hosts (with one host)
echo "**************************************************"
echo "Begin DevStack Exercise: $0"
echo "**************************************************"
# This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
@@ -47,6 +48,7 @@
# ===================
AGGREGATE_NAME=test_aggregate_$RANDOM
+AGGREGATE2_NAME=test_aggregate_$RANDOM
AGGREGATE_A_ZONE=nova
exit_if_aggregate_present() {
@@ -63,6 +65,7 @@
exit_if_aggregate_present $AGGREGATE_NAME
AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1`
+AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1`
# check aggregate created
nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
@@ -120,13 +123,23 @@
# Test aggregate-add/remove-host
# ==============================
if [ "$VIRT_DRIVER" == "xenserver" ]; then
- echo "TODO(johngarbutt) add tests for add/remove host from aggregate"
+ echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
fi
-
+HOST=`nova host-list | grep compute | get_field 1`
+# Make sure we can add two aggregates to the same host
+nova aggregate-add-host $AGGREGATE_ID $HOST
+nova aggregate-add-host $AGGREGATE2_ID $HOST
+if nova aggregate-add-host $AGGREGATE2_ID $HOST; then
+ echo "ERROR could add duplicate host to single aggregate"
+ exit -1
+fi
+nova aggregate-remove-host $AGGREGATE2_ID $HOST
+nova aggregate-remove-host $AGGREGATE_ID $HOST
# Test aggregate-delete
# =====================
nova aggregate-delete $AGGREGATE_ID
+nova aggregate-delete $AGGREGATE2_ID
exit_if_aggregate_present $AGGREGATE_NAME
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
new file mode 100755
index 0000000..cff29d2
--- /dev/null
+++ b/exercises/quantum-adv-test.sh
@@ -0,0 +1,486 @@
+#!/usr/bin/env bash
+#
+
+# **quantum.sh**
+
+# We will use this test to perform integration testing of nova and
+# other components with Quantum.
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+
+set -o errtrace
+trap failed ERR
+failed() {
+ local r=$?
+ set +o errtrace
+ set +o xtrace
+ echo "Failed to execute"
+ echo "Starting cleanup..."
+ delete_all
+ echo "Finished cleanup"
+ exit $r
+}
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+#------------------------------------------------------------------------------
+# Quantum config check
+#------------------------------------------------------------------------------
+# Warn if quantum is not enabled
+if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+ echo "WARNING: Running quantum test without enabling quantum"
+fi
+
+#------------------------------------------------------------------------------
+# Environment
+#------------------------------------------------------------------------------
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# If quantum is not enabled we exit with exitcode 55 which means
+# exercise is skipped.
+is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55
+
+#------------------------------------------------------------------------------
+# Test settings for quantum
+#------------------------------------------------------------------------------
+
+TENANTS="DEMO1"
+# TODO (nati) Test public network
+#TENANTS="DEMO1,DEMO2"
+
+PUBLIC_NAME="admin"
+DEMO1_NAME="demo1"
+DEMO2_NAME="demo2"
+
+PUBLIC_NUM_NET=1
+DEMO1_NUM_NET=1
+DEMO2_NUM_NET=2
+
+PUBLIC_NET1_CIDR="200.0.0.0/24"
+DEMO1_NET1_CIDR="190.0.0.0/24"
+DEMO2_NET1_CIDR="191.0.0.0/24"
+DEMO2_NET2_CIDR="191.0.1.0/24"
+
+PUBLIC_NET1_GATEWAY="200.0.0.1"
+DEMO1_NET1_GATEWAY="190.0.0.1"
+DEMO2_NET1_GATEWAY="191.0.0.1"
+DEMO2_NET2_GATEWAY="191.0.1.1"
+
+PUBLIC_NUM_VM=1
+DEMO1_NUM_VM=1
+DEMO2_NUM_VM=2
+
+PUBLIC_VM1_NET='admin-net1'
+DEMO1_VM1_NET='demo1-net1'
+# Multinic settings. But this fails without a nic setting in the OS image
+DEMO2_VM1_NET='demo2-net1'
+DEMO2_VM2_NET='demo2-net2'
+
+PUBLIC_NUM_ROUTER=1
+DEMO1_NUM_ROUTER=1
+DEMO2_NUM_ROUTER=1
+
+PUBLIC_ROUTER1_NET="admin-net1"
+DEMO1_ROUTER1_NET="demo1-net1"
+DEMO2_ROUTER1_NET="demo2-net1"
+
+#------------------------------------------------------------------------------
+# Keystone settings.
+#------------------------------------------------------------------------------
+KEYSTONE="keystone"
+
+#------------------------------------------------------------------------------
+# Get a token for clients that don't support service catalog
+#------------------------------------------------------------------------------
+
+# manually create a token by querying keystone (sending JSON data). Keystone
+# returns a token and catalog of endpoints. We use python to parse the token
+# and save it.
+
+TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'`
+
+#------------------------------------------------------------------------------
+# Various functions.
+#------------------------------------------------------------------------------
+function foreach_tenant {
+ COMMAND=$1
+ for TENANT in ${TENANTS//,/ };do
+ eval ${COMMAND//%TENANT%/$TENANT}
+ done
+}
+
+function foreach_tenant_resource {
+ COMMAND=$1
+ RESOURCE=$2
+ for TENANT in ${TENANTS//,/ };do
+ eval 'NUM=$'"${TENANT}_NUM_$RESOURCE"
+ for i in `seq $NUM`;do
+ local COMMAND_LOCAL=${COMMAND//%TENANT%/$TENANT}
+ COMMAND_LOCAL=${COMMAND_LOCAL//%NUM%/$i}
+ eval $COMMAND_LOCAL
+ done
+ done
+}
+
+function foreach_tenant_vm {
+ COMMAND=$1
+ foreach_tenant_resource "$COMMAND" 'VM'
+}
+
+function foreach_tenant_net {
+ COMMAND=$1
+ foreach_tenant_resource "$COMMAND" 'NET'
+}
+
+function get_image_id {
+ local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+ echo "$IMAGE_ID"
+}
+
+function get_tenant_id {
+ local TENANT_NAME=$1
+ local TENANT_ID=`keystone tenant-list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+ echo "$TENANT_ID"
+}
+
+function get_user_id {
+ local USER_NAME=$1
+ local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'`
+ echo "$USER_ID"
+}
+
+function get_role_id {
+ local ROLE_NAME=$1
+ local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'`
+ echo "$ROLE_ID"
+}
+
+function get_network_id {
+ local NETWORK_NAME="$1"
+ local NETWORK_ID=`quantum net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+ echo $NETWORK_ID
+}
+
+function get_flavor_id {
+ local INSTANCE_TYPE=$1
+ local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+ echo "$FLAVOR_ID"
+}
+
+function confirm_server_active {
+ local VM_UUID=$1
+ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova --no_cache show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+ echo "server '$VM_UUID' did not become active!"
+ false
+fi
+
+}
+
+function add_tenant {
+ local TENANT=$1
+ local USER=$2
+
+ $KEYSTONE tenant-create --name=$TENANT
+ $KEYSTONE user-create --name=$USER --pass=${ADMIN_PASSWORD}
+
+ local USER_ID=$(get_user_id $USER)
+ local TENANT_ID=$(get_tenant_id $TENANT)
+
+ $KEYSTONE user-role-add --user-id $USER_ID --role-id $(get_role_id Member) --tenant-id $TENANT_ID
+}
+
+function remove_tenant {
+ local TENANT=$1
+ local TENANT_ID=$(get_tenant_id $TENANT)
+
+ $KEYSTONE tenant-delete $TENANT_ID
+}
+
+function remove_user {
+ local USER=$1
+ local USER_ID=$(get_user_id $USER)
+
+ $KEYSTONE user-delete $USER_ID
+}
+
+
+
+#------------------------------------------------------------------------------
+# "Create" functions
+#------------------------------------------------------------------------------
+
+function create_tenants {
+ source $TOP_DIR/openrc admin admin
+ add_tenant demo1 demo1 demo1
+ add_tenant demo2 demo2 demo2
+}
+
+function delete_tenants_and_users {
+ source $TOP_DIR/openrc admin admin
+ remove_user demo1
+ remove_tenant demo1
+ remove_user demo2
+ remove_tenant demo2
+ echo "removed all tenants"
+}
+
+function create_network {
+ local TENANT=$1
+ local GATEWAY=$2
+ local CIDR=$3
+ local NUM=$4
+ local EXTRA=$5
+ local NET_NAME="${TENANT}-net$NUM"
+ local ROUTER_NAME="${TENANT}-router${NUM}"
+ source $TOP_DIR/openrc admin admin
+ local TENANT_ID=$(get_tenant_id $TENANT)
+ source $TOP_DIR/openrc $TENANT $TENANT
+ local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+ quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
+    #TODO(nati) comment out until l3-agent is merged
+ #local ROUTER_ID=$($QUANTUM router-create --tenant_id $TENANT_ID $ROUTER_NAME| grep ' id ' | awk '{print $4}' )
+ #for NET_NAME in ${NET_NAMES//,/ };do
+ # SUBNET_ID=`get_subnet_id $NET_NAME`
+ # $QUANTUM router-interface-create $NAME --subnet_id $SUBNET_ID
+ #done
+}
+
+function create_networks {
+ foreach_tenant_net 'create_network ${%TENANT%_NAME} ${%TENANT%_NET%NUM%_GATEWAY} ${%TENANT%_NET%NUM%_CIDR} %NUM% ${%TENANT%_NET%NUM%_EXTRA}'
+ #TODO(nati) test security group function
+ # allow ICMP for both tenant's security groups
+ #source $TOP_DIR/openrc demo1 demo1
+ #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+ #source $TOP_DIR/openrc demo2 demo2
+ #$NOVA secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+}
+
+function create_vm {
+ local TENANT=$1
+ local NUM=$2
+ local NET_NAMES=$3
+ source $TOP_DIR/openrc $TENANT $TENANT
+ local NIC=""
+ for NET_NAME in ${NET_NAMES//,/ };do
+ NIC="$NIC --nic net-id="`get_network_id $NET_NAME`
+ done
+ #TODO (nati) Add multi-nic test
+ #TODO (nati) Add public-net test
+ local VM_UUID=`nova --no_cache boot --flavor $(get_flavor_id m1.tiny) \
+ --image $(get_image_id) \
+ $NIC \
+ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+ die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID
+ confirm_server_active $VM_UUID
+}
+
+function create_vms {
+ foreach_tenant_vm 'create_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}'
+}
+
+function ping_ip {
+ # Test agent connection. Assumes namespaces are disabled, and
+ # that DHCP is in use, but not L3
+ local VM_NAME=$1
+ IP=`nova --no_cache show $VM_NAME | grep 'network' | awk '{print $5}'`
+ if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
+ echo "Could not ping $VM_NAME"
+ false
+ fi
+}
+
+function check_vm {
+ local TENANT=$1
+ local NUM=$2
+ local VM_NAME="$TENANT-server$NUM"
+ source $TOP_DIR/openrc $TENANT $TENANT
+ ping_ip $VM_NAME
+ # TODO (nati) test ssh connection
+ # TODO (nati) test inter connection between vm
+ # TODO (nati) test namespace dhcp
+ # TODO (nati) test dhcp host routes
+ # TODO (nati) test multi-nic
+ # TODO (nati) use test-agent
+ # TODO (nati) test L3 forwarding
+ # TODO (nati) test floating ip
+ # TODO (nati) test security group
+}
+
+function check_vms {
+ foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM%'
+}
+
+function shutdown_vm {
+ local TENANT=$1
+ local NUM=$2
+ source $TOP_DIR/openrc $TENANT $TENANT
+ VM_NAME=${TENANT}-server$NUM
+ nova --no_cache delete $VM_NAME
+}
+
+function shutdown_vms {
+ foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
+ if ! timeout $TERMINATE_TIMEOUT sh -c "while nova --no_cache list | grep -q ACTIVE; do sleep 1; done"; then
+ echo "Some VMs failed to shutdown"
+ false
+ fi
+}
+
+function delete_network {
+ local TENANT=$1
+ source $TOP_DIR/openrc admin admin
+ local TENANT_ID=$(get_tenant_id $TENANT)
+    #TODO(nati) comment out until the l3-agent is merged
+ #for res in port subnet net router;do
+ for res in port subnet net;do
+ quantum ${res}-list -F id -F tenant_id | grep $TENANT_ID | awk '{print $2}' | xargs -I % quantum ${res}-delete %
+ done
+}
+
+function delete_networks {
+ foreach_tenant 'delete_network ${%TENANT%_NAME}'
+    #TODO(nati) add security group check after it is implemented
+ # source $TOP_DIR/openrc demo1 demo1
+ # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
+ # source $TOP_DIR/openrc demo2 demo2
+ # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
+}
+
+function create_all {
+ create_tenants
+ create_networks
+ create_vms
+}
+
+function delete_all {
+ shutdown_vms
+ delete_networks
+ delete_tenants_and_users
+}
+
+function all {
+ create_all
+ check_vms
+ delete_all
+}
+
+#------------------------------------------------------------------------------
+# Test functions.
+#------------------------------------------------------------------------------
+function test_functions {
+ IMAGE=$(get_image_id)
+ echo $IMAGE
+
+ TENANT_ID=$(get_tenant_id demo)
+ echo $TENANT_ID
+
+ FLAVOR_ID=$(get_flavor_id m1.tiny)
+ echo $FLAVOR_ID
+
+ NETWORK_ID=$(get_network_id admin)
+ echo $NETWORK_ID
+}
+
+#------------------------------------------------------------------------------
+# Usage and main.
+#------------------------------------------------------------------------------
+usage() {
+ echo "$0: [-h]"
+ echo " -h, --help Display help message"
+ echo " -t, --tenant Create tenants"
+ echo " -n, --net Create networks"
+ echo " -v, --vm Create vms"
+ echo " -c, --check Check connection"
+ echo " -x, --delete-tenants Delete tenants"
+ echo " -y, --delete-nets Delete networks"
+ echo " -z, --delete-vms Delete vms"
+ echo " -T, --test Test functions"
+}
+
+main() {
+
+ echo Description
+ echo
+ echo Copyright 2012, Cisco Systems
+ echo Copyright 2012, Nicira Networks, Inc.
+ echo Copyright 2012, NTT MCL, Inc.
+ echo
+ echo Please direct any questions to dedutta@cisco.com, dan@nicira.com, nachi@nttmcl.com
+ echo
+
+
+ if [ $# -eq 0 ] ; then
+ # if no args are provided, run all tests
+ all
+ else
+
+ while [ "$1" != "" ]; do
+ case $1 in
+ -h | --help ) usage
+ exit
+ ;;
+ -n | --net ) create_networks
+ exit
+ ;;
+ -v | --vm ) create_vms
+ exit
+ ;;
+ -t | --tenant ) create_tenants
+ exit
+ ;;
+ -c | --check ) check_vms
+ exit
+ ;;
+ -T | --test ) test_functions
+ exit
+ ;;
+ -x | --delete-tenants ) delete_tenants_and_users
+ exit
+ ;;
+ -y | --delete-nets ) delete_networks
+ exit
+ ;;
+ -z | --delete-vms ) shutdown_vms
+ exit
+ ;;
+ -a | --all ) all
+ exit
+ ;;
+ * ) usage
+ exit 1
+ esac
+ shift
+ done
+ fi
+}
+
+
+#-------------------------------------------------------------------------------
+# Kick off script.
+#-------------------------------------------------------------------------------
+echo $*
+main $*
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/quantum.sh b/exercises/quantum.sh
deleted file mode 100755
index e19a78e..0000000
--- a/exercises/quantum.sh
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/usr/bin/env bash
-#
-
-# **quantum.sh**
-
-# We will use this test to perform integration testing of nova and
-# other components with Quantum.
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-#------------------------------------------------------------------------------
-# Quantum config check
-#------------------------------------------------------------------------------
-# Warn if quantum is not enabled
-if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then
- echo "WARNING: Running quantum test without enabling quantum"
-fi
-
-#------------------------------------------------------------------------------
-# Environment
-#------------------------------------------------------------------------------
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# If quantum is not enabled we exit with exitcode 55 which mean
-# exercise is skipped.
-is_service_enabled quantum || exit 55
-
-#------------------------------------------------------------------------------
-# Various default parameters.
-#------------------------------------------------------------------------------
-
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-60}
-
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
-
-# Max time to wait before delete VMs and delete Networks
-VM_NET_DELETE_TIMEOUT=${VM_NET_TIMEOUT:-10}
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMi image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# OVS Hosts
-OVS_HOSTS=${DEFAULT_OVS_HOSTS:-"localhost"}
-
-#------------------------------------------------------------------------------
-# Nova settings.
-#------------------------------------------------------------------------------
-NOVA_MANAGE=/opt/stack/nova/bin/nova-manage
-NOVA=/usr/local/bin/nova
-NOVA_CONF=/etc/nova/nova.conf
-
-#------------------------------------------------------------------------------
-# Mysql settings.
-#------------------------------------------------------------------------------
-MYSQL="/usr/bin/mysql --skip-column-name --host=$MYSQL_HOST"
-
-#------------------------------------------------------------------------------
-# Keystone settings.
-#------------------------------------------------------------------------------
-KEYSTONE="keystone"
-
-#------------------------------------------------------------------------------
-# Get a token for clients that don't support service catalog
-#------------------------------------------------------------------------------
-
-# manually create a token by querying keystone (sending JSON data). Keystone
-# returns a token and catalog of endpoints. We use python to parse the token
-# and save it.
-
-TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'`
-
-#------------------------------------------------------------------------------
-# Various functions.
-#------------------------------------------------------------------------------
-function get_image_id {
- local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
- echo "$IMAGE_ID"
-}
-
-function get_tenant_id {
- local TENANT_NAME=$1
- local TENANT_ID=`keystone tenant-list | grep $TENANT_NAME | awk '{print $2}'`
- echo "$TENANT_ID"
-}
-
-function get_user_id {
- local USER_NAME=$1
- local USER_ID=`keystone user-list | grep $USER_NAME | awk '{print $2}'`
- echo "$USER_ID"
-}
-
-function get_role_id {
- local ROLE_NAME=$1
- local ROLE_ID=`keystone role-list | grep $ROLE_NAME | awk '{print $2}'`
- echo "$ROLE_ID"
-}
-
-# TODO: (Debo) Change Quantum client CLI and then remove the MYSQL stuff.
-function get_network_id {
- local NETWORK_NAME=$1
- local QUERY="select uuid from networks where label='$NETWORK_NAME'"
- local NETWORK_ID=`echo $QUERY | $MYSQL -u root -p$MYSQL_PASSWORD nova`
- echo "$NETWORK_ID"
-}
-
-function get_flavor_id {
- local INSTANCE_TYPE=$1
- local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
- echo "$FLAVOR_ID"
-}
-
-function add_tenant {
- local TENANT=$1
- local USER=$3
- local PASSWORD=$2
-
- $KEYSTONE tenant-create --name=$TENANT
- $KEYSTONE user-create --name=$USER --pass=${PASSWORD}
-
- local USER_ID=$(get_user_id $USER)
- local TENANT_ID=$(get_tenant_id $TENANT)
-
- $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id Member) --tenant_id $TENANT_ID
- $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id admin) --tenant_id $TENANT_ID
- $KEYSTONE user-role-add --user $USER_ID --role $(get_role_id anotherrole) --tenant_id $TENANT_ID
- #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id sysadmin) --tenant_id $TENANT_ID
- #$KEYSTONE user-role-add --user $USER_ID --role $(get_role_id netadmin) --tenant_id $TENANT_ID
-}
-
-function remove_tenant {
- local TENANT=$1
- local TENANT_ID=$(get_tenant_id $TENANT)
-
- $KEYSTONE tenant-delete $TENANT_ID
-}
-
-function remove_user {
- local USER=$1
- local USER_ID=$(get_user_id $USER)
-
- $KEYSTONE user-delete $USER_ID
-}
-
-
-#------------------------------------------------------------------------------
-# "Create" functions
-#------------------------------------------------------------------------------
-
-function create_tenants {
- add_tenant demo1 nova demo1
- add_tenant demo2 nova demo2
-}
-
-function delete_tenants_and_users {
- remove_tenant demo1
- remove_tenant demo2
- remove_user demo1
- remove_user demo2
-}
-
-function create_networks {
- $NOVA_MANAGE --flagfile=$NOVA_CONF network create \
- --label=public-net1 \
- --fixed_range_v4=11.0.0.0/24
-
- $NOVA_MANAGE --flagfile=$NOVA_CONF network create \
- --label=demo1-net1 \
- --fixed_range_v4=12.0.0.0/24 \
- --project_id=$(get_tenant_id demo1) \
- --priority=1
-
- $NOVA_MANAGE --flagfile=$NOVA_CONF network create \
- --label=demo2-net1 \
- --fixed_range_v4=13.0.0.0/24 \
- --project_id=$(get_tenant_id demo2) \
- --priority=1
-}
-
-function create_vms {
- PUBLIC_NET1_ID=$(get_network_id public-net1)
- DEMO1_NET1_ID=$(get_network_id demo1-net1)
- DEMO2_NET1_ID=$(get_network_id demo2-net1)
-
- export OS_TENANT_NAME=demo1
- export OS_USERNAME=demo1
- export OS_PASSWORD=nova
- VM_UUID1=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \
- --image $(get_image_id) \
- --nic net-id=$PUBLIC_NET1_ID \
- --nic net-id=$DEMO1_NET1_ID \
- demo1-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
- die_if_not_set VM_UUID1 "Failure launching demo1-server1"
-
- export OS_TENANT_NAME=demo2
- export OS_USERNAME=demo2
- export OS_PASSWORD=nova
- VM_UUID2=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \
- --image $(get_image_id) \
- --nic net-id=$PUBLIC_NET1_ID \
- --nic net-id=$DEMO2_NET1_ID \
- demo2-server1 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
- die_if_not_set VM_UUID2 "Failure launching demo2-server1"
-
- VM_UUID3=`$NOVA boot --flavor $(get_flavor_id m1.tiny) \
- --image $(get_image_id) \
- --nic net-id=$PUBLIC_NET1_ID \
- --nic net-id=$DEMO2_NET1_ID \
- demo2-server2 | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
- die_if_not_set VM_UUID3 "Failure launching demo2-server2"
-
-}
-
-function ping_vms {
-
- echo "Sleeping a bit let the VMs come up"
- sleep $ACTIVE_TIMEOUT
-
- export OS_TENANT_NAME=demo1
- export OS_USERNAME=demo1
- export OS_PASSWORD=nova
- # get the IP of the servers
- PUBLIC_IP1=`nova show $VM_UUID1 | grep public-net1 | awk '{print $5}'`
- export OS_TENANT_NAME=demo2
- export OS_USERNAME=demo2
- export OS_PASSWORD=nova
- PUBLIC_IP2=`nova show $VM_UUID2 | grep public-net1 | awk '{print $5}'`
-
- MULTI_HOST=`trueorfalse False $MULTI_HOST`
- if [ "$MULTI_HOST" = "False" ]; then
- # sometimes the first ping fails (10 seconds isn't enough time for the VM's
- # network to respond?), so let's ping for a default of 15 seconds with a
- # timeout of a second for each ping.
- if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP1; do sleep 1; done"; then
- echo "Couldn't ping server"
- exit 1
- fi
- if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $PUBLIC_IP2; do sleep 1; done"; then
- echo "Couldn't ping server"
- exit 1
- fi
- else
- # On a multi-host system, without vm net access, do a sleep to wait for the boot
- sleep $BOOT_TIMEOUT
- fi
-}
-
-function shutdown_vms {
- export OS_TENANT_NAME=demo1
- export OS_USERNAME=demo1
- export OS_PASSWORD=nova
- nova delete $VM_UUID1
-
- export OS_TENANT_NAME=demo2
- export OS_USERNAME=demo2
- export OS_PASSWORD=nova
- nova delete $VM_UUID2
- nova delete $VM_UUID3
-
-}
-
-function delete_networks {
- PUBLIC_NET1_ID=$(get_network_id public-net1)
- DEMO1_NET1_ID=$(get_network_id demo1-net1)
- DEMO2_NET1_ID=$(get_network_id demo2-net1)
- nova-manage network delete --uuid=$PUBLIC_NET1_ID
- nova-manage network delete --uuid=$DEMO1_NET1_ID
- nova-manage network delete --uuid=$DEMO2_NET1_ID
-}
-
-function all {
- create_tenants
- create_networks
- create_vms
- ping_vms
- shutdown_vms
- delete_networks
- delete_tenants_and_users
-}
-
-#------------------------------------------------------------------------------
-# Test functions.
-#------------------------------------------------------------------------------
-function test_functions {
- IMAGE=$(get_image_id)
- echo $IMAGE
-
- TENANT_ID=$(get_tenant_id demo)
- echo $TENANT_ID
-
- FLAVOR_ID=$(get_flavor_id m1.tiny)
- echo $FLAVOR_ID
-
- NETWORK_ID=$(get_network_id private)
- echo $NETWORK_ID
-}
-
-#------------------------------------------------------------------------------
-# Usage and main.
-#------------------------------------------------------------------------------
-usage() {
- echo "$0: [-h]"
- echo " -h, --help Display help message"
- echo " -n, --net Create networks"
- echo " -v, --vm Create vms"
- echo " -t, --tenant Create tenants"
- echo " -T, --test Test functions"
-}
-
-main() {
- if [ $# -eq 0 ] ; then
- usage
- exit
- fi
-
- echo Description
- echo
- echo Copyright 2012, Cisco Systems
- echo Copyright 2012, Nicira Networks, Inc.
- echo
- echo Please direct any questions to dedutta@cisco.com, dlapsley@nicira.com
- echo
-
- while [ "$1" != "" ]; do
- case $1 in
- -h | --help ) usage
- exit
- ;;
- -n | --net ) create_networks
- exit
- ;;
- -v | --vm ) create_vms
- exit
- ;;
- -t | --tenant ) create_tenants
- exit
- ;;
- -p | --ping ) ping_vms
- exit
- ;;
- -T | --test ) test_functions
- exit
- ;;
- -a | --all ) all
- exit
- ;;
- * ) usage
- exit 1
- esac
- shift
- done
-}
-
-
-#-------------------------------------------------------------------------------
-# Kick off script.
-#-------------------------------------------------------------------------------
-echo $*
-main -a
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
new file mode 100644
index 0000000..c67ade3
--- /dev/null
+++ b/files/apts/ceilometer-collector
@@ -0,0 +1,2 @@
+python-pymongo
+mongodb-server
diff --git a/files/apts/quantum b/files/apts/quantum
new file mode 100644
index 0000000..568438f
--- /dev/null
+++ b/files/apts/quantum
@@ -0,0 +1,16 @@
+iptables
+mysql #NOPRIME
+sudo
+python-paste
+python-routes
+python-netaddr
+python-pastedeploy
+python-greenlet
+python-kombu
+python-eventlet
+python-sqlalchemy
+python-mysqldb
+python-pyudev
+python-qpid # dist:precise
+dnsmasq-base
+dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index 66052b6..ceb6458 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -34,3 +34,8 @@
catalog.RegionOne.image.adminURL = http://%SERVICE_HOST%:9292
catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292
catalog.RegionOne.image.name = Image Service
+
+catalog.RegionOne.heat.publicURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.heat.adminURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.heat.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.heat.name = Heat Service
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 6987797..2a8d070 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -10,6 +10,7 @@
# service quantum admin # if enabled
# service swift admin # if enabled
# service cinder admin # if enabled
+# service heat admin # if enabled
# demo admin admin
# demo demo Member, anotherrole
# invisible_to_admin demo Member
@@ -154,6 +155,29 @@
fi
fi
+# Heat
+if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
+ HEAT_USER=$(get_id keystone user-create --name=heat \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=heat@example.com)
+ keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user_id $HEAT_USER \
+ --role_id $ADMIN_ROLE
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ HEAT_SERVICE=$(get_id keystone service-create \
+ --name=heat \
+ --type=orchestration \
+ --description="Heat Service")
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $HEAT_SERVICE \
+ --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \
+ --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1" \
+ --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1"
+ fi
+fi
+
# Glance
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
GLANCE_USER=$(get_id keystone user-create \
@@ -296,3 +320,4 @@
--internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
fi
fi
+
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
new file mode 100644
index 0000000..c5c855c
--- /dev/null
+++ b/files/rpms/ceilometer-collector
@@ -0,0 +1,2 @@
+mongodb-server
+pymongo
diff --git a/files/rpms/quantum b/files/rpms/quantum
new file mode 100644
index 0000000..6ca9c35
--- /dev/null
+++ b/files/rpms/quantum
@@ -0,0 +1,23 @@
+MySQL-python
+dnsmasq-utils # for dhcp_release
+ebtables
+iptables
+iputils
+mysql-server # NOPRIME
+python-boto
+python-eventlet
+python-greenlet
+python-iso8601
+python-kombu
+python-netaddr
+python-paste
+python-paste-deploy
+python-qpid
+python-routes
+python-sqlalchemy
+python-suds
+rabbitmq-server # NOPRIME
+qpid-cpp-server # NOPRIME
+sqlite
+sudo
+vconfig
diff --git a/functions b/functions
index b66dc15..386af09 100644
--- a/functions
+++ b/functions
@@ -9,6 +9,18 @@
set +o xtrace
+# Exit 0 if address is in network or 1 if
+# address is not in network or netaddr library
+# is not installed.
+function address_in_net() {
+ python -c "
+import netaddr
+import sys
+sys.exit(netaddr.IPAddress('$1') not in netaddr.IPNetwork('$2'))
+"
+}
+
+
# apt-get wrapper to set arguments correctly
# apt_get operation package [package ...]
function apt_get() {
@@ -17,6 +29,7 @@
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=$http_proxy https_proxy=$https_proxy \
+ no_proxy=$no_proxy \
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
@@ -116,6 +129,10 @@
if [[ ! $file_to_parse =~ cinder ]]; then
file_to_parse="${file_to_parse} cinder"
fi
+ elif [[ $service == ceilometer-* ]]; then
+ if [[ ! $file_to_parse =~ ceilometer ]]; then
+ file_to_parse="${file_to_parse} ceilometer"
+ fi
elif [[ $service == n-* ]]; then
if [[ ! $file_to_parse =~ nova ]]; then
file_to_parse="${file_to_parse} nova"
@@ -223,6 +240,30 @@
export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
}
+# git update using reference as a branch.
+function git_update_branch() {
+
+ GIT_BRANCH=$1
+
+ git checkout -f origin/$GIT_BRANCH
+ # a local branch might not exist
+ git branch -D $GIT_BRANCH || true
+ git checkout -b $GIT_BRANCH
+}
+
+
+# git update using reference as a tag. Be careful editing source at that repo
+# as working copy will be in a detached mode
+function git_update_tag() {
+
+ GIT_TAG=$1
+
+ git tag -d $GIT_TAG
+ # fetching given tag only
+ git fetch origin tag $GIT_TAG
+ git checkout -f $GIT_TAG
+}
+
# Translate the OS version values into common nomenclature
# Sets ``DISTRO`` from the ``os_*`` values
@@ -254,16 +295,16 @@
GIT_REMOTE=$1
GIT_DEST=$2
- GIT_BRANCH=$3
+ GIT_REF=$3
- if echo $GIT_BRANCH | egrep -q "^refs"; then
+ if echo $GIT_REF | egrep -q "^refs"; then
# If our branch name is a gerrit style refs/changes/...
if [[ ! -d $GIT_DEST ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && exit 1
git clone $GIT_REMOTE $GIT_DEST
fi
cd $GIT_DEST
- git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD
+ git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $GIT_DEST ]]; then
@@ -271,7 +312,7 @@
git clone $GIT_REMOTE $GIT_DEST
cd $GIT_DEST
# This checkout syntax works for both branches and tags
- git checkout $GIT_BRANCH
+ git checkout $GIT_REF
elif [[ "$RECLONE" == "yes" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
cd $GIT_DEST
@@ -282,10 +323,17 @@
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
find $GIT_DEST -name '*.pyc' -delete
- git checkout -f origin/$GIT_BRANCH
- # a local branch might not exist
- git branch -D $GIT_BRANCH || true
- git checkout -b $GIT_BRANCH
+
+        # handle GIT_REF according to its type (tag or branch)
+ if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
+ git_update_tag $GIT_REF
+ elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
+ git_update_branch $GIT_REF
+ else
+            echo "$GIT_REF is neither branch nor tag"
+ exit 1
+ fi
+
fi
fi
}
@@ -362,6 +410,7 @@
[[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
+ [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
[[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
done
@@ -370,7 +419,7 @@
# remove extra commas from the input string (ENABLED_SERVICES)
function _cleanup_service_list () {
- echo "$1" | sed -e '
+ echo "$1" | sed -e '
s/,,/,/g;
s/^,//;
s/,$//
@@ -486,6 +535,7 @@
$SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
+ NO_PROXY=$no_proxy \
$CMD_PIP install --use-mirrors $@
}
@@ -521,6 +571,7 @@
$SUDO_CMD \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
+ NO_PROXY=$no_proxy \
python setup.py develop \
)
}
@@ -567,6 +618,105 @@
}
+# Retrieve an image from a URL and upload into Glance
+# Uses the following variables:
+# **FILES** must be set to the cache dir
+# **GLANCE_HOSTPORT**
+# upload_image image-url glance-token
+function upload_image() {
+ local image_url=$1
+ local token=$2
+
+ # Create a directory for the downloaded image tarballs.
+ mkdir -p $FILES/images
+
+ # Downloads the image (uec ami+aki style), then extracts it.
+ IMAGE_FNAME=`basename "$image_url"`
+ if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
+ wget -c $image_url -O $FILES/$IMAGE_FNAME
+ if [[ $? -ne 0 ]]; then
+ echo "Not found: $image_url"
+ return
+ fi
+ fi
+
+ # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
+ if [[ "$image_url" =~ 'openvz' ]]; then
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}"
+ return
+ fi
+
+ KERNEL=""
+ RAMDISK=""
+ DISK_FORMAT=""
+ CONTAINER_FORMAT=""
+ UNPACK=""
+ case "$IMAGE_FNAME" in
+ *.tar.gz|*.tgz)
+ # Extract ami and aki files
+ [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+ IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+ IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+ xdir="$FILES/images/$IMAGE_NAME"
+ rm -Rf "$xdir";
+ mkdir "$xdir"
+ tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+ KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
+ [ -f "$f" ] && echo "$f" && break; done; true)
+ if [[ -z "$IMAGE_NAME" ]]; then
+ IMAGE_NAME=$(basename "$IMAGE" ".img")
+ fi
+ ;;
+ *.img)
+ IMAGE="$FILES/$IMAGE_FNAME";
+ IMAGE_NAME=$(basename "$IMAGE" ".img")
+ DISK_FORMAT=raw
+ CONTAINER_FORMAT=bare
+ ;;
+ *.img.gz)
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+ DISK_FORMAT=raw
+ CONTAINER_FORMAT=bare
+ UNPACK=zcat
+ ;;
+ *.qcow2)
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
+ DISK_FORMAT=qcow2
+ CONTAINER_FORMAT=bare
+ ;;
+ *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+ esac
+
+ if [ "$CONTAINER_FORMAT" = "bare" ]; then
+ if [ "$UNPACK" = "zcat" ]; then
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ else
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+ fi
+ else
+        # Use glance client to add the kernel and the root filesystem.
+ # We parse the results of the first upload to get the glance ID of the
+ # kernel for use when uploading the root filesystem.
+ KERNEL_ID=""; RAMDISK_ID="";
+ if [ -n "$KERNEL" ]; then
+ KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ fi
+ if [ -n "$RAMDISK" ]; then
+ RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ fi
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ fi
+}
+
+
# yum wrapper to set arguments correctly
# yum_install package [package ...]
function yum_install() {
@@ -574,6 +724,7 @@
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+ no_proxy=$no_proxy \
yum install -y "$@"
}
diff --git a/lib/ceilometer b/lib/ceilometer
new file mode 100644
index 0000000..4c3bb52
--- /dev/null
+++ b/lib/ceilometer
@@ -0,0 +1,66 @@
+# lib/ceilometer
+# Install and start Ceilometer service
+
+# Dependencies:
+# - functions
+
+# stack.sh
+# ---------
+# install_XXX
+# configure_XXX
+# init_XXX
+# start_XXX
+# stop_XXX
+# cleanup_XXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# set up default directories
+CEILOMETER_DIR=$DEST/ceilometer
+# Support potential entry-points console scripts
+if [ -d $CEILOMETER_DIR/bin ] ; then
+ CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
+else
+ CEILOMETER_BIN_DIR=/usr/local/bin
+fi
+CEILOMETER_CONF_DIR=/etc/ceilometer
+CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf
+CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf
+
+# cleanup_ceilometer() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceilometer() {
+ # This function intentionally left blank
+ :
+}
+
+# configure_ceilometer() - Set config files, create data dirs, etc
+function configure_ceilometer() {
+ setup_develop $CEILOMETER_DIR
+ if [ ! -d $CEILOMETER_CONF_DIR ]; then
+ sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR
+ fi
+ sudo chown `whoami` $CEILOMETER_CONF_DIR
+
+    # ceilometer confs are copies of /etc/nova/nova.conf, which must exist first
+ grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_AGENT_CONF
+ grep -v format_string $NOVA_CONF_DIR/$NOVA_CONF > $CEILOMETER_COLLECTOR_CONF
+}
+
+# install_ceilometer() - Collect source and prepare
+function install_ceilometer() {
+ git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
+}
+
+# start_ceilometer() - Start running processes, including screen
+function start_ceilometer() {
+ screen_it ceilometer-acompute "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF"
+ screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF"
+ screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF"
+}
diff --git a/lib/cinder b/lib/cinder
index 49ad4af..1bad5c0 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -25,6 +25,11 @@
# set up default directories
CINDER_DIR=$DEST/cinder
+if [ -d $CINDER_DIR/bin ] ; then
+ CINDER_BIN_DIR=$CINDER_DIR/bin
+else
+ CINDER_BIN_DIR=/usr/local/bin
+fi
CINDERCLIENT_DIR=$DEST/python-cinderclient
CINDER_CONF_DIR=/etc/cinder
CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
@@ -104,11 +109,24 @@
iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8
- iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST
- iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}"
+ iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions
+ if is_service_enabled qpid ; then
+ iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST
+ iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ fi
+
+ if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+ # Add color to logging output
+ iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_id)s %(project_id)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [[00;36m-%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
+ iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "[00;33mfrom (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d[00m"
+ iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s [01;35m%(instance)s[00m"
+ fi
}
# init_cinder() - Initialize database and volume group
@@ -122,7 +140,7 @@
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;'
# (re)create cinder database
- $CINDER_DIR/bin/cinder-manage db sync
+ $CINDER_BIN_DIR/cinder-manage db sync
fi
if is_service_enabled c-vol; then
@@ -186,9 +204,9 @@
fi
fi
- screen_it c-api "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-api --config-file $CINDER_CONF"
- screen_it c-vol "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-volume --config-file $CINDER_CONF"
- screen_it c-sch "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-scheduler --config-file $CINDER_CONF"
+ screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
+ screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+ screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
}
# stop_cinder() - Stop running processes (non-screen)
diff --git a/lib/heat b/lib/heat
new file mode 100644
index 0000000..6f442f8
--- /dev/null
+++ b/lib/heat
@@ -0,0 +1,156 @@
+# lib/heat
+# Install and start Heat service
+# To enable, add the following to localrc
+# ENABLED_SERVICES+=,heat,h-api,h-eng,h-meta
+
+# Dependencies:
+# - functions
+
+# stack.sh
+# ---------
+# install_XXX
+# configure_XXX
+# init_XXX
+# start_XXX
+# stop_XXX
+# cleanup_XXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+HEAT_DIR=$DEST/heat
+
+# set up default directories
+
+# cleanup_heat() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_heat() {
+ # This function intentionally left blank
+ :
+}
+
+# configure_heat() - Set config files, create data dirs, etc
+function configure_heat() {
+ setup_develop $HEAT_DIR
+
+ HEAT_CONF_DIR=/etc/heat
+ if [[ ! -d $HEAT_CONF_DIR ]]; then
+ sudo mkdir -p $HEAT_CONF_DIR
+ fi
+ sudo chown `whoami` $HEAT_CONF_DIR
+
+ HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
+ HEAT_API_PORT=${HEAT_API_PORT:-8000}
+ HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST}
+ HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
+ HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST}
+ HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002}
+
+ HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf
+ cp $HEAT_DIR/etc/heat-api.conf $HEAT_API_CONF
+ iniset $HEAT_API_CONF DEFAULT debug True
+ inicomment $HEAT_API_CONF DEFAULT log_file
+ iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG
+ iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST
+ iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT
+
+ if is_service_enabled rabbit; then
+ iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
+ iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST
+ elif is_service_enabled qpid; then
+ iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
+ fi
+
+ HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini
+ cp $HEAT_DIR/etc/heat-api-paste.ini $HEAT_API_PASTE_INI
+ iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+ iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat
+ iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+ iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
+
+ HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf
+ cp $HEAT_DIR/etc/heat-engine.conf $HEAT_ENGINE_CONF
+ iniset $HEAT_ENGINE_CONF DEFAULT debug True
+ inicomment $HEAT_ENGINE_CONF DEFAULT log_file
+ iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG
+ iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST
+ iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT
+ iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $BASE_SQL_CONN/heat?charset=utf8
+    iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/urandom`
+
+ if is_service_enabled rabbit; then
+ iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
+ iniset $HEAT_ENGINE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $HEAT_ENGINE_CONF DEFAULT rabbit_host $RABBIT_HOST
+ elif is_service_enabled qpid; then
+ iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
+ fi
+
+ HEAT_ENGINE_PASTE_INI=$HEAT_CONF_DIR/heat-engine-paste.ini
+ cp $HEAT_DIR/etc/heat-engine-paste.ini $HEAT_ENGINE_PASTE_INI
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_user heat
+ iniset $HEAT_ENGINE_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+
+ HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf
+ cp $HEAT_DIR/etc/heat-metadata.conf $HEAT_METADATA_CONF
+ iniset $HEAT_METADATA_CONF DEFAULT debug True
+ inicomment $HEAT_METADATA_CONF DEFAULT log_file
+ iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG
+ iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST
+ iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT
+
+ if is_service_enabled rabbit; then
+ iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
+ iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST
+ elif is_service_enabled qpid; then
+ iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
+ fi
+
+ HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini
+ cp $HEAT_DIR/etc/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI
+
+}
+
+# init_heat() - Initialize database
+function init_heat() {
+
+ # (re)create heat database
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS heat;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE heat CHARACTER SET utf8;'
+
+ $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD
+}
+
+# install_heat() - Collect source and prepare
+function install_heat() {
+ git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
+}
+
+# start_heat() - Start running processes, including screen
+function start_heat() {
+ screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf"
+    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF_DIR/heat-api.conf"
+    screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-file=$HEAT_CONF_DIR/heat-metadata.conf"
+}
+
+# stop_heat() - Stop running processes (non-screen)
+function stop_heat() {
+ # This function intentionally left blank
+ :
+}
diff --git a/lib/template b/lib/template
new file mode 100644
index 0000000..d70f218
--- /dev/null
+++ b/lib/template
@@ -0,0 +1,77 @@
+# lib/template
+# Functions to control the configuration and operation of the XXXX service
+# <do not include this template file in ``stack.sh``!>
+
+# Dependencies:
+# ``functions`` file
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# <list other global vars that are assumed to be defined>
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_XXXX
+# configure_XXXX
+# init_XXXX
+# start_XXXX
+# stop_XXXX
+# cleanup_XXXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+XXXX_DIR=$DEST/XXXX
+XXXX_CONF_DIR=/etc/XXXX
+
+
+# Entry Points
+# ------------
+
+# cleanup_XXXX() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_XXXX() {
+ # kill instances (nova)
+ # delete image files (glance)
+ # This function intentionally left blank
+ :
+}
+
+# configure_XXXX() - Set config files, create data dirs, etc
+function configure_XXXX() {
+ # sudo python setup.py deploy
+ # iniset $XXXX_CONF ...
+ # This function intentionally left blank
+ :
+}
+
+# init_XXXX() - Initialize databases, etc.
+function init_XXXX() {
+ # clean up from previous (possibly aborted) runs
+ # create required data files
+ :
+}
+
+# install_XXXX() - Collect source and prepare
+function install_XXXX() {
+ # git clone xxx
+ :
+}
+
+# start_XXXX() - Start running processes, including screen
+function start_XXXX() {
+ # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
+ :
+}
+
+# stop_XXXX() - Stop running processes (non-screen)
+function stop_XXXX() {
+    # FIXME(dtroyer): stop only our screen window?
+ :
+}
diff --git a/openrc b/openrc
index 4430e82..08ef98b 100644
--- a/openrc
+++ b/openrc
@@ -41,6 +41,10 @@
# or NOVA_PASSWORD.
export OS_PASSWORD=${ADMIN_PASSWORD:-secrete}
+# Don't put the key into a keyring by default. Testing for development is much
+# easier with this off.
+export OS_NO_CACHE=${OS_NO_CACHE:-1}
+
# Set api HOST_IP endpoint. SERVICE_HOST may also be used to specify the endpoint,
# which is convenient for some localrc configurations.
HOST_IP=${HOST_IP:-127.0.0.1}
diff --git a/stack.sh b/stack.sh
index 20e3e0c..665a366 100755
--- a/stack.sh
+++ b/stack.sh
@@ -2,7 +2,7 @@
# ``stack.sh`` is an opinionated OpenStack developer installation. It
# installs and configures various combinations of **Glance**, **Horizon**,
-# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift**
+# **Keystone**, **Nova**, **Quantum**, **Heat** and **Swift**
# This script allows you to specify configuration options of what git
# repositories to use, enabled services, network configuration and various
@@ -60,16 +60,21 @@
source $TOP_DIR/stackrc
# HTTP and HTTPS proxy servers are supported via the usual environment variables
-# ``http_proxy`` and ``https_proxy``. They can be set in ``localrc`` if necessary
+# ``http_proxy`` and ``https_proxy``. Additionally if you would like to access
+# a specific server directly and not through the proxy server, you can use the
+# ``no_proxy`` environment variable. They can be set in ``localrc`` if necessary
# or on the command line::
#
-# http_proxy=http://proxy.example.com:3128/ ./stack.sh
+# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
if [[ -n "$http_proxy" ]]; then
export http_proxy=$http_proxy
fi
if [[ -n "$https_proxy" ]]; then
export https_proxy=$https_proxy
fi
+if [[ -n "$no_proxy" ]]; then
+ export no_proxy=$no_proxy
+fi
# Destination path for installation ``DEST``
DEST=${DEST:-/opt/stack}
@@ -235,6 +240,8 @@
# Get project function libraries
source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
# Set the destination directories for openstack projects
NOVA_DIR=$DEST/nova
@@ -251,8 +258,6 @@
SWIFTCLIENT_DIR=$DEST/python-swiftclient
QUANTUM_DIR=$DEST/quantum
QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
-MELANGE_DIR=$DEST/melange
-MELANGECLIENT_DIR=$DEST/python-melangeclient
# Default Quantum Plugin
Q_PLUGIN=${Q_PLUGIN:-openvswitch}
@@ -261,19 +266,12 @@
# Default Quantum Host
Q_HOST=${Q_HOST:-localhost}
# Which Quantum API nova should use
-NOVA_USE_QUANTUM_API=${NOVA_USE_QUANTUM_API:-v1}
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
# Default auth strategy
Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-
-
-# Default Melange Port
-M_PORT=${M_PORT:-9898}
-# Default Melange Host
-M_HOST=${M_HOST:-localhost}
-# Melange MAC Address Range
-M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
+# Use namespace or not
+Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
@@ -284,13 +282,30 @@
# cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
-HOST_IP_IFACE=${HOST_IP_IFACE:-eth0}
-# Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable
+# Set fixed and floating range here so we can make sure not to use addresses
+# from either range when attempting to guess the ip to use for the host
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+
+# Find the interface used for the default route
+HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')}
+# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
- HOST_IP=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}' | head -n1`
- if [ "$HOST_IP" = "" ]; then
+ HOST_IP=""
+ HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/"); print parts[1]}'`
+ for IP in $HOST_IPS; do
+ # Attempt to filter out ip addresses that are part of the fixed and
+ # floating range. Note that this method only works if the 'netaddr'
+ # python library is installed. If it is not installed, an error
+ # will be printed and the first ip from the interface will be used.
+ if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
+ HOST_IP=$IP
+ break;
+ fi
+ done
+ if [ "$HOST_IP" == "" ]; then
echo "Could not determine host ip address."
- echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0"
+ echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
exit 1
fi
fi
@@ -369,11 +384,8 @@
fi
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-br100}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
@@ -419,14 +431,6 @@
#
# With Quantum networking the NET_MAN variable is ignored.
-# Using Melange IPAM:
-#
-# Make sure that quantum and melange are enabled in ENABLED_SERVICES.
-# If they are then the melange IPAM lib will be set in the QuantumManager.
-# Adding m-svc to ENABLED_SERVICES will start the melange service on this
-# host.
-
-
# MySQL & (RabbitMQ or Qpid)
# --------------------------
@@ -785,16 +789,15 @@
# quantum
git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
fi
-if is_service_enabled m-svc; then
- # melange
- git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
-fi
-if is_service_enabled melange; then
- git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
+if is_service_enabled heat; then
+ install_heat
fi
if is_service_enabled cinder; then
install_cinder
fi
+if is_service_enabled ceilometer; then
+ install_ceilometer
+fi
# Initialization
# ==============
@@ -829,11 +832,8 @@
setup_develop $QUANTUM_CLIENT_DIR
setup_develop $QUANTUM_DIR
fi
-if is_service_enabled m-svc; then
- setup_develop $MELANGE_DIR
-fi
-if is_service_enabled melange; then
- setup_develop $MELANGECLIENT_DIR
+if is_service_enabled heat; then
+ configure_heat
fi
if is_service_enabled cinder; then
configure_cinder
@@ -1116,20 +1116,12 @@
Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
Q_DB_NAME="ovs_quantum"
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
- fi
+ Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
Q_DB_NAME="quantum_linux_bridge"
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin"
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
- fi
+ Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
else
echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
exit 1
@@ -1153,20 +1145,15 @@
sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
fi
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api False
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True
- fi
+ Q_CONF_FILE=/etc/quantum/quantum.conf
+ cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
fi
# Quantum service (for controller node)
if is_service_enabled q-svc; then
- Q_CONF_FILE=/etc/quantum/quantum.conf
Q_API_PASTE_FILE=/etc/quantum/api-paste.ini
Q_POLICY_FILE=/etc/quantum/policy.json
- cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
@@ -1188,8 +1175,6 @@
iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME
iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD
-
- screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
fi
# Quantum agent (for compute nodes)
@@ -1210,11 +1195,9 @@
# Start up the quantum <-> linuxbridge agent
# set the default network interface
QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
- sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE
+ iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings default:$QUANTUM_LB_PRIVATE_INTERFACE
AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py"
fi
- # Start up the quantum agent
- screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
fi
# Quantum DHCP
@@ -1223,14 +1206,13 @@
Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini
- if [[ -e $QUANTUM_DIR/etc/dhcp_agent.ini ]]; then
- sudo cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
- fi
+ cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
# Set verbose
iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
# Set debug
iniset $Q_DHCP_CONF_FILE DEFAULT debug True
+ iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
# Update database
iniset $Q_DHCP_CONF_FILE DEFAULT db_connection "mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8"
@@ -1244,32 +1226,27 @@
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
fi
- # Start up the quantum agent
- screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE"
fi
-# Melange service
-if is_service_enabled m-svc; then
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;'
- else
- echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
+# Quantum RPC support - must be updated prior to starting any of the services
+if is_service_enabled quantum; then
+ iniset $Q_CONF_FILE DEFAULT control_exchange quantum
+ if is_service_enabled qpid ; then
+ iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST
+ iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD
fi
- MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
- cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
- sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE
- cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
- screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
- echo "Waiting for melange to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
- echo "melange-server did not start"
- exit 1
- fi
- melange mac_address_range create cidr=$M_MAC_RANGE
fi
+# Start the Quantum services
+screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum agent
+screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum DHCP agent
+screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
# Nova
# ----
@@ -1564,7 +1541,7 @@
# which has some default username and password if you have
# configured keystone it will checkout the directory.
if is_service_enabled key; then
- swift_auth_server+="authtoken keystone"
+ swift_auth_server+="authtoken keystoneauth"
else
swift_auth_server=tempauth
fi
@@ -1594,23 +1571,20 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
- cat <<EOF>>${SWIFT_CONFIG_PROXY_SERVER}
+ # Configure Keystone
+ sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
-[filter:keystone]
-paste.filter_factory = keystone.middleware.swift_auth:filter_factory
-operator_roles = Member,admin
+ iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
+ iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = ${KEYSTONE_AUTH_HOST}
-auth_port = ${KEYSTONE_AUTH_PORT}
-auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
-auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/
-admin_tenant_name = ${SERVICE_TENANT_NAME}
-admin_user = swift
-admin_password = ${SERVICE_PASSWORD}
-delay_auth_decision = 1
-EOF
if is_service_enabled swift3;then
cat <<EOF>>${SWIFT_CONFIG_PROXY_SERVER}
# NOTE(chmou): s3token middleware is not updated yet to use only
@@ -1782,6 +1756,7 @@
# Setup the tgt configuration file
if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
+ sudo mkdir -p /etc/tgt/conf.d
echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
fi
@@ -1794,6 +1769,13 @@
fi
fi
+# Support entry points installation of console scripts
+if [ -d $NOVA_DIR/bin ] ; then
+ NOVA_BIN_DIR=$NOVA_DIR/bin
+else
+ NOVA_BIN_DIR=/usr/local/bin
+fi
+
NOVA_CONF=nova.conf
function add_nova_opt {
echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
@@ -1815,28 +1797,13 @@
add_nova_opt "s3_host=$SERVICE_HOST"
add_nova_opt "s3_port=$S3_SERVICE_PORT"
if is_service_enabled quantum; then
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager"
- add_nova_opt "quantum_connection_host=$Q_HOST"
- add_nova_opt "quantum_connection_port=$Q_PORT"
- add_nova_opt "quantum_use_dhcp=True"
-
- if is_service_enabled melange; then
- add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib"
- add_nova_opt "use_melange_mac_generation=True"
- add_nova_opt "melange_host=$M_HOST"
- add_nova_opt "melange_port=$M_PORT"
- fi
-
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
- add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
- add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
- add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
- add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
- add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
- fi
+ add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
+ add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
+ add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
+ add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+ add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
+ add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
+ add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
@@ -1984,9 +1951,14 @@
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;'
# (re)create nova database
- $NOVA_DIR/bin/nova-manage db sync
+ $NOVA_BIN_DIR/nova-manage db sync
fi
+# Heat
+# ------
+if is_service_enabled heat; then
+ init_heat
+fi
# Launch Services
# ===============
@@ -2126,7 +2098,7 @@
# Launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
- screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
+ screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
echo "Waiting for nova-api to start..."
if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
echo "nova-api did not start"
@@ -2136,25 +2108,23 @@
# If we're using Quantum (i.e. q-svc is enabled), network creation has to
# happen after we've started the Quantum service.
-if is_service_enabled mysql && is_service_enabled nova; then
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- # Create a small network
- $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+if is_service_enabled q-svc; then
+ TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
- # Create some floating ips
- $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+ # Create a small network
+ # Since quantum command is executed in admin context at this point,
+ # --tenant_id needs to be specified.
+ NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
+ quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
+elif is_service_enabled mysql && is_service_enabled nova; then
+ # Create a small network
+ $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
- # Create a second pool
- $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+ # Create some floating ips
+ $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE
- # Create a small network
- # Since quantum command is executed in admin context at this point,
- # --tenant_id needs to be specified.
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
- quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
- fi
+ # Create a second pool
+ $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
# Launching nova-compute should be as simple as running ``nova-compute`` but
@@ -2163,46 +2133,50 @@
# within the context of our original shell (so our groups won't be updated).
# Use 'sg' to execute nova-compute as a member of the libvirtd group.
# We don't check for is_service_enable as screen_it does it for us
-screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_DIR/bin/nova-compute"
-screen_it n-crt "cd $NOVA_DIR && $NOVA_DIR/bin/nova-cert"
-screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
-screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
-screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
+screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
+screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
+screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume"
+screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
+screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
if is_service_enabled cinder; then
start_cinder
fi
+if is_service_enabled ceilometer; then
+ configure_ceilometer
+ start_ceilometer
+fi
screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
# Starting the nova-objectstore only if swift3 service is not enabled.
# Swift will act as s3 objectstore.
is_service_enabled swift3 || \
- screen_it n-obj "cd $NOVA_DIR && $NOVA_DIR/bin/nova-objectstore"
+ screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
+# launch heat engine, api and metadata
+if is_service_enabled heat; then
+ start_heat
+fi
# Install Images
# ==============
# Upload an image to glance.
#
-# The default image is a small ***TTY*** testing image, which lets you login
-# the username/password of root/password.
+# The default image is cirros, a small testing image, which lets you login as root
#
-# TTY also uses ``cloud-init``, supporting login via keypair and sending scripts as
+# cirros also uses ``cloud-init``, supporting login via keypair and sending scripts as
# userdata. See https://help.ubuntu.com/community/CloudInit for more on cloud-init
#
# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
#
-# * **natty**: http://uec-images.ubuntu.com/natty/current/natty-server-cloudimg-amd64.tar.gz
# * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
+# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then
- # Create a directory for the downloaded image tarballs.
- mkdir -p $FILES/images
-
TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
# Option to upload legacy ami-tty, which works with xenserver
@@ -2211,80 +2185,7 @@
fi
for image_url in ${IMAGE_URLS//,/ }; do
- # Downloads the image (uec ami+aki style), then extracts it.
- IMAGE_FNAME=`basename "$image_url"`
- if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
- wget -c $image_url -O $FILES/$IMAGE_FNAME
- fi
-
- # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
- if [[ "$image_url" =~ 'openvz' ]]; then
- IMAGE="$FILES/${IMAGE_FNAME}"
- IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
- glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format ami --disk-format ami < "$IMAGE"
- continue
- fi
-
- KERNEL=""
- RAMDISK=""
- DISK_FORMAT=""
- CONTAINER_FORMAT=""
- case "$IMAGE_FNAME" in
- *.tar.gz|*.tgz)
- # Extract ami and aki files
- [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
- IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
- IMAGE_NAME="${IMAGE_FNAME%.tgz}"
- xdir="$FILES/images/$IMAGE_NAME"
- rm -Rf "$xdir";
- mkdir "$xdir"
- tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
- KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
- [ -f "$f" ] && echo "$f" && break; done; true)
- RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
- [ -f "$f" ] && echo "$f" && break; done; true)
- IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
- [ -f "$f" ] && echo "$f" && break; done; true)
- if [[ -z "$IMAGE_NAME" ]]; then
- IMAGE_NAME=$(basename "$IMAGE" ".img")
- fi
- ;;
- *.img)
- IMAGE="$FILES/$IMAGE_FNAME";
- IMAGE_NAME=$(basename "$IMAGE" ".img")
- DISK_FORMAT=raw
- CONTAINER_FORMAT=bare
- ;;
- *.img.gz)
- IMAGE="$FILES/${IMAGE_FNAME}"
- IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
- DISK_FORMAT=raw
- CONTAINER_FORMAT=bare
- ;;
- *.qcow2)
- IMAGE="$FILES/${IMAGE_FNAME}"
- IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
- DISK_FORMAT=qcow2
- CONTAINER_FORMAT=bare
- ;;
- *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
- esac
-
- if [ "$CONTAINER_FORMAT" = "bare" ]; then
- glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
- else
- # Use glance client to add the kernel the root filesystem.
- # We parse the results of the first upload to get the glance ID of the
- # kernel for use when uploading the root filesystem.
- KERNEL_ID=""; RAMDISK_ID="";
- if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
- fi
- if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
- fi
- glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
- fi
+ upload_image $image_url $TOKEN
done
fi
diff --git a/stackrc b/stackrc
index bd4fe14..d8d1008 100644
--- a/stackrc
+++ b/stackrc
@@ -24,6 +24,10 @@
# Another option is http://review.openstack.org/p
GIT_BASE=https://github.com
+# metering service
+CEILOMETER_REPO=https://github.com/stackforge/ceilometer.git
+CEILOMETER_BRANCH=master
+
# volume service
CINDER_REPO=${GIT_BASE}/openstack/cinder
CINDER_BRANCH=master
@@ -91,13 +95,9 @@
TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git
TEMPEST_BRANCH=master
-# melange service
-MELANGE_REPO=${GIT_BASE}/openstack/melange.git
-MELANGE_BRANCH=master
-
-# python melange client library
-MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git
-MELANGECLIENT_BRANCH=master
+# heat service
+HEAT_REPO=${GIT_BASE}/heat-api/heat.git
+HEAT_BRANCH=master
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index ffb3777..0da5597 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -139,9 +139,9 @@
FLAVORS=""
for line in $FLAVOR_LINES; do
if [ -z $DEFAULT_INSTANCE_TYPE ]; then
- FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | cut -d' ' -f2`"
+ FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
else
- FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`"
+ FLAVORS="$FLAVORS `echo $line | grep -v "^\(|\s*ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`"
fi
done
IFS=" "
diff --git a/tools/upload_image.sh b/tools/upload_image.sh
new file mode 100755
index 0000000..dd21c9f
--- /dev/null
+++ b/tools/upload_image.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# upload_image.sh - Retrieve and upload an image into Glance
+#
+# upload_image.sh <image-url>
+#
+# Assumes credentials are set via OS_* environment variables
+
+function usage {
+ echo "$0 - Retrieve and upload an image into Glance"
+ echo ""
+ echo "Usage: $0 <image-url> [...]"
+ echo ""
+ echo "Assumes credentials are set via OS_* environment variables"
+ exit 1
+}
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc "" "" "" ""
+
+# Find the cache dir
+FILES=$TOP_DIR/files
+
+if [[ -z "$1" ]]; then
+ usage
+fi
+
+# Get a token to authenticate to glance
+TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+
+# Glance connection info. Note the port must be specified.
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_HOST:9292}
+
+for IMAGE in "$*"; do
+ upload_image $IMAGE $TOKEN
+done
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index fdc6a60..9eae190 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -44,19 +44,14 @@
exit 1
fi
-# Directory where our conf files are stored
-FILES_DIR=$TOP_DIR/files
-TEMPLATES_DIR=$TOP_DIR/templates
-
-# Directory for supporting script files
-SCRIPT_DIR=$TOP_DIR/scripts
-
-# Version of ubuntu with which we are working
-UBUNTU_VERSION=`cat $STAGING_DIR/etc/lsb-release | grep "DISTRIB_CODENAME=" | sed "s/DISTRIB_CODENAME=//"`
-KERNEL_VERSION=`ls $STAGING_DIR/boot/vmlinuz* | head -1 | sed "s/.*vmlinuz-//"`
-
# Configure dns (use same dns as dom0)
-cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
+# but only when not precise
+if [ "$UBUNTU_INST_RELEASE" != "precise" ]; then
+ cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
+elif [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then
+ echo "Configuration without DHCP not supported on Precise"
+ exit 1
+fi
# Copy over devstack
rm -f /tmp/devstack.tar
@@ -90,6 +85,7 @@
# Configure the network
INTERFACES=$STAGING_DIR/etc/network/interfaces
+TEMPLATES_DIR=$TOP_DIR/templates
cp $TEMPLATES_DIR/interfaces.in $INTERFACES
if [ $VM_IP == "dhcp" ]; then
echo 'eth1 on dhcp'
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 19453c1..0bb6ac8 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -169,7 +169,7 @@
HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`}
# Set up ip forwarding, but skip on xcp-xapi
-if [ -a /etc/sysconfig/network]; then
+if [ -a /etc/sysconfig/network ]; then
if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
# FIXME: This doesn't work on reboot!
echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
@@ -218,7 +218,7 @@
#
GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
-TNAME="devstack_template_folsom_11.10"
+TNAME="devstack_template"
SNAME_PREPARED="template_prepared"
SNAME_FIRST_BOOT="before_first_boot"
@@ -242,19 +242,6 @@
# Install Ubuntu over network
#
- # try to find ubuntu template
- ubuntu_template_name="Ubuntu 11.10 for DevStack (64-bit)"
- ubuntu_template=$(xe_min template-list name-label="$ubuntu_template_name")
-
- # remove template, if we are in CLEAN_TEMPLATE mode
- if [ -n "$ubuntu_template" ]; then
- if $CLEAN_TEMPLATES; then
- xe template-param-clear param-name=other-config uuid=$ubuntu_template
- xe template-uninstall template-uuid=$ubuntu_template force=true
- ubuntu_template=""
- fi
- fi
-
# always update the preseed file, incase we have a newer one
PRESEED_URL=${PRESEED_URL:-""}
if [ -z "$PRESEED_URL" ]; then
@@ -272,13 +259,12 @@
fi
fi
- if [ -z "$ubuntu_template" ]; then
- $TOP_DIR/scripts/xenoneirictemplate.sh $PRESEED_URL
- fi
+ # Update the template
+ $TOP_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL
# create a new VM with the given template
# creating the correct VIFs and metadata
- $TOP_DIR/scripts/install-os-vpx.sh -t "$ubuntu_template_name" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
+ $TOP_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
# wait for install to finish
wait_for_VM_to_halt
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 89a0169..4aa4554 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -18,6 +18,7 @@
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
STAGING_DIR=${STAGING_DIR:-stage}
DO_TGZ=${DO_TGZ:-1}
+XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"}
# Install basics
chroot $STAGING_DIR apt-get update
@@ -26,10 +27,8 @@
chroot $STAGING_DIR pip install xenapi
# Install XenServer guest utilities
-XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb
-wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST
-cp $XEGUEST $STAGING_DIR/root
-chroot $STAGING_DIR dpkg -i /root/$XEGUEST
+cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH}
+chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH
chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove
chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index 7c6dec4..60782d0 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -44,6 +44,28 @@
exit 1
fi
+# Copy XenServer tools deb into the VM
+ISO_DIR="/opt/xensource/packages/iso"
+XS_TOOLS_FILE_NAME="xs-tools.deb"
+XS_TOOLS_PATH="/root/$XS_TOOLS_FILE_NAME"
+if [ -e "$ISO_DIR" ]; then
+ TOOLS_ISO=$(ls $ISO_DIR/xs-tools-*.iso)
+ TMP_DIR=/tmp/temp.$RANDOM
+ mkdir -p $TMP_DIR
+ mount -o loop $TOOLS_ISO $TMP_DIR
+ DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb)
+ echo "Copying XenServer tools into VM from: $DEB_FILE"
+ cp $DEB_FILE "${STAGING_DIR}${XS_TOOLS_PATH}"
+ umount $TMP_DIR
+ rm -rf $TMP_DIR
+else
+ echo "WARNING: no XenServer tools found, falling back to 5.6 tools"
+ TOOLS_URL="http://images.ansolabs.com/xen/xe-guest-utilities_5.6.100-651_amd64.deb"
+ wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME
+ cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}"
+ rm -rf $XS_TOOLS_FILE_NAME
+fi
+
# Copy prepare_guest.sh to VM
mkdir -p $STAGING_DIR/opt/stack/
cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh
@@ -53,5 +75,7 @@
# run prepare_guest.sh on boot
cat <<EOF >$STAGING_DIR/etc/rc.local
-GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \
+ DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \
+ bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
EOF
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
new file mode 100755
index 0000000..f67547b
--- /dev/null
+++ b/tools/xen/scripts/install_ubuntu_template.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# This creates an Ubuntu Server 32bit or 64bit template
+# on Xenserver 5.6.x, 6.0.x and 6.1.x
+# The template does a net install only
+#
+# Based on a script by: David Markey <david.markey@citrix.com>
+#
+
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
+
+# This directory
+BASE_DIR=$(cd $(dirname "$0") && pwd)
+
+# For default settings see xenrc
+source $BASE_DIR/../xenrc
+
+# Get the params
+preseed_url=$1
+
+# Delete template or skip template creation as required
+previous_template=$(xe template-list name-label="$UBUNTU_INST_TEMPLATE_NAME" \
+ params=uuid --minimal)
+if [ -n "$previous_template" ]; then
+ if $CLEAN_TEMPLATES; then
+ xe template-param-clear param-name=other-config uuid=$previous_template
+ xe template-uninstall template-uuid=$previous_template force=true
+ else
+ echo "Template $UBUNTU_INST_TEMPLATE_NAME already present"
+ exit 0
+ fi
+fi
+
+# Get built-in template
+builtin_name="Debian Squeeze 6.0 (32-bit)"
+builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal)
+if [[ -z $builtin_uuid ]]; then
+ echo "Cant find the Debian Squeeze 32bit template on your XenServer."
+ exit 1
+fi
+
+# Clone built-in template to create new template
+new_uuid=$(xe vm-clone uuid=$builtin_uuid \
+ new-name-label="$UBUNTU_INST_TEMPLATE_NAME")
+
+# Some of these settings can be found in example preseed files
+# however these need to be answered before the netinstall
+# is ready to fetch the preseed file, and as such must be here
+# to get a fully automated install
+pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \
+console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \
+keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \
+netcfg/choose_interface=${HOST_IP_IFACE} \
+netcfg/get_hostname=os netcfg/get_domain=os auto \
+url=${preseed_url}"
+
+if [ "$NETINSTALLIP" != "dhcp" ]; then
+ netcfgargs="netcfg/disable_autoconfig=true \
+netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \
+netcfg/get_ipaddress=${UBUNTU_INST_IP} \
+netcfg/get_netmask=${UBUNTU_INST_NETMASK} \
+netcfg/get_gateway=${UBUNTU_INST_GATEWAY} \
+netcfg/confirm_static=true"
+ pvargs="${pvargs} ${netcfgargs}"
+fi
+
+xe template-param-set uuid=$new_uuid \
+ other-config:install-methods=http \
+ other-config:install-repository="$UBUNTU_INST_REPOSITORY" \
+ PV-args="$pvargs" \
+ other-config:debian-release="$UBUNTU_INST_RELEASE" \
+ other-config:default_template=true \
+ other-config:install-arch="$UBUNTU_INST_ARCH"
+
+echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh
deleted file mode 100755
index 7f10c33..0000000
--- a/tools/xen/scripts/xenoneirictemplate.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-## makeubuntu.sh, this creates Ubuntu server 11.10 32 and 64 bit templates
-## on Xenserver 6.0.2 Net install only
-## Original Author: David Markey <david.markey@citrix.com>
-## Author: Renuka Apte <renuka.apte@citrix.com>
-## This is not an officially supported guest OS on XenServer 6.0.2
-
-BASE_DIR=$(cd $(dirname "$0") && pwd)
-source $BASE_DIR/../../../localrc
-
-LENNY=$(xe template-list name-label=Debian\ Squeeze\ 6.0\ \(32-bit\) --minimal)
-
-if [[ -z $LENNY ]] ; then
- echo "Cant find Squeeze 32bit template."
- exit 1
-fi
-
-distro="Ubuntu 11.10 for DevStack"
-arches=("32-bit" "64-bit")
-
-preseedurl=${1:-"http://images.ansolabs.com/devstackubuntupreseed.cfg"}
-
-NETINSTALL_LOCALE=${NETINSTALL_LOCALE:-en_US}
-NETINSTALL_KEYBOARD=${NETINSTALL_KEYBOARD:-us}
-NETINSTALL_IFACE=${NETINSTALL_IFACE:-eth3}
-
-for arch in ${arches[@]} ; do
- echo "Attempting $distro ($arch)"
- if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then
- echo "$distro ($arch)" already exists, Skipping
- else
- if [ -z $NETINSTALLIP ]
- then
- echo "NETINSTALLIP not set in localrc"
- exit 1
- fi
- # Some of these settings can be found in example preseed files
- # however these need to be answered before the netinstall
- # is ready to fetch the preseed file, and as such must be here
- # to get a fully automated install
- pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=${NETINSTALL_LOCALE} console-setup/ask_detect=false keyboard-configuration/layoutcode=${NETINSTALL_KEYBOARD} netcfg/choose_interface=${NETINSTALL_IFACE} netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}"
- if [ "$NETINSTALLIP" != "dhcp" ]
- then
- netcfgargs="netcfg/disable_autoconfig=true netcfg/get_nameservers=${NAMESERVERS} netcfg/get_ipaddress=${NETINSTALLIP} netcfg/get_netmask=${NETMASK} netcfg/get_gateway=${GATEWAY} netcfg/confirm_static=true"
- pvargs="${pvargs} ${netcfgargs}"
- fi
- NEWUUID=$(xe vm-clone uuid=$LENNY new-name-label="$distro ($arch)")
- xe template-param-set uuid=$NEWUUID other-config:install-methods=http,ftp \
- other-config:install-repository=http://archive.ubuntu.net/ubuntu \
- PV-args="$pvargs" \
- other-config:debian-release=oneiric \
- other-config:default_template=true
-
- if [[ "$arch" == "32-bit" ]] ; then
- xe template-param-set uuid=$NEWUUID other-config:install-arch="i386"
- else
- xe template-param-set uuid=$NEWUUID other-config:install-arch="amd64"
- fi
- echo "Success"
- fi
-done
-
-echo "Done"
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 102a492..0365a25 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -1,5 +1,10 @@
#!/bin/bash
+#
+# XenServer specific defaults for the /tools/xen/ scripts
+# Similar to stackrc, you can override these in your localrc
+#
+
# Name of this guest
GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
@@ -10,13 +15,18 @@
# VM Password
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
-# Host Interface, i.e. the interface on the nova vm you want to expose the services on
-# Usually either eth2 (management network) or eth3 (public network)
+# Host Interface, i.e. the interface on the nova vm you want to expose the
+# services on. Usually eth2 (management network) or eth3 (public network) and
# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
+# This is also used as the interface for the Ubuntu install
HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}
+#
# Our nova host's network info
-VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused
+#
+
+# A host-only ip that lets the interface come up, otherwise unused
+VM_IP=${VM_IP:-10.255.255.255}
MGT_IP=${MGT_IP:-172.16.100.55}
PUB_IP=${PUB_IP:-192.168.1.55}
@@ -38,8 +48,28 @@
MGT_VLAN=${MGT_VLAN:-101}
MGT_DEV=${MGT_DEV:-eth0}
-# Guest installer network
+# Decide if you should enable eth0,
+# the guest installer network
+# You need to disable this on xcp-xapi on Ubuntu 12.04
ENABLE_GI=true
-# Source params
+# Ubuntu install settings
+UBUNTU_INST_RELEASE="oneiric"
+UBUNTU_INST_TEMPLATE_NAME="Ubuntu 11.10 (64-bit) for DevStack"
+# For 12.04 use "precise" and update template name
+# However, for 12.04, you should be using
+# XenServer 6.1 and later or XCP 1.6 or later
+# 11.10 is only really supported with XenServer 6.0.2 and later
+UBUNTU_INST_ARCH="amd64"
+UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu"
+UBUNTU_INST_LOCALE="en_US"
+UBUNTU_INST_KEYBOARD="us"
+# network configuration for HOST_IP_IFACE during install
+UBUNTU_INST_IP="dhcp"
+UBUNTU_INST_NAMESERVERS=""
+UBUNTU_INST_NETMASK=""
+UBUNTU_INST_GATEWAY=""
+
+# Load stackrc defaults
+# then override with settings from localrc
cd ../.. && source ./stackrc && cd $TOP_DIR
diff --git a/unstack.sh b/unstack.sh
index 6a55a0a..17752a8 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -37,7 +37,7 @@
# Swift runs daemons
if is_service_enabled swift; then
- swift-init all stop
+ swift-init all stop 2>/dev/null || true
fi
# Apache has the WSGI processes
@@ -54,7 +54,12 @@
echo "iSCSI target cleanup needed:"
echo "$TARGETS"
fi
- stop_service tgt
+
+ if [[ "$os_PACKAGE" = "deb" ]]; then
+ stop_service tgt
+ else
+ stop_service tgtd
+ fi
fi
if [[ -n "$UNSTACK_ALL" ]]; then