Merge "clear screen rc file every time you run stack.sh"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index c967e39..460b50c 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -3,10 +3,8 @@
# **boot_from_volume.sh**
# This script demonstrates how to boot from a volume. It does the following:
-# * Create a 'builder' instance
-# * Attach a volume to the instance
-# * Format and install an os onto the volume
-# * Detach volume from builder, and then boot volume-backed instance
+# * Create a bootable volume
+# * Boot a volume-backed instance
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
@@ -37,6 +35,10 @@
# Import exercise configuration
source $TOP_DIR/exerciserc
+# If neither cinder nor n-vol is enabled we exit with exit code 55 so that
+# the exercise is marked as skipped
+is_service_enabled cinder n-vol || exit 55
+
# Boot this image, use first AMI image if unset
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
@@ -61,16 +63,13 @@
die_if_not_set IMAGE "Failure getting image"
# Instance and volume names
-INSTANCE_NAME=${INSTANCE_NAME:-test_instance}
VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance}
VOL_NAME=${VOL_NAME:-test_volume}
# Clean-up from previous runs
nova delete $VOL_INSTANCE_NAME || true
-nova delete $INSTANCE_NAME || true
-# Wait till server is gone
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then
echo "server didn't terminate!"
exit 1
fi
@@ -95,16 +94,6 @@
nova keypair-add $KEY_NAME > $KEY_FILE
chmod 600 $KEY_FILE
-# Boot our instance
-VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP --key_name $KEY_NAME $INSTANCE_NAME | grep ' id ' | get_field 2`
-die_if_not_set VM_UUID "Failure launching $INSTANCE_NAME"
-
-# check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server didn't become active!"
- exit 1
-fi
-
# Delete the old volume
nova volume-delete $VOL_NAME || true
@@ -122,17 +111,8 @@
exit 1
fi
-# Add floating ip to our server
-nova add-floating-ip $VM_UUID $FLOATING_IP
-
-# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
- echo "Couldn't ping server with floating ip"
- exit 1
-fi
-
-# Create our volume
-nova volume-create --display_name=$VOL_NAME 1
+# Create the bootable volume
+nova volume-create --display_name=$VOL_NAME --image-id $IMAGE 1
# Wait for volume to activate
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
@@ -140,62 +120,7 @@
exit 1
fi
-# FIXME (anthony) - python-novaclient should accept a volume_name for the attachment param?
-DEVICE=/dev/vdb
VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1`
-nova volume-attach $INSTANCE_NAME $VOLUME_ID $DEVICE
-
-# Wait till volume is attached
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
- echo "Volume $VOL_NAME not created"
- exit 1
-fi
-
-# The following script builds our bootable volume.
-# To do this, ssh to the builder instance, mount volume, and build a volume-backed image.
-STAGING_DIR=/tmp/stage
-CIRROS_DIR=/tmp/cirros
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
-set -o errexit
-set -o xtrace
-sudo mkdir -p $STAGING_DIR
-sudo mkfs.ext3 -b 1024 $DEVICE 1048576
-sudo mount $DEVICE $STAGING_DIR
-# The following lines create a writable empty file so that we can scp
-# the actual file
-sudo touch $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz
-sudo chown cirros $STAGING_DIR/cirros-0.3.0-x86_64-rootfs.img.gz
-EOF
-
-# Download cirros
-if [ ! -e cirros-0.3.0-x86_64-rootfs.img.gz ]; then
- wget http://images.ansolabs.com/cirros-0.3.0-x86_64-rootfs.img.gz
-fi
-
-# Copy cirros onto the volume
-scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz ${DEFAULT_INSTANCE_USER}@$FLOATING_IP:$STAGING_DIR
-
-# Unpack cirros into volume
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
-set -o errexit
-set -o xtrace
-cd $STAGING_DIR
-sudo mkdir -p $CIRROS_DIR
-sudo gunzip cirros-0.3.0-x86_64-rootfs.img.gz
-sudo mount cirros-0.3.0-x86_64-rootfs.img $CIRROS_DIR
-
-# Copy cirros into our volume
-sudo cp -pr $CIRROS_DIR/* $STAGING_DIR/
-
-cd
-sync
-sudo umount $CIRROS_DIR
-# The following typically fails. Don't know why.
-sudo umount $STAGING_DIR || true
-EOF
-
-# Detach the volume from the builder instance
-nova volume-detach $INSTANCE_NAME $VOLUME_ID
# Boot instance from volume! This is done with the --block_device_mapping param.
# The format of mapping is:
@@ -211,24 +136,16 @@
fi
# Add floating ip to our server
-nova remove-floating-ip $VM_UUID $FLOATING_IP
-
-# Gratuitous sleep, probably hiding a race condition :/
-sleep 1
-
-# Add floating ip to our server
nova add-floating-ip $VOL_VM_UUID $FLOATING_IP
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
- echo "Couldn't ping volume-backed server with floating ip"
- exit 1
-fi
+ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
# Make sure our volume-backed instance launched
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
-echo "success!"
-EOF
+ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT
+
+# Remove floating ip from volume-backed instance
+nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP
# Delete volume backed instance
nova delete $VOL_INSTANCE_NAME || \
@@ -244,16 +161,6 @@
nova volume-delete $VOL_NAME || \
die "Failure deleting volume $VOLUME_NAME"
-# Delete instance
-nova delete $INSTANCE_NAME || \
- die "Failure deleting instance $INSTANCE_NAME"
-
-# Wait for termination
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- echo "Server $NAME not deleted"
- exit 1
-fi
-
# De-allocate the floating ip
nova floating-ip-delete $FLOATING_IP || \
die "Failure deleting floating IP $FLOATING_IP"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 58b5d91..b121493 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -130,10 +130,7 @@
die "Failure authorizing rule in $SECGROUP"
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
- echo "Couldn't ping server with floating ip"
- exit 1
-fi
+ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
# Revoke pinging
euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 77f020e..6787878 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -118,23 +118,10 @@
fi
# get the IP of the server
-IP=`nova show $VM_UUID | grep "private network" | get_field 2`
+IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2`
die_if_not_set IP "Failure retrieving IP address"
-# for single node deployments, we can ping private ips
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-if [ "$MULTI_HOST" = "False" ]; then
- # sometimes the first ping fails (10 seconds isn't enough time for the VM's
- # network to respond?), so let's ping for a default of 15 seconds with a
- # timeout of a second for each ping.
- if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
- echo "Couldn't ping server"
- exit 1
- fi
-else
- # On a multi-host system, without vm net access, do a sleep to wait for the boot
- sleep $BOOT_TIMEOUT
-fi
+ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
# Security Groups & Floating IPs
# ------------------------------
@@ -166,10 +153,7 @@
die "Failure adding floating IP $FLOATING_IP to $NAME"
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
- echo "Couldn't ping server with floating ip"
- exit 1
-fi
+ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
# Allocate an IP from second floating pool
TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1`
@@ -187,19 +171,16 @@
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
    # test we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
- print "Security group failure - ping should not be allowed!"
- echo "Couldn't ping server with floating ip"
- exit 1
- fi
+ ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
fi
-# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP"
-
# Delete second floating IP
nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP"
+
+# de-allocate the floating ip
+nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP"
+
# Shutdown the server
nova delete $VM_UUID || die "Failure deleting instance $NAME"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index ffa12c4..1c73786 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -119,23 +119,11 @@
fi
# get the IP of the server
-IP=`nova show $VM_UUID | grep "private network" | get_field 2`
+IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2`
die_if_not_set IP "Failure retrieving IP address"
# for single node deployments, we can ping private ips
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-if [ "$MULTI_HOST" = "False" ]; then
- # sometimes the first ping fails (10 seconds isn't enough time for the VM's
- # network to respond?), so let's ping for a default of 15 seconds with a
- # timeout of a second for each ping.
- if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
- echo "Couldn't ping server"
- exit 1
- fi
-else
- # On a multi-host system, without vm net access, do a sleep to wait for the boot
- sleep $BOOT_TIMEOUT
-fi
+ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
# Volumes
# -------
diff --git a/files/apts/general b/files/apts/general
index be7bf98..12a92e0 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -20,3 +20,4 @@
euca2ools # only for testing client
tar
python-cmd2 # dist:precise
+python-netaddr
diff --git a/files/apts/quantum b/files/apts/quantum
index ed3887b..39f4561 100644
--- a/files/apts/quantum
+++ b/files/apts/quantum
@@ -1,8 +1,13 @@
+ebtables
iptables
+iputils-ping
mysql-server #NOPRIME
sudo
+python-boto
+python-iso8601
python-paste
python-routes
+python-suds
python-netaddr
python-pastedeploy
python-greenlet
@@ -14,3 +19,7 @@
python-qpid # dist:precise
dnsmasq-base
dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal
+rabbitmq-server # NOPRIME
+qpid # NOPRIME
+sqlite3
+vlan
diff --git a/files/apts/ryu b/files/apts/ryu
new file mode 100644
index 0000000..1e8f2d2
--- /dev/null
+++ b/files/apts/ryu
@@ -0,0 +1,4 @@
+python-setuptools
+python-gevent
+python-gflags
+python-sphinx
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 7da07aa..9520b17 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -262,6 +262,21 @@
fi
fi
+if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+ CEILOMETER_SERVICE=$(get_id keystone service-create \
+ --name=ceilometer \
+ --type=metering \
+ --description="Ceilometer Service")
+ keystone endpoint-create \
+ --region RegionOne \
+ --service_id $CEILOMETER_SERVICE \
+ --publicurl "http://$SERVICE_HOST:8777/" \
+ --adminurl "http://$SERVICE_HOST:8777/" \
+ --internalurl "http://$SERVICE_HOST:8777/"
+ fi
+fi
+
# EC2
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
diff --git a/files/rpms/general b/files/rpms/general
index 6d89d2e..e4f143d 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -5,6 +5,7 @@
openssh-server
psmisc
pylint
+python-netaddr
python-pep8
python-pip
python-unittest2
diff --git a/files/rpms/ryu b/files/rpms/ryu
new file mode 100644
index 0000000..1e8f2d2
--- /dev/null
+++ b/files/rpms/ryu
@@ -0,0 +1,4 @@
+python-setuptools
+python-gevent
+python-gflags
+python-sphinx
diff --git a/functions b/functions
index 0d0df51..dbe9d30 100644
--- a/functions
+++ b/functions
@@ -849,6 +849,42 @@
yum install -y "$@"
}
+# ping check: wait until an address answers ping or the timeout expires
+# Uses globals ``ENABLED_SERVICES``
+function ping_check() {
+ _ping_check_novanet "$1" $2 $3
+}
+
+# ping check for nova
+# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK_NAME``
+function _ping_check_novanet() {
+ local from_net=$1
+ local ip=$2
+ local boot_timeout=$3
+ MULTI_HOST=`trueorfalse False $MULTI_HOST`
+ if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
+ sleep $boot_timeout
+ return
+ fi
+ if ! timeout $boot_timeout sh -c "while ! ping -c1 -w1 $ip; do sleep 1; done"; then
+ echo "Couldn't ping server"
+ exit 1
+ fi
+}
+
+# ssh check: wait until an instance accepts an ssh login or the timeout expires
+function ssh_check() {
+ local NET_NAME=$1
+ local KEY_FILE=$2
+ local FLOATING_IP=$3
+ local DEFAULT_INSTANCE_USER=$4
+ local ACTIVE_TIMEOUT=$5
+    local probe_cmd=""
+ if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
+ echo "server didn't become ssh-able!"
+ exit 1
+ fi
+}
# Restore xtrace
$XTRACE
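The exercises above now lean on these two helpers instead of open-coding the timeout loops. A minimal usage sketch, with variable names borrowed from the exercises:

    # Wait for the fixed IP to answer ping (multi-host setups just sleep instead)
    ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT

    # Wait for the floating IP, then for an ssh login as the image's default user
    ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
    ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT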
diff --git a/lib/ceilometer b/lib/ceilometer
index 7154ccb..043f481 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -8,7 +8,6 @@
# - functions
# - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials
# - DEST set to the destination directory
-# - NOVA_CONF set to the nova configuration file
# stack.sh
# ---------
@@ -36,8 +35,7 @@
CEILOMETER_BIN_DIR=/usr/local/bin
fi
CEILOMETER_CONF_DIR=/etc/ceilometer
-CEILOMETER_AGENT_CONF=$CEILOMETER_CONF_DIR/ceilometer-agent.conf
-CEILOMETER_COLLECTOR_CONF=$CEILOMETER_CONF_DIR/ceilometer-collector.conf
+CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
@@ -57,13 +55,14 @@
[ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR
sudo chown $USER $CEILOMETER_API_LOG_DIR
- # ceilometer confs are copy of /etc/nova/nova.conf which must exist first
- grep -v format_string $NOVA_CONF > $CEILOMETER_AGENT_CONF
- iniset $CEILOMETER_AGENT_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
+ iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
+ iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
+ iniset $CEILOMETER_CONF DEFAULT verbose True
+ iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST
+ iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+ iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8
- grep -v format_string $NOVA_CONF > $CEILOMETER_COLLECTOR_CONF
- iniset $CEILOMETER_COLLECTOR_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
- iniset $CEILOMETER_COLLECTOR_CONF DEFAULT notification_topics 'notifications,glance_notifications'
+ iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http
}
# install_ceilometer() - Collect source and prepare
@@ -73,10 +72,10 @@
# start_ceilometer() - Start running processes, including screen
function start_ceilometer() {
- screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_AGENT_CONF\""
- screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_AGENT_CONF"
- screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_COLLECTOR_CONF"
- screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR"
+ screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
+ screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
}
# stop_ceilometer() - Stop running processes
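With the separate agent and collector configs collapsed into one file, the iniset calls above produce a single /etc/ceilometer/ceilometer.conf shared by every ceilometer service, roughly along these lines (the host, password and connection values below are placeholders, not the generated ones):

    [DEFAULT]
    rpc_backend = ceilometer.openstack.common.rpc.impl_kombu
    notification_topics = notifications,glance_notifications
    verbose = True
    rabbit_host = localhost
    rabbit_password = password
    sql_connection = mysql://root:password@localhost/nova?charset=utf8

    [keystone_authtoken]
    auth_protocol = http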
diff --git a/lib/nova b/lib/nova
index 333695e..95d5d87 100644
--- a/lib/nova
+++ b/lib/nova
@@ -12,6 +12,7 @@
#
# install_nova
# configure_nova
+# create_nova_conf
# init_nova
# start_nova
# stop_nova
@@ -213,7 +214,7 @@
fi
fi
- if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
+ if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
# Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
cat <<EOF | sudo tee -a $QEMU_CONF
cgroup_device_acl = [
@@ -274,8 +275,8 @@
fi
}
-# init_nova() - Initialize databases, etc.
-function init_nova() {
+# create_nova_conf() - Create a new nova.conf file
+function create_nova_conf() {
# Remove legacy ``nova.conf``
rm -f $NOVA_DIR/bin/nova.conf
@@ -299,7 +300,6 @@
add_nova_opt "libvirt_type=$LIBVIRT_TYPE"
add_nova_opt "libvirt_cpu_mode=none"
add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
- add_nova_opt "image_service=nova.image.glance.GlanceImageService"
if is_service_enabled n-api; then
add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
@@ -316,6 +316,7 @@
fi
if [ -n "$NOVA_STATE_PATH" ]; then
add_nova_opt "state_path=$NOVA_STATE_PATH"
+ add_nova_opt "lock_path=$NOVA_STATE_PATH"
fi
if [ -n "$NOVA_INSTANCES_PATH" ]; then
add_nova_opt "instances_path=$NOVA_INSTANCES_PATH"
@@ -340,6 +341,13 @@
# Show user_name and project_name instead of user_id and project_id
add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
fi
+ if is_service_enabled ceilometer; then
+ add_nova_opt "instance_usage_audit=True"
+ add_nova_opt "instance_usage_audit_period=hour"
+ add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier"
+ add_nova_opt "notification_driver=ceilometer.compute.nova_notifier"
+ fi
+
# Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
@@ -352,7 +360,10 @@
# Attempt to convert flags to options
add_nova_opt ${I//--}
done
+}
+# init_nova() - Initialize databases, etc.
+function init_nova() {
# Nova Database
# -------------
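Splitting create_nova_conf() out of init_nova() means callers are expected to write the config before touching the database; the stack.sh hunk further down does essentially this (sketch):

    if is_service_enabled nova; then
        create_nova_conf    # rebuild nova.conf from scratch
        init_nova           # then initialize the nova database
    fi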
diff --git a/lib/quantum b/lib/quantum
index 1025d2b..f9e1782 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -33,5 +33,29 @@
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
+function quantum_setup_external_bridge() {
+ local bridge=$1
+ # Create it if it does not exist
+ sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
+ # remove internal ports
+ for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do
+ TYPE=$(sudo ovs-vsctl get interface $PORT type)
+ if [[ "$TYPE" == "internal" ]]; then
+ echo `sudo ip link delete $PORT` > /dev/null
+ sudo ovs-vsctl --no-wait del-port $bridge $PORT
+ fi
+ done
+ # ensure no IP is configured on the public bridge
+ sudo ip addr flush dev $bridge
+}
+
+function is_quantum_ovs_base_plugin() {
+ local plugin=$1
+ if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then
+ return 0
+ fi
+ return 1
+}
+
# Restore xtrace
$XTRACE
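Both helpers are consumed by stack.sh below; condensed, the intent is (sketch only):

    # openvswitch and ryu share the same OVS-based setup paths
    if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
        # e.g. recreate the external bridge used by the l3 agent
        quantum_setup_external_bridge $PUBLIC_BRIDGE
    fi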
diff --git a/openrc b/openrc
index 08ef98b..0a6a215 100644
--- a/openrc
+++ b/openrc
@@ -72,3 +72,6 @@
# set log level to DEBUG (helps debug issues)
# export KEYSTONECLIENT_DEBUG=1
# export NOVACLIENT_DEBUG=1
+
+# Set the config file used by the quantum debug command
+export TEST_CONFIG_FILE=/etc/quantum/debug.ini
diff --git a/stack.sh b/stack.sh
index 4904576..d101798 100755
--- a/stack.sh
+++ b/stack.sh
@@ -97,7 +97,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16|f17) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
echo "If you wish to run this script anyway run with FORCE=yes"
@@ -342,6 +342,18 @@
# Meta data IP
Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+RYU_DIR=$DEST/ryu
+# Ryu API Host
+RYU_API_HOST=${RYU_API_HOST:-127.0.0.1}
+# Ryu API Port
+RYU_API_PORT=${RYU_API_PORT:-8080}
+# Ryu OFP Host
+RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1}
+# Ryu OFP Port
+RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
+# Ryu Applications
+RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
+
# Name of the LVM volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
@@ -491,7 +503,7 @@
# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
# Default is the common DevStack data directory.
-SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift}
+SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
# Default is ``/etc/swift``.
@@ -773,7 +785,7 @@
fi
if is_service_enabled q-agt; then
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
# Install deps
# FIXME add to files/apts/quantum, but don't install if not needed!
if [[ "$os_PACKAGE" = "deb" ]]; then
@@ -875,7 +887,9 @@
if is_service_enabled tempest; then
install_tempest
fi
-
+if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
+ git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+fi
# Initialization
# ==============
@@ -924,6 +938,9 @@
if is_service_enabled tempest; then
configure_tempest
fi
+if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then
+ setup_develop $RYU_DIR
+fi
if [[ $TRACK_DEPENDS = True ]] ; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
@@ -1137,6 +1154,31 @@
fi
+# Ryu
+# ---
+# Ryu is not part of the OpenStack project. Please ignore the following block
+# if you are not interested in Ryu.
+# launch ryu manager
+if is_service_enabled ryu; then
+ RYU_CONF_DIR=/etc/ryu
+ if [[ ! -d $RYU_CONF_DIR ]]; then
+ sudo mkdir -p $RYU_CONF_DIR
+ fi
+ sudo chown `whoami` $RYU_CONF_DIR
+ RYU_CONF=$RYU_CONF_DIR/ryu.conf
+ sudo rm -rf $RYU_CONF
+
+ cat <<EOF > $RYU_CONF
+--app_lists=$RYU_APPS
+--wsapi_host=$RYU_API_HOST
+--wsapi_port=$RYU_API_PORT
+--ofp_listen_host=$RYU_OFP_HOST
+--ofp_tcp_listen_port=$RYU_OFP_PORT
+EOF
+ screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+fi
+
+
# Quantum
# -------
@@ -1224,8 +1266,15 @@
Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
Q_DB_NAME="quantum_linux_bridge"
Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
- else
- echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu
+ Q_PLUGIN_CONF_FILENAME=ryu.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2"
+ fi
+
+ if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
+ echo "Quantum plugin not set.. exiting"
exit 1
fi
@@ -1319,6 +1368,9 @@
if [[ "$LB_VLAN_RANGES" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
fi
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT
+ iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
fi
fi
@@ -1368,6 +1420,14 @@
iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
fi
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ # Set up integration bridge
+ OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ quantum_setup_ovs_bridge $OVS_BRIDGE
+ if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
+ sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
+ fi
+ AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
fi
# Update config w/rootwrap
iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
@@ -1386,6 +1446,7 @@
# Set debug
iniset $Q_DHCP_CONF_FILE DEFAULT debug True
iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+ iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url
@@ -1396,6 +1457,9 @@
iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
+ iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
fi
fi
@@ -1422,21 +1486,16 @@
iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
# Set up external bridge
- # Create it if it does not exist
- sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
- # remove internal ports
- for PORT in `sudo ovs-vsctl --no-wait list-ports $PUBLIC_BRIDGE`; do
- TYPE=$(sudo ovs-vsctl get interface $PORT type)
- if [[ "$TYPE" == "internal" ]]; then
- echo `sudo ip link delete $PORT` > /dev/null
- sudo ovs-vsctl --no-wait del-port $bridge $PORT
- fi
- done
- # ensure no IP is configured on the public bridge
- sudo ip addr flush dev $PUBLIC_BRIDGE
+ quantum_setup_external_bridge $PUBLIC_BRIDGE
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ''
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+ # Set up external bridge
+ quantum_setup_external_bridge $PUBLIC_BRIDGE
fi
fi
@@ -1604,8 +1663,8 @@
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
- if is_service_enabled swift3;then
- cat <<EOF>>${SWIFT_CONFIG_PROXY_SERVER}
+ if is_service_enabled swift3; then
+ cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
# NOTE(chmou): s3token middleware is not updated yet to use only
# username and password.
[filter:s3token]
@@ -1741,6 +1800,8 @@
if is_service_enabled nova; then
echo_summary "Configuring Nova"
+ # Rebuild the config file from scratch
+ create_nova_conf
init_nova
fi
@@ -1758,6 +1819,11 @@
NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"
+ add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
+ add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
fi
add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
@@ -1894,17 +1960,17 @@
# Create a small network
# Since quantum command is executed in admin context at this point,
# ``--tenant_id`` needs to be specified.
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
+ NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
if is_service_enabled q-l3; then
# Create a router, and add the private subnet as one of its interfaces
ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2)
quantum router-interface-add $ROUTER_ID $SUBNET_ID
# Create an external network, and a subnet. Configure the external network as router gw
- EXT_NET_ID=$(quantum net-create ext_net -- --router:external=True | grep ' id ' | get_field 2)
+ EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
- if [[ "$Q_PLUGIN" = "openvswitch" ]] && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+ if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
CIDR_LEN=${FLOATING_RANGE#*/}
sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
sudo ip link set $PUBLIC_BRIDGE up
@@ -1919,10 +1985,10 @@
elif is_service_enabled mysql && is_service_enabled nova; then
# Create a small network
- $NOVA_BIN_DIR/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+ $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
# Create some floating ips
- $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE
+    $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
# Create a second pool
$NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
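To try the new Ryu support end to end, a localrc along these lines should be enough; the exact ENABLED_SERVICES contents depend on the rest of the deployment, so treat this as a sketch:

    # localrc (sketch)
    ENABLED_SERVICES+=,quantum,q-svc,q-agt,q-dhcp,q-l3,ryu
    Q_PLUGIN=ryu
    RYU_API_HOST=127.0.0.1
    RYU_API_PORT=8080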
diff --git a/stackrc b/stackrc
index 3002c46..e587efa 100644
--- a/stackrc
+++ b/stackrc
@@ -101,6 +101,10 @@
HEAT_REPO=${GIT_BASE}/heat-api/heat.git
HEAT_BRANCH=master
+# ryu service
+RYU_REPO=https://github.com/osrg/ryu.git
+RYU_BRANCH=master
+
# Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC** or **OpenVZ** based system.
@@ -132,10 +136,10 @@
#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
-# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
+# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
# ``IMAGE_URLS`` to be set directly in ``localrc``.
case "$VIRT_DRIVER" in
- openvz)
+ openvz)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
libvirt)
@@ -155,3 +159,6 @@
# 5Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
+
+PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
+PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
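Since both defaults use the ${VAR:-default} pattern, the names can be overridden from localrc without editing stackrc, e.g.:

    # localrc
    PRIVATE_NETWORK_NAME=internal
    PUBLIC_NETWORK_NAME=ext_net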
diff --git a/unstack.sh b/unstack.sh
index 49f1e8b..42cb7af 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -111,5 +111,5 @@
# Quantum dhcp agent runs dnsmasq
if is_service_enabled q-dhcp; then
pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }')
- [ ! -z $pid ] && sudo kill -9 $pid
+ [ ! -z "$pid" ] && sudo kill -9 $pid
fi