Merge "Install and configure python-heatclient."
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 460b50c..b06c8dd 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -95,7 +95,7 @@
chmod 600 $KEY_FILE
# Delete the old volume
-nova volume-delete $VOL_NAME || true
+cinder delete $VOL_NAME || true
# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers
if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then
@@ -112,15 +112,15 @@
fi
# Create the bootable volume
-nova volume-create --display_name=$VOL_NAME --image-id $IMAGE 1
+cinder create --display_name=$VOL_NAME --image-id $IMAGE 1
# Wait for volume to activate
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
exit 1
fi
-VOLUME_ID=`nova volume-list | grep $VOL_NAME | get_field 1`
+VOLUME_ID=`cinder list | grep $VOL_NAME | get_field 1`
# Boot instance from volume! This is done with the --block_device_mapping param.
# The format of mapping is:
@@ -152,13 +152,13 @@
die "Failure deleting instance volume $VOL_INSTANCE_NAME"
# Wait till our volume is no longer in-use
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
exit 1
fi
# Delete the volume
-nova volume-delete $VOL_NAME || \
+cinder delete $VOL_NAME || \
die "Failure deleting volume $VOLUME_NAME"
# De-allocate the floating ip
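
For reference, the client migration applied in this exercise is a one-for-one rename of the old nova volume commands onto python-cinderclient; only attach and detach stay on the nova client because they act on the instance rather than the volume service. A side-by-side summary (illustrative only, not part of the patch):

    # python-novaclient (old)          python-cinderclient (new)
    nova volume-create ...             # -> cinder create ...
    nova volume-list                   # -> cinder list
    nova volume-delete $VOL_NAME       # -> cinder delete $VOL_NAME
    # volume-attach / volume-detach remain nova subcommands:
    nova volume-attach $VM_UUID $VOL_ID $DEVICE
    nova volume-detach $VM_UUID $VOL_ID
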
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 1c73786..72c8729 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -2,7 +2,7 @@
# **volumes.sh**
-# Test nova volumes with the nova command from python-novaclient
+# Test cinder volumes with the cinder command from python-cinderclient
echo "*********************************************************************"
echo "Begin DevStack Exercise: $0"
@@ -131,28 +131,28 @@
VOL_NAME="myvol-$(openssl rand -hex 4)"
# Verify it doesn't exist
-if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then
+if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then
echo "Volume $VOL_NAME already exists"
exit 1
fi
# Create a new volume
-nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
+cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
if [[ $? != 0 ]]; then
echo "Failure creating volume $VOL_NAME"
exit 1
fi
start_time=`date +%s`
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
exit 1
fi
end_time=`date +%s`
-echo "Completed volume-create in $((end_time - start_time)) seconds"
+echo "Completed cinder create in $((end_time - start_time)) seconds"
# Get volume ID
-VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | get_field 1`
+VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1`
die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
# Attach to server
@@ -160,14 +160,14 @@
start_time=`date +%s`
nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
die "Failure attaching volume $VOL_NAME to $NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
echo "Volume $VOL_NAME not attached to $NAME"
exit 1
fi
end_time=`date +%s`
echo "Completed volume-attach in $((end_time - start_time)) seconds"
-VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | get_field -1`
+VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1`
die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
echo "Volume not attached to correct instance"
@@ -177,7 +177,7 @@
# Detach volume
start_time=`date +%s`
nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not detached from $NAME"
exit 1
fi
@@ -186,13 +186,13 @@
# Delete volume
start_time=`date +%s`
-nova volume-delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME; do sleep 1; done"; then
+cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME; do sleep 1; done"; then
echo "Volume $VOL_NAME not deleted"
exit 1
fi
end_time=`date +%s`
-echo "Completed volume-delete in $((end_time - start_time)) seconds"
+echo "Completed cinder delete in $((end_time - start_time)) seconds"
# Shutdown the server
nova delete $VM_UUID || die "Failure deleting instance $NAME"
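
The volumes exercise repeats the same timeout/poll idiom for every state transition. A small helper along these lines (hypothetical — DevStack does not define it; the names mirror the variables used above) would collapse those loops:

    # Hypothetical helper: wait until a volume shows the given status in `cinder list`
    function wait_for_volume_status {
        local name=$1
        local status=$2
        timeout $ACTIVE_TIMEOUT sh -c \
            "while ! cinder list | grep $name | grep $status; do sleep 1; done"
    }
    # usage: wait_for_volume_status $VOL_NAME available || die "Volume $VOL_NAME not available"
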
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index 06c21a2..a40b659 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -2,3 +2,4 @@
lvm2
open-iscsi
open-iscsi-utils
+genisoimage
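
genisoimage is the tool nova-compute typically shells out to when assembling a config-drive ISO for a guest, hence its addition to the compute package lists. A quick smoke test once the stack is up might look like this (illustrative; flavor and image names are placeholders):

    nova boot --config-drive=true --flavor m1.tiny \
        --image cirros-0.3.0-x86_64-uec test-configdrive
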
diff --git a/files/apts/quantum b/files/apts/quantum
index 39f4561..64fc1bf 100644
--- a/files/apts/quantum
+++ b/files/apts/quantum
@@ -1,6 +1,7 @@
ebtables
iptables
iputils-ping
+iputils-arping
mysql-server #NOPRIME
sudo
python-boto
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 9520b17..3da11bf 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -2,18 +2,19 @@
#
# Initial data for Keystone using python-keystoneclient
#
-# Tenant               User     Roles
+# Tenant               User         Roles
# ------------------------------------------------------------------
-# admin                admin    admin
-# service              glance   admin
-# service              nova     admin, [ResellerAdmin (swift only)]
-# service              quantum  admin        # if enabled
-# service              swift    admin        # if enabled
-# service              cinder   admin        # if enabled
-# service              heat     admin        # if enabled
-# demo                 admin    admin
-# demo                 demo     Member, anotherrole
-# invisible_to_admin   demo     Member
+# admin                admin        admin
+# service              glance       admin
+# service              nova         admin, [ResellerAdmin (swift only)]
+# service              quantum      admin        # if enabled
+# service              swift        admin        # if enabled
+# service              cinder       admin        # if enabled
+# service              heat         admin        # if enabled
+# service              ceilometer   admin        # if enabled
+# demo                 admin        admin
+# demo                 demo         Member, anotherrole
+# invisible_to_admin   demo         Member
# Tempest Only:
# alt_demo alt_demo Member
#
@@ -262,7 +263,14 @@
fi
fi
-if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then
+ CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \
+ --pass="$SERVICE_PASSWORD" \
+ --tenant_id $SERVICE_TENANT \
+ --email=ceilometer@example.com)
+ keystone user-role-add --tenant_id $SERVICE_TENANT \
+ --user_id $CEILOMETER_USER \
+ --role_id $ADMIN_ROLE
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
CEILOMETER_SERVICE=$(get_id keystone service-create \
--name=ceilometer \
@@ -345,4 +353,3 @@
--internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
fi
fi
-
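
The new ceilometer block follows the same pattern every other service user in this file uses: create the user via get_id, then grant it the admin role in the service tenant. get_id is defined near the top of keystone_data.sh; it is approximately the following (reproduced from memory — verify against the file itself):

    # Extract the id field from the table printed by a keystone *-create command
    function get_id () {
        echo `"$@" | awk '/ id / { print $4 }'`
    }
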
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 1996a98..f7054e8 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,3 +1,4 @@
# Stuff for diablo volumes
iscsi-initiator-utils
lvm2
+genisoimage
diff --git a/functions b/functions
index 0da8299..c7f65db 100644
--- a/functions
+++ b/functions
@@ -7,6 +7,7 @@
# ``GLANCE_HOSTPORT``
# ``OFFLINE``
# ``PIP_DOWNLOAD_CACHE``
+# ``PIP_USE_MIRRORS``
# ``RECLONE``
# ``TRACK_DEPENDS``
# ``http_proxy``, ``https_proxy``, ``no_proxy``
@@ -578,7 +579,8 @@
# Wrapper for ``pip install`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``TRACK_DEPENDES``, ``*_proxy`
+# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
+# ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
@@ -597,11 +599,14 @@
CMD_PIP=/usr/bin/pip-python
fi
fi
+ if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
+ PIP_MIRROR_OPT="--use-mirrors"
+ fi
$SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
HTTP_PROXY=$http_proxy \
HTTPS_PROXY=$https_proxy \
NO_PROXY=$no_proxy \
- $CMD_PIP install --use-mirrors $@
+ $CMD_PIP install $PIP_MIRROR_OPT $@
}
@@ -894,7 +899,7 @@
local FLOATING_IP=$3
local DEFAULT_INSTANCE_USER=$4
local ACTIVE_TIMEOUT=$5
- local probe_cmd = ""
+ local probe_cmd=""
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
echo "server didn't become ssh-able!"
exit 1
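
The new PIP_USE_MIRRORS knob is opt-out: any value other than the literal string False keeps --use-mirrors enabled. Disabling it when the PyPI mirrors are slow or stale is a one-line localrc change (illustrative):

    # localrc (illustrative)
    PIP_USE_MIRRORS=False
    PIP_DOWNLOAD_CACHE=/var/cache/pip    # default used by pip_install above
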
diff --git a/lib/ceilometer b/lib/ceilometer
index b0f0377..2b014b0 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -6,8 +6,9 @@
# Dependencies:
# - functions
-# - OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL set for admin credentials
+# - OS_AUTH_URL for auth in api
# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
# stack.sh
# ---------
@@ -61,7 +62,15 @@
iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8
+ # Install the policy file for the API server
+ cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
+ iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json
+
iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http
+ iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer
+ iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+ iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+
cleanup_ceilometer
}
@@ -73,7 +82,7 @@
# start_ceilometer() - Start running processes, including screen
function start_ceilometer() {
screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
- screen_it ceilometer-acentral "export OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD OS_TENANT_NAME=$OS_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-acentral "export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
}
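
Since the central agent and the API server now authenticate as the dedicated ceilometer service user instead of the admin credentials, the new credentials can be sanity-checked against Keystone using the same variables the screen_it line exports (illustrative):

    export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD \
           OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL
    keystone token-get    # should print a token scoped to the service tenant
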
diff --git a/lib/cinder b/lib/cinder
index 81bfbfe..c2cf15b 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -125,6 +125,10 @@
iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions
iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
+ if [ "$SYSLOG" != "False" ]; then
+ iniset $CINDER_CONF DEFAULT use_syslog True
+ fi
+
if is_service_enabled qpid ; then
iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
elif is_service_enabled zeromq; then
@@ -134,6 +138,10 @@
iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
fi
+ if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
+ iniset $CINDER_CONF DEFAULT secure_delete False
+ fi
+
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
# Add color to logging output
iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [[01;36m%(request_id)s [00;36m%(user_id)s %(project_id)s%(color)s] [01;35m%(instance)s%(color)s%(message)s[00m"
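
Both new cinder knobs are driven from localrc; CINDER_SECURE_DELETE picks up its default of True in stack.sh below. A minimal localrc exercising them (illustrative):

    # localrc (illustrative)
    SYSLOG=True                  # send cinder logs to syslog as well
    CINDER_SECURE_DELETE=False   # skip the dd wipe; see the kernel bug referenced in stack.sh
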
diff --git a/lib/glance b/lib/glance
index b02a4b6..60026d5 100644
--- a/lib/glance
+++ b/lib/glance
@@ -70,6 +70,13 @@
setup_develop $GLANCECLIENT_DIR
}
+# durable_glance_queues() - Determine if RabbitMQ queues are durable or not
+function durable_glance_queues() {
+ test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0
+ test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0
+ return 1
+}
+
# configure_glance() - Set config files, create data dirs, etc
function configure_glance() {
setup_develop $GLANCE_DIR
@@ -120,6 +127,12 @@
iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+        if durable_glance_queues; then
+ # This gets around https://bugs.launchpad.net/glance/+bug/1074132
+ # that results in a g-api server becoming unresponsive during
+ # startup...
+ iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True
+ fi
fi
if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
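
durable_glance_queues only asks RabbitMQ whether any durable queues or exchanges already exist; the same check can be run by hand when diagnosing the g-api startup hang the comment refers to (assumes rabbitmqctl can reach the local broker):

    sudo rabbitmqctl list_queues name durable | grep true
    sudo rabbitmqctl list_exchanges name durable | grep true
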
diff --git a/stack.sh b/stack.sh
index 4d76949..40eab36 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,13 +12,12 @@
# developer install.
# To keep this script simple we assume you are running on a recent **Ubuntu**
-# (11.10 Oneiric or 12.04 Precise) or **Fedora** (F16 or F17) machine. It
+# (11.10 Oneiric or newer) or **Fedora** (F16 or newer) machine. It
# should work in a VM or physical server. Additionally we put the list of
# ``apt`` and ``rpm`` dependencies and other configuration files in this repo.
# Learn more and get the most recent version at http://devstack.org
-
# Keep track of the devstack directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
@@ -33,6 +32,7 @@
# Import database library (must be loaded before stackrc which sources localrc)
source $TOP_DIR/lib/database
+
# Settings
# ========
@@ -107,9 +107,8 @@
fi
fi
-# Disallow qpid on oneiric
+# Qpid was introduced to Ubuntu in precise; disallow it on oneiric
if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then
- # Qpid was introduced in precise
echo "You must use Ubuntu Precise or newer for Qpid support."
exit 1
fi
@@ -356,6 +355,11 @@
# Ryu Applications
RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
+# Should cinder perform secure deletion of volumes?
+# Defaults to True; set it to False to avoid this bug when testing:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+
# Name of the LVM volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
@@ -448,14 +452,16 @@
# fail.
#
# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set FLAT_INTERFACE=
-# This will stop nova from bridging any interfaces into FLAT_NETWORK_BRIDGE.
+# devices other than that node, you can set ``FLAT_INTERFACE=``
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-# Using Quantum networking:
-#
+
+# Quantum Networking
+# ------------------
+
# Make sure that quantum is enabled in ENABLED_SERVICES. If you want
# to run Quantum on this host, make sure that q-svc is also in
# ENABLED_SERVICES.
@@ -473,18 +479,20 @@
# With Quantum networking the NET_MAN variable is ignored.
-# Database configuration
+# Database Configuration
# ----------------------
+
# To select between database backends, add a line to localrc like:
#
# use_database postgresql
#
-# The available database backends are defined in the DATABASE_BACKENDS
+# The available database backends are defined in the ``DATABASE_BACKENDS``
# variable defined in stackrc. By default, MySQL is enabled as the database
# backend.
initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
+
# RabbitMQ or Qpid
# --------------------------
@@ -536,7 +544,7 @@
S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
fi
# We only ask for Swift Hash if we have enabled swift service.
- # SWIFT_HASH is a random unique string for a swift cluster that
+ # ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
@@ -551,7 +559,7 @@
# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
# just a string and is not a 'real' Keystone token.
read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
-# Services authenticate to Identity with servicename/SERVICE_PASSWORD
+# Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
# Horizon currently truncates usernames and passwords at 20 characters
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
@@ -560,7 +568,6 @@
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
# Horizon
# -------
@@ -574,10 +581,9 @@
# ---------
# Draw a spinner so the user knows something is happening
-function spinner()
-{
+function spinner() {
local delay=0.75
- local spinstr='|/-\'
+ local spinstr='/-\|'
printf "..." >&3
while [ true ]; do
local temp=${spinstr#?}
@@ -632,6 +638,7 @@
SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
# Redirect output according to config
+
# Copy stdout to fd 3
exec 3>&1
if [[ "$VERBOSE" == "True" ]]; then
@@ -762,7 +769,7 @@
if is_service_enabled q-agt; then
if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
# Install deps
- # FIXME add to files/apts/quantum, but don't install if not needed!
+ # FIXME add to ``files/apts/quantum``, but don't install if not needed!
if [[ "$os_PACKAGE" = "deb" ]]; then
kernel_version=`cat /proc/version | cut -d " " -f3`
install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
@@ -805,6 +812,7 @@
echo_summary "Installing OpenStack project source"
+# Grab clients first
install_keystoneclient
install_glanceclient
install_novaclient
@@ -867,6 +875,7 @@
git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
fi
+
# Initialization
# ==============
@@ -969,10 +978,15 @@
# Configure database
# ------------------
+
if is_service_enabled $DATABASE_BACKENDS; then
configure_database
fi
+
+# Configure screen
+# ----------------
+
if [ -z "$SCREEN_HARDSTATUS" ]; then
SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
fi
@@ -982,9 +996,11 @@
if [[ -e $SCREENRC ]]; then
echo -n > $SCREENRC
fi
+
# Create a new named screen to run processes in
screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
sleep 1
+
# Set a reasonable status bar
screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
@@ -1094,6 +1110,7 @@
# Ryu
# ---
+
# Ryu is not a part of OpenStack project. Please ignore following block if
# you are not interested in Ryu.
# launch ryu manager
@@ -1120,11 +1137,10 @@
# Quantum
# -------
+# Quantum Network Configuration
if is_service_enabled quantum; then
echo_summary "Configuring Quantum"
- #
- # Quantum Network Configuration
- #
+
# The following variables control the Quantum openvswitch and
# linuxbridge plugins' allocation of tenant networks and
# availability of provider networks. If these are not configured
@@ -1152,7 +1168,7 @@
# allocated. An external network switch must be configured to
# trunk these VLANs between hosts for multi-host connectivity.
#
- # Example: TENANT_VLAN_RANGE=1000:1999
+ # Example: ``TENANT_VLAN_RANGE=1000:1999``
TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
# If using VLANs for tenant networks, or if using flat or VLAN
@@ -1161,7 +1177,7 @@
# openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
# agent, as described below.
#
- # Example: PHYSICAL_NETWORK=default
+ # Example: ``PHYSICAL_NETWORK=default``
PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
# With the openvswitch plugin, if using VLANs for tenant networks,
@@ -1171,7 +1187,7 @@
# physical interface must be manually added to the bridge as a
# port for external connectivity.
#
- # Example: OVS_PHYSICAL_BRIDGE=br-eth1
+ # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
# With the linuxbridge plugin, if using VLANs for tenant networks,
@@ -1179,13 +1195,13 @@
# the name of the network interface to use for the physical
# network.
#
- # Example: LB_PHYSICAL_INTERFACE=eth1
+ # Example: ``LB_PHYSICAL_INTERFACE=eth1``
LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
# With the openvswitch plugin, set to True in localrc to enable
- # provider GRE tunnels when ENABLE_TENANT_TUNNELS is False.
+ # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
#
- # Example: OVS_ENABLE_TUNNELING=True
+ # Example: ``OVS_ENABLE_TUNNELING=True``
OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
# Put config files in ``/etc/quantum`` for everyone to find
@@ -1273,7 +1289,7 @@
echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
fi
- # Override OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc
+ # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc``
# for more complex physical network configurations.
if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
OVS_VLAN_RANGES=$PHYSICAL_NETWORK
@@ -1296,7 +1312,7 @@
echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
fi
- # Override LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc
+ # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc``
# for more complex physical network configurations.
if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
LB_VLAN_RANGES=$PHYSICAL_NETWORK
@@ -1336,7 +1352,7 @@
fi
# Setup physical network bridge mappings. Override
- # OVS_VLAN_RANGES and OVS_BRIDGE_MAPPINGS in localrc for more
+ # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
# complex physical network configurations.
if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
@@ -1350,7 +1366,7 @@
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
# Setup physical network interface mappings. Override
- # LB_VLAN_RANGES and LB_INTERFACE_MAPPINGS in localrc for more
+ # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
# complex physical network configurations.
if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
@@ -1451,6 +1467,7 @@
fi
fi
+
# Nova
# ----
@@ -1742,97 +1759,98 @@
# Rebuild the config file from scratch
create_nova_conf
init_nova
-fi
-# Additional Nova configuration that is dependent on other services
-if is_service_enabled quantum; then
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
- add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
- add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
- add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
- add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
- add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
+ # Additional Nova configuration that is dependent on other services
+ if is_service_enabled quantum; then
+ add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
+ add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
+ add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
+ add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+ add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
+ add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
+ add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
- if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"
- elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"
- elif [[ "$Q_PLUGIN" = "ryu" ]]; then
- NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"
- add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
- add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
- add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"
+ elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+ NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver"
+ add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE"
+ add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT"
+ fi
+ add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
+ add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
+ elif is_service_enabled n-net; then
+ add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
+ add_nova_opt "public_interface=$PUBLIC_INTERFACE"
+ add_nova_opt "vlan_interface=$VLAN_INTERFACE"
+ add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
+ if [ -n "$FLAT_INTERFACE" ]; then
+ add_nova_opt "flat_interface=$FLAT_INTERFACE"
+ fi
fi
- add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
- add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
-else
- add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
- add_nova_opt "public_interface=$PUBLIC_INTERFACE"
- add_nova_opt "vlan_interface=$VLAN_INTERFACE"
- add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE"
- if [ -n "$FLAT_INTERFACE" ]; then
- add_nova_opt "flat_interface=$FLAT_INTERFACE"
+ # All nova-compute workers need to know the vnc configuration options
+ # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+ if is_service_enabled n-cpu; then
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL"
+ XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+ add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL"
fi
-fi
-# All nova-compute workers need to know the vnc configuration options
-# These settings don't hurt anything if n-xvnc and n-novnc are disabled
-if is_service_enabled n-cpu; then
- NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
- add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL"
- XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
- add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL"
-fi
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
-else
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
-fi
-# Address on which instance vncservers will listen on compute hosts.
-# For multi-host, this should be the management ip of the compute host.
-VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
-add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN"
-add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
-add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST"
-if is_service_enabled zeromq; then
- add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq"
-elif is_service_enabled qpid; then
- add_nova_opt "rpc_backend=nova.rpc.impl_qpid"
-elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
- add_nova_opt "rabbit_host=$RABBIT_HOST"
- add_nova_opt "rabbit_password=$RABBIT_PASSWORD"
-fi
-add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+ else
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+ fi
+ # Address on which instance vncservers will listen on compute hosts.
+ # For multi-host, this should be the management ip of the compute host.
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+ add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN"
+ add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
+ add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST"
+ if is_service_enabled zeromq; then
+ add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq"
+ elif is_service_enabled qpid; then
+ add_nova_opt "rpc_backend=nova.rpc.impl_qpid"
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ add_nova_opt "rabbit_host=$RABBIT_HOST"
+ add_nova_opt "rabbit_password=$RABBIT_PASSWORD"
+ fi
+ add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT"
-# XenServer
-# ---------
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- echo_summary "Using XenServer virtualization driver"
- read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
- add_nova_opt "compute_driver=xenapi.XenAPIDriver"
- XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
- XENAPI_USER=${XENAPI_USER:-"root"}
- add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL"
- add_nova_opt "xenapi_connection_username=$XENAPI_USER"
- add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD"
- add_nova_opt "flat_injected=False"
- # Need to avoid crash due to new firewall support
- XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
-elif [ "$VIRT_DRIVER" = 'openvz' ]; then
- echo_summary "Using OpenVZ virtualization driver"
- # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
- # Replace connection_type when this is fixed.
- # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection"
- add_nova_opt "connection_type=openvz"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
-else
- echo_summary "Using libvirt virtualization driver"
- add_nova_opt "compute_driver=libvirt.LibvirtDriver"
- LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
- add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ # XenServer
+ # ---------
+
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ echo_summary "Using XenServer virtualization driver"
+ read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
+ add_nova_opt "compute_driver=xenapi.XenAPIDriver"
+ XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
+ XENAPI_USER=${XENAPI_USER:-"root"}
+ add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL"
+ add_nova_opt "xenapi_connection_username=$XENAPI_USER"
+ add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD"
+ add_nova_opt "flat_injected=False"
+ # Need to avoid crash due to new firewall support
+ XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
+ add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
+ elif [ "$VIRT_DRIVER" = 'openvz' ]; then
+ echo_summary "Using OpenVZ virtualization driver"
+ # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
+ # Replace connection_type when this is fixed.
+ # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection"
+ add_nova_opt "connection_type=openvz"
+ LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
+ add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ else
+ echo_summary "Using libvirt virtualization driver"
+ add_nova_opt "compute_driver=libvirt.LibvirtDriver"
+ LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
+ add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
+ fi
fi
@@ -1913,7 +1931,7 @@
fi
fi
-elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then
+elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
# Create a small network
$NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
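
The Quantum comments above spell out the variables a VLAN-based multi-host setup needs; pulled together in localrc they might look like this (illustrative values taken from the examples in the comments; n-net should not be enabled at the same time):

    # localrc (illustrative)
    ENABLED_SERVICES="$ENABLED_SERVICES,quantum,q-svc,q-agt,q-dhcp"
    Q_PLUGIN=openvswitch
    PHYSICAL_NETWORK=default
    OVS_PHYSICAL_BRIDGE=br-eth1
    TENANT_VLAN_RANGE=1000:1999
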
diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh
index f64b7b6..8566229 100755
--- a/tools/build_usb_boot.sh
+++ b/tools/build_usb_boot.sh
@@ -11,7 +11,6 @@
DEST_DIR=${1:-/tmp/syslinux-boot}
PXEDIR=${PXEDIR:-/opt/ramstack/pxe}
-PROGDIR=`dirname $0`
# Clean up any resources that may be in use
cleanup() {
@@ -81,7 +80,7 @@
# Get image into place
if [ ! -r $PXEDIR/stack-initrd.img ]; then
cd $TOP_DIR
- $PROGDIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img
+ $TOOLS_DIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img
fi
if [ ! -r $PXEDIR/stack-initrd.gz ]; then
gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 1e35036..b48680c 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -47,6 +47,7 @@
fi
# Source params
+source $TOP_DIR/lib/database
source $TOP_DIR/openrc
# Where Openstack code lives
@@ -57,6 +58,9 @@
CONFIG_DIR=$TEMPEST_DIR/etc
TEMPEST_CONF=$CONFIG_DIR/tempest.conf
+DATABASE_TYPE=${DATABASE_TYPE:-mysql}
+initialize_database_backends
+
# Use the GUEST_IP unless an explicit IP is set by ``HOST_IP``
HOST_IP=${HOST_IP:-$GUEST_IP}
# Use the first IP if HOST_IP still is not set
@@ -186,7 +190,7 @@
# TODO(jaypipes): Create the key file here... right now, no whitebox
# tests actually use a key.
COMPUTE_PATH_TO_PRIVATE_KEY=$TEMPEST_DIR/id_rsa
-COMPUTE_DB_URI=mysql://root:$MYSQL_PASSWORD@localhost/nova
+COMPUTE_DB_URI=$BASE_SQL_CONN/nova
# Image test configuration options...
IMAGE_HOST=${IMAGE_HOST:-127.0.0.1}
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index ca74a03..156fd43 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -5,7 +5,7 @@
# Download and prepare Ubuntu UEC images
CACHEDIR=${CACHEDIR:-/opt/stack/cache}
-ROOTSIZE=${ROOTSIZE:-2000}
+ROOTSIZE=${ROOTSIZE:-2000M}
# Keep track of the current directory
TOOLS_DIR=$(cd $(dirname "$0") && pwd)
@@ -24,7 +24,7 @@
echo "$0 [-r rootsize] release imagefile [kernel]"
echo ""
echo "-r size - root fs size (min 2000MB)"
- echo "release - Ubuntu release: jaunty - oneric"
+ echo "release - Ubuntu release: lucid - quantal"
echo "imagefile - output image file"
echo "kernel - output kernel"
exit 1
@@ -64,6 +64,8 @@
KERNEL=$3
case $DIST_NAME in
+ quantal) ;;
+    precise) ;;
oneiric) ;;
natty) ;;
maverick) ;;
@@ -90,7 +92,7 @@
# Get the UEC image
UEC_NAME=$DIST_NAME-server-cloudimg-amd64
-if [ ! -d $CACHEDIR ]; then
+if [ ! -d $CACHEDIR/$DIST_NAME ]; then
mkdir -p $CACHEDIR/$DIST_NAME
fi
if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 0bb6ac8..c78c6f2 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -57,8 +57,8 @@
fi
# get nova
-nova_zipball=$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")
-wget $nova_zipball -O nova-zipball --no-check-certificate
+NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(echo $NOVA_REPO | sed "s:\.git$::;s:$:/zipball/$NOVA_BRANCH:g")}
+wget $NOVA_ZIPBALL_URL -O nova-zipball --no-check-certificate
unzip -o nova-zipball -d ./nova
# install xapi plugins
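
Turning the zipball location into an overridable variable lets CI or offline installs point the domU build at a local mirror instead of GitHub, for example (the URL is a placeholder):

    NOVA_ZIPBALL_URL=http://mirror.example.com/nova.zip ./install_os_domU.sh
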