Merge "Use right service name when stopping tgt in unstack.sh"
diff --git a/AUTHORS b/AUTHORS
index 6141d67..ab929ca 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -34,6 +34,7 @@
Thierry Carrez <thierry@openstack.org>
Todd Willey <xtoddx@gmail.com>
Tres Henry <tres@treshenry.net>
+Vincent Untz <vuntz@suse.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Yun Mao <yunmao@gmail.com>
Yong Sheng Gong <gongysh@cn.ibm.com>
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 6a0937a..7fe81ba 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -46,6 +46,8 @@
# Default floating IP pool name
DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova}
+# Default user
+DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros}
# Launching servers
# =================
@@ -150,7 +152,7 @@
# To do this, ssh to the builder instance, mount volume, and build a volume-backed image.
STAGING_DIR=/tmp/stage
CIRROS_DIR=/tmp/cirros
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
set -o errexit
set -o xtrace
sudo mkdir -p $STAGING_DIR
@@ -168,10 +170,10 @@
fi
# Copy cirros onto the volume
-scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz cirros@$FLOATING_IP:$STAGING_DIR
+scp -o StrictHostKeyChecking=no -i $KEY_FILE cirros-0.3.0-x86_64-rootfs.img.gz ${DEFAULT_INSTANCE_USER}@$FLOATING_IP:$STAGING_DIR
# Unpack cirros into volume
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
set -o errexit
set -o xtrace
cd $STAGING_DIR
@@ -221,7 +223,7 @@
fi
# Make sure our volume-backed instance launched
-ssh -o StrictHostKeyChecking=no -i $KEY_FILE cirros@$FLOATING_IP << EOF
+ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP << EOF
echo "success!"
EOF
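As a sketch of how the new DEFAULT_INSTANCE_USER knob above is intended to be consumed (not part of the patch; the 'ubuntu' account is an assumed example for an image that does not ship a 'cirros' user):

    # localrc -- hypothetical override for a non-cirros image
    DEFAULT_INSTANCE_USER=ubuntu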
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 4a538c6..9f7aed1 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -40,12 +40,15 @@
# Instance type to create
DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+# Boot this image, use first AMI-format image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
# Launching a server
# ==================
# Find a machine image to boot
-IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
+IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
# Define secgroup
SECGROUP=euca_secgroup
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 51019a3..02259c0 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -185,7 +185,7 @@
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP"
# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" ]; then
+if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
# test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
print "Security group failure - ping should not be allowed!"
diff --git a/stack.sh b/stack.sh
index 7143433..6b8362c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -2,7 +2,7 @@
# ``stack.sh`` is an opinionated OpenStack developer installation. It
# installs and configures various combinations of **Glance**, **Horizon**,
-# **Keystone**, **Melange**, **Nova**, **Quantum** and **Swift**
+# **Keystone**, **Nova**, **Quantum** and **Swift**
# This script allows you to specify configuration options of what git
# repositories to use, enabled services, network configuration and various
@@ -251,8 +251,6 @@
SWIFTCLIENT_DIR=$DEST/python-swiftclient
QUANTUM_DIR=$DEST/quantum
QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
-MELANGE_DIR=$DEST/melange
-MELANGECLIENT_DIR=$DEST/python-melangeclient
# Default Quantum Plugin
Q_PLUGIN=${Q_PLUGIN:-openvswitch}
@@ -261,31 +259,17 @@
# Default Quantum Host
Q_HOST=${Q_HOST:-localhost}
# Which Quantum API nova should use
-NOVA_USE_QUANTUM_API=${NOVA_USE_QUANTUM_API:-v1}
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
# Default auth strategy
Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# Default Melange Port
-M_PORT=${M_PORT:-9898}
-# Default Melange Host
-M_HOST=${M_HOST:-localhost}
-# Melange MAC Address Range
-M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
-
# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-# Nova hypervisor configuration. We default to libvirt with **kvm** but will
-# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
-# also install an **LXC** based system.
-VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
-LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
-
# Nova supports pluggable schedulers. ``FilterScheduler`` should work in most
# cases.
SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
@@ -425,14 +409,6 @@
#
# With Quantum networking the NET_MAN variable is ignored.
-# Using Melange IPAM:
-#
-# Make sure that quantum and melange are enabled in ENABLED_SERVICES.
-# If they are then the melange IPAM lib will be set in the QuantumManager.
-# Adding m-svc to ENABLED_SERVICES will start the melange service on this
-# host.
-
-
# MySQL & (RabbitMQ or Qpid)
# --------------------------
@@ -791,13 +767,6 @@
# quantum
git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
fi
-if is_service_enabled m-svc; then
- # melange
- git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
-fi
-if is_service_enabled melange; then
- git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
-fi
if is_service_enabled cinder; then
install_cinder
fi
@@ -835,12 +804,6 @@
setup_develop $QUANTUM_CLIENT_DIR
setup_develop $QUANTUM_DIR
fi
-if is_service_enabled m-svc; then
- setup_develop $MELANGE_DIR
-fi
-if is_service_enabled melange; then
- setup_develop $MELANGECLIENT_DIR
-fi
if is_service_enabled cinder; then
configure_cinder
fi
@@ -1122,20 +1085,12 @@
Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
Q_DB_NAME="ovs_quantum"
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
- fi
+ Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
Q_DB_NAME="quantum_linux_bridge"
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin"
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
- fi
+ Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
else
echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
exit 1
@@ -1159,20 +1114,16 @@
sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
fi
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api False
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True
- fi
+ iniset /$Q_PLUGIN_CONF_FILE AGENT target_v2_api True
+ Q_CONF_FILE=/etc/quantum/quantum.conf
+ cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
fi
# Quantum service (for controller node)
if is_service_enabled q-svc; then
- Q_CONF_FILE=/etc/quantum/quantum.conf
Q_API_PASTE_FILE=/etc/quantum/api-paste.ini
Q_POLICY_FILE=/etc/quantum/policy.json
- cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
@@ -1194,8 +1145,6 @@
iniset $Q_API_PASTE_FILE filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $Q_API_PASTE_FILE filter:authtoken admin_user $Q_ADMIN_USERNAME
iniset $Q_API_PASTE_FILE filter:authtoken admin_password $SERVICE_PASSWORD
-
- screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
fi
# Quantum agent (for compute nodes)
@@ -1219,8 +1168,6 @@
sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE
AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py"
fi
- # Start up the quantum agent
- screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
fi
# Quantum DHCP
@@ -1229,9 +1176,7 @@
Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini
- if [[ -e $QUANTUM_DIR/etc/dhcp_agent.ini ]]; then
- sudo cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
- fi
+ cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
# Set verbose
iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
@@ -1250,32 +1195,27 @@
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
fi
- # Start up the quantum agent
- screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE"
fi
-# Melange service
-if is_service_enabled m-svc; then
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS melange;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE melange CHARACTER SET utf8;'
- else
- echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
+# Quantum RPC support - must be updated prior to starting any of the services
+if is_service_enabled quantum; then
+ iniset $Q_CONF_FILE DEFAULT control_exchange quantum
+ if is_service_enabled qpid ; then
+ iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+ elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST
+ iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD
fi
- MELANGE_CONFIG_FILE=$MELANGE_DIR/etc/melange/melange.conf
- cp $MELANGE_CONFIG_FILE.sample $MELANGE_CONFIG_FILE
- sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/melange?charset=utf8/g" $MELANGE_CONFIG_FILE
- cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-manage --config-file=$MELANGE_CONFIG_FILE db_sync
- screen_it m-svc "cd $MELANGE_DIR && PYTHONPATH=.:$PYTHONPATH python $MELANGE_DIR/bin/melange-server --config-file=$MELANGE_CONFIG_FILE"
- echo "Waiting for melange to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9898; do sleep 1; done"; then
- echo "melange-server did not start"
- exit 1
- fi
- melange mac_address_range create cidr=$M_MAC_RANGE
fi
+# Start the Quantum services
+screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum agent
+screen_it q-agt "sudo python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
+
+# Start up the quantum DHCP agent
+screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file=$Q_DHCP_CONF_FILE"
# Nova
# ----
@@ -1821,28 +1761,13 @@
add_nova_opt "s3_host=$SERVICE_HOST"
add_nova_opt "s3_port=$S3_SERVICE_PORT"
if is_service_enabled quantum; then
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- add_nova_opt "network_manager=nova.network.quantum.manager.QuantumManager"
- add_nova_opt "quantum_connection_host=$Q_HOST"
- add_nova_opt "quantum_connection_port=$Q_PORT"
- add_nova_opt "quantum_use_dhcp=True"
-
- if is_service_enabled melange; then
- add_nova_opt "quantum_ipam_lib=nova.network.quantum.melange_ipam_lib"
- add_nova_opt "use_melange_mac_generation=True"
- add_nova_opt "melange_host=$M_HOST"
- add_nova_opt "melange_port=$M_PORT"
- fi
-
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
- add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
- add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
- add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
- add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
- add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
- add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
- fi
+ add_nova_opt "network_api_class=nova.network.quantumv2.api.API"
+ add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME"
+ add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD"
+ add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+ add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY"
+ add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME"
+ add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT"
if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
@@ -1961,6 +1886,13 @@
# Need to avoid crash due to new firewall support
XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER"
+elif [ "$VIRT_DRIVER" = 'openvz' ]; then
+ # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here.
+ # Replace connection_type when this is fixed.
+ # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection"
+ add_nova_opt "connection_type=openvz"
+ LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
+ add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER"
else
add_nova_opt "compute_driver=libvirt.LibvirtDriver"
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
@@ -2135,25 +2067,23 @@
# If we're using Quantum (i.e. q-svc is enabled), network creation has to
# happen after we've started the Quantum service.
-if is_service_enabled mysql && is_service_enabled nova; then
- if [[ "$NOVA_USE_QUANTUM_API" = "v1" ]]; then
- # Create a small network
- $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+if is_service_enabled q-svc; then
+ TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
- # Create some floating ips
- $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+ # Create a small network
+ # Since quantum command is executed in admin context at this point,
+ # --tenant_id needs to be specified.
+ NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
+ quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
+elif is_service_enabled mysql && is_service_enabled nova; then
+ # Create a small network
+ $NOVA_DIR/bin/nova-manage network create private $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
- # Create a second pool
- $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
- elif [[ "$NOVA_USE_QUANTUM_API" = "v2" ]]; then
- TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+ # Create some floating ips
+ $NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
- # Create a small network
- # Since quantum command is executed in admin context at this point,
- # --tenant_id needs to be specified.
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
- quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
- fi
+ # Create a second pool
+ $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
# Launching nova-compute should be as simple as running ``nova-compute`` but
@@ -2216,6 +2146,14 @@
wget -c $image_url -O $FILES/$IMAGE_FNAME
fi
+ # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
+ if [[ "$image_url" =~ 'openvz' ]]; then
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
+ glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format ami --disk-format ami < "$IMAGE"
+ continue
+ fi
+
KERNEL=""
RAMDISK=""
DISK_FORMAT=""
@@ -2262,19 +2200,19 @@
esac
if [ "$CONTAINER_FORMAT" = "bare" ]; then
- glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
else
# Use glance client to add the kernel the root filesystem.
# We parse the results of the first upload to get the glance ID of the
# kernel for use when uploading the root filesystem.
KERNEL_ID=""; RAMDISK_ID="";
if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ KERNEL_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public=True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
fi
if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public=True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public=True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
fi
done
fi
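As an illustrative follow-up to the quantum v2 network creation above (a sketch only; the server name, flavor and image are assumptions, not part of this change), the net1 network can be attached to a server by passing its id to nova:

    # hypothetical: boot a test server on the quantum network created by stack.sh
    nova boot --flavor 1 --image cirros-0.3.0-x86_64-uec --nic net-id=$NET_ID test-vm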
diff --git a/stackrc b/stackrc
index 3bbc475..c906f95 100644
--- a/stackrc
+++ b/stackrc
@@ -91,13 +91,16 @@
TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git
TEMPEST_BRANCH=master
-# melange service
-MELANGE_REPO=${GIT_BASE}/openstack/melange.git
-MELANGE_BRANCH=master
+# Nova hypervisor configuration. We default to libvirt with **kvm** but will
+# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
+# also install an **LXC** or **OpenVZ** based system.
+VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
+LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
-# python melange client library
-MELANGECLIENT_REPO=${GIT_BASE}/openstack/python-melangeclient.git
-MELANGECLIENT_BRANCH=master
+# allow local overrides of env variables
+if [ -f $RC_DIR/localrc ]; then
+ source $RC_DIR/localrc
+fi
# Specify a comma-separated list of uec images to download and install into glance.
# supported urls here are:
@@ -114,19 +117,27 @@
# http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
-case "$LIBVIRT_TYPE" in
- lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
- DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-rootfs
- IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";;
- *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
- DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec
- IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";;
+#
+# Set default image based on LIBVIRT_TYPE or VIRT_DRIVER, which may be set in localrc
+# but allow DEFAULT_IMAGE_NAME and IMAGE_URLS to be set directly in localrc, too.
+case "$VIRT_DRIVER" in
+ openvz) # OpenVZ uses its own image format and does not support uec style images
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
+ IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
+ libvirt)
+ case "$LIBVIRT_TYPE" in
+ lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-rootfs}
+ IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz"};;
+ *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+ IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+ esac
+ ;;
+ *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
+ IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
esac
-# allow local overrides of env variables
-if [ -f $RC_DIR/localrc ]; then
- source $RC_DIR/localrc
-fi
-
# 5Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
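A minimal localrc sketch exercising the relocated hypervisor defaults above (values are illustrative, not required by the patch):

    # localrc -- hypothetical OpenVZ setup
    VIRT_DRIVER=openvz
    # DEFAULT_IMAGE_NAME and IMAGE_URLS may still be set here explicitly;
    # if left unset, the ubuntu-11.10-x86_64 OpenVZ template from the case
    # statement above is downloaded.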
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 4d029d8..5be709a 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -67,15 +67,20 @@
# Glance should already contain images to be used in tempest
# testing. Here we simply look for images stored in Glance
# and set the appropriate variables for use in the tempest config
-# We ignore ramdisk and kernel images and set the IMAGE_UUID to
-# the first image returned and set IMAGE_UUID_ALT to the second,
+# We ignore ramdisk and kernel images and, if DEFAULT_IMAGE_NAME is set,
+# only consider images matching it. IMAGE_UUID is then set to the
+# first image returned and IMAGE_UUID_ALT to the second,
# if there is more than one returned...
# ... Also ensure we only take active images, so we don't get snapshots in process
IMAGE_LINES=`glance image-list`
IFS="$(echo -e "\n\r")"
IMAGES=""
for line in $IMAGE_LINES; do
- IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`"
+ if [ -z $DEFAULT_IMAGE_NAME ]; then
+ IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | cut -d' ' -f2`"
+ else
+ IMAGES="$IMAGES `echo $line | grep -v "^\(ID\|+--\)" | grep -v "\(aki\|ari\)" | grep 'active' | grep "$DEFAULT_IMAGE_NAME" | cut -d' ' -f2`"
+ fi
done
# Create array of image UUIDs...
IFS=" "
@@ -127,9 +132,31 @@
ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
ALT_PASSWORD=$OS_PASSWORD
-# TODO(jaypipes): Support configurable flavor refs here...
-FLAVOR_REF=1
-FLAVOR_REF_ALT=2
+# Check Nova for existing flavors and, if set, look for the
+# DEFAULT_INSTANCE_TYPE and use that. Otherwise, just use the first flavor.
+FLAVOR_LINES=`nova flavor-list`
+IFS="$(echo -e "\n\r")"
+FLAVORS=""
+for line in $FLAVOR_LINES; do
+ if [ -z $DEFAULT_INSTANCE_TYPE ]; then
+ FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | cut -d' ' -f2`"
+ else
+ FLAVORS="$FLAVORS `echo $line | grep -v "^\(ID\|+--\)" | grep "$DEFAULT_INSTANCE_TYPE" | cut -d' ' -f2`"
+ fi
+done
+IFS=" "
+FLAVORS=($FLAVORS)
+NUM_FLAVORS=${#FLAVORS[*]}
+echo "Found $NUM_FLAVORS flavors"
+if [[ $NUM_FLAVORS -eq 0 ]]; then
+ echo "Found no valid flavors to use!"
+ exit 1
+fi
+FLAVOR_REF=${FLAVORS[0]}
+FLAVOR_REF_ALT=$FLAVOR_REF
+if [[ $NUM_FLAVORS -gt 1 ]]; then
+ FLAVOR_REF_ALT=${FLAVORS[1]}
+fi
# Do any of the following need to be configurable?
COMPUTE_CATALOG_TYPE=compute
@@ -141,7 +168,8 @@
BUILD_INTERVAL=3
BUILD_TIMEOUT=400
RUN_SSH=True
-SSH_USER=$OS_USERNAME
+# Check for DEFAULT_INSTANCE_USER and try to connect with that account
+SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME}
NETWORK_FOR_SSH=private
IP_VERSION_FOR_SSH=4
SSH_TIMEOUT=4
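Taken together, the configure_tempest.sh changes read three optional localrc variables; a hedged example of setting all three (values are illustrative):

    # localrc -- hypothetical tempest-oriented overrides
    DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec   # image filter for IMAGE_UUID
    DEFAULT_INSTANCE_TYPE=m1.tiny                # flavor used for FLAVOR_REF
    DEFAULT_INSTANCE_USER=cirros                 # becomes SSH_USER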