Merge "Use iputils-arping package for Ubuntu Oneiric distribution"
diff --git a/files/apts/horizon b/files/apts/horizon
index aa08a31..1e0b0e6 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -18,3 +18,9 @@
 python-kombu
 python-coverage
 python-cherrypy3 # why?
+python-django
+python-django-mailer
+python-django-nose
+python-django-registration
+python-cloudfiles
+python-migrate
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
new file mode 100644
index 0000000..06c21a2
--- /dev/null
+++ b/files/apts/n-cpu
@@ -0,0 +1,4 @@
+# Stuff for diablo volumes
+lvm2
+open-iscsi
+open-iscsi-utils
diff --git a/files/apts/n-vol b/files/apts/n-vol
new file mode 100644
index 0000000..edaee2c
--- /dev/null
+++ b/files/apts/n-vol
@@ -0,0 +1,3 @@
+iscsitarget  # NOPRIME
+iscsitarget-dkms  # NOPRIME
+lvm2
diff --git a/files/apts/nova b/files/apts/nova
index b034509..4c91470 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -40,8 +40,3 @@
 python-m2crypto
 python-boto
 python-kombu
-
-# Stuff for diablo volumes
-iscsitarget  # NOPRIME
-iscsitarget-dkms  # NOPRIME
-lvm2
diff --git a/files/glance-api.conf b/files/glance-api.conf
index 1bbd58e..6c670b5 100644
--- a/files/glance-api.conf
+++ b/files/glance-api.conf
@@ -141,30 +141,32 @@
 [pipeline:glance-api]
 #pipeline = versionnegotiation context apiv1app
 # NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken context apiv1app
+pipeline = versionnegotiation authtoken auth-context apiv1app
 
 # To enable Image Cache Management API replace pipeline with below:
 # pipeline = versionnegotiation context imagecache apiv1app
 # NOTE: use the following pipeline for keystone auth (with caching)
-# pipeline = versionnegotiation authtoken context imagecache apiv1app
-
-[pipeline:versions]
-pipeline = versionsapp
-
-[app:versionsapp]
-paste.app_factory = glance.api.versions:app_factory
+# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
 
 [app:apiv1app]
-paste.app_factory = glance.api.v1.router:app_factory
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.api.v1.router:API
 
 [filter:versionnegotiation]
-paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
 
-[filter:imagecache]
-paste.filter_factory = glance.api.middleware.image_cache:filter_factory
+[filter:cache]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache:CacheFilter
+
+[filter:cachemanage]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
 
 [filter:context]
-paste.filter_factory = glance.common.context:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
@@ -176,3 +178,7 @@
 auth_protocol = http
 auth_uri = http://127.0.0.1:5000/
 admin_token = %SERVICE_TOKEN%
+
+[filter:auth-context]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
index 1e04186..e732e86 100644
--- a/files/glance-registry.conf
+++ b/files/glance-registry.conf
@@ -46,14 +46,16 @@
 [pipeline:glance-registry]
 #pipeline = context registryapp
 # NOTE: use the following pipeline for keystone
-pipeline = authtoken keystone_shim context registryapp
+pipeline = authtoken auth-context context registryapp
 
 [app:registryapp]
-paste.app_factory = glance.registry.server:app_factory
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.registry.api.v1:API
 
 [filter:context]
 context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.context:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
@@ -66,5 +68,7 @@
 auth_uri = http://127.0.0.1:5000/
 admin_token = %SERVICE_TOKEN%
 
-[filter:keystone_shim]
-paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory
+[filter:auth-context]
+context_class = glance.registry.context.RequestContext
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/pips/horizon b/files/pips/horizon
index dddf011..f35a01d 100644
--- a/files/pips/horizon
+++ b/files/pips/horizon
@@ -1,11 +1,5 @@
-Django==1.3
-django-mailer
-django-nose==0.1.2
 django-nose-selenium
-django-registration==0.7
 pycrypto==2.3
-sqlalchemy-migrate
-python-cloudfiles
 
 -e git+https://github.com/cloudbuilders/openstackx.git#egg=openstackx
 -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack
diff --git a/files/screenrc b/files/screenrc
deleted file mode 100644
index e18db39..0000000
--- a/files/screenrc
+++ /dev/null
@@ -1,9 +0,0 @@
-hardstatus on
-hardstatus alwayslastline
-hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c"
-
-defscrollback 10240
-
-vbell off
-startup_message off
-
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 2db6d32..d7ed485 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -16,7 +16,7 @@
 use = egg:swiftkeystone2#keystone2
 keystone_admin_token = %SERVICE_TOKEN%
 keystone_url = http://localhost:35357/v2.0
-keystone_admin_group = Member
+keystone_swift_operator_roles = Member
 
 [filter:tempauth]
 use = egg:swift#tempauth
diff --git a/stack.sh b/stack.sh
index 7d66e31..f978e44 100755
--- a/stack.sh
+++ b/stack.sh
@@ -80,9 +80,6 @@
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
 
-# Configure services to syslog instead of writing to individual log files
-SYSLOG=${SYSLOG:-False}
-
 # apt-get wrapper to just get arguments set correctly
 function apt_get() {
     local sudo="sudo"
@@ -121,7 +118,7 @@
 
     echo "Copying files to stack user"
     STACK_DIR="$DEST/${PWD##*/}"
-    cp -r -f "$PWD" "$STACK_DIR"
+    cp -r -f -T "$PWD" "$STACK_DIR"
     chown -R stack "$STACK_DIR"
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
@@ -157,12 +154,17 @@
 
 # Default Quantum Plugin
 Q_PLUGIN=${Q_PLUGIN:-openvswitch}
+# Default Quantum Port
+Q_PORT=${Q_PORT:-9696}
+# Default Quantum Host
+Q_HOST=${Q_HOST:-localhost}
 
 # Specify which services to launch.  These generally correspond to screen tabs
 ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx}
 
 # Name of the lvm volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 
 # Nova hypervisor configuration.  We default to libvirt whth  **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  Stack.sh can
@@ -186,6 +188,23 @@
     fi
 fi
 
+# Normalize config values to True or False
+# VAR=`trueorfalse default-value test-value`
+function trueorfalse() {
+    local default=$1
+    local testval=$2
+
+    [[ -z "$testval" ]] && { echo "$default"; return; }
+    [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
+    [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
+    echo "$default"
+}
+
+# Configure services to syslog instead of writing to individual log files
+SYSLOG=`trueorfalse False $SYSLOG`
+SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
+SYSLOG_PORT=${SYSLOG_PORT:-516}
+
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
@@ -266,8 +285,9 @@
 
 # Using Quantum networking:
 #
-# Make sure that q-svc is enabled in ENABLED_SERVICES.  If it is the network
-# manager will be set to the QuantumManager.
+# Make sure that quantum is enabled in ENABLED_SERVICES.  If it is the network
+# manager will be set to the QuantumManager.  If you want to run Quantum on
+# this host, make sure that q-svc is also in ENABLED_SERVICES.
 #
 # If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
 # "openvswitch" and make sure the q-agt service is enabled in
@@ -378,8 +398,8 @@
 # - We are parsing the packages files and detecting metadatas.
 #  - If there is a NOPRIME as comment mean we are not doing the install
 #    just yet.
-#  - If we have the meta-keyword distro:DISTRO or
-#    distro:DISTRO1,DISTRO2 it will be installed only for those
+#  - If we have the meta-keyword dist:DISTRO or
+#    dist:DISTRO1,DISTRO2 it will be installed only for those
 #    distros (case insensitive).
 function get_packages() {
     local file_to_parse="general"
@@ -517,7 +537,7 @@
     # that is *deprecated*.  The code is being moved into python-novaclient & nova.
     git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
 fi
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
     # quantum
     git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
 fi
@@ -551,13 +571,31 @@
     cd $HORIZON_DIR/horizon; sudo python setup.py develop
     cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
 fi
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
     cd $QUANTUM_DIR; sudo python setup.py develop
 fi
 
-# Add a useful screenrc.  This isn't required to run openstack but is we do
-# it since we are going to run the services in screen for simple
-cp $FILES/screenrc ~/.screenrc
+# Syslog
+# ---------
+
+if [[ $SYSLOG != "False" ]]; then
+    apt_get install -y rsyslog-relp
+    if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
+        # Configure the master host to receive
+        cat <<EOF >/tmp/90-stack-m.conf
+\$ModLoad imrelp
+\$InputRELPServerRun $SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
+    else
+        # Set rsyslog to send to remote host
+        cat <<EOF >/tmp/90-stack-s.conf
+*.*		:omrelp:$SYSLOG_HOST:$SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
+    fi
+    sudo /usr/sbin/service rsyslog restart
+fi
 
 # Rabbit
 # ---------
@@ -699,6 +737,7 @@
 
     # Virtualization Configuration
     # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    apt_get install libvirt-bin
 
     # attempt to load modules: network block device - used to manage qcow images
     sudo modprobe nbd || true
@@ -707,7 +746,6 @@
     # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
     # come with hardware virtualization disabled in BIOS.
     if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
-        apt_get install libvirt-bin
         sudo modprobe kvm || true
         if [ ! -e /dev/kvm ]; then
             echo "WARNING: Switching to QEMU"
@@ -719,15 +757,17 @@
     # splitting a system into many smaller parts.  LXC uses cgroups and chroot
     # to simulate multiple systems.
     if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-        apt_get install lxc
-        # lxc uses cgroups (a kernel interface via virtual filesystem) configured
-        # and mounted to ``/cgroup``
-        sudo mkdir -p /cgroup
-        if ! grep -q cgroup /etc/fstab; then
-            echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab
-        fi
-        if ! mount -n | grep -q cgroup; then
-            sudo mount /cgroup
+        if [[ "$DISTRO" > natty ]]; then
+            apt_get install cgroup-lite
+        else
+            cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+            sudo mkdir -p /cgroup
+            if ! grep -q cgroup /etc/fstab; then
+                echo "$cgline" | sudo tee -a /etc/fstab
+            fi
+            if ! mount -n | grep -q cgroup; then
+                sudo mount /cgroup
+            fi
         fi
     fi
 
@@ -909,12 +949,29 @@
 
     apt_get install iscsitarget-dkms iscsitarget
 
-    if ! sudo vgdisplay | grep -q $VOLUME_GROUP; then
+    if ! sudo vgs $VOLUME_GROUP; then
         VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
-        truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+        # Only create if the file doesn't already exist
+        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
         DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
-        sudo vgcreate $VOLUME_GROUP $DEV
+        # Only create if the loopback device doesn't contain $VOLUME_GROUP
+        if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+    fi
+
+    if sudo vgs $VOLUME_GROUP; then
+        # Clean out existing volumes
+        for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+            # VOLUME_NAME_PREFIX prefixes the LVs we want
+            if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+                tid=`egrep "^tid.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '='`
+                if [[ -n "$tid" ]]; then
+                    lun=`egrep "lun.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '=' | tr -d '\t'`
+                    sudo ietadm --op delete --$tid --$lun
+                fi
+                sudo lvremove -f $VOLUME_GROUP/$lv
+            fi
+        done
     fi
 
     # Configure iscsitarget
@@ -933,9 +990,11 @@
 add_nova_flag "--scheduler_driver=$SCHEDULER"
 add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
 add_nova_flag "--fixed_range=$FIXED_RANGE"
-if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
     add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
-    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+    add_nova_flag "--quantum_connection_host=$Q_HOST"
+    add_nova_flag "--quantum_connection_port=$Q_PORT"
+    if [[ "$ENABLED_SERVICES" =~ "q-svc" && "$Q_PLUGIN" = "openvswitch" ]]; then
         add_nova_flag "--libvirt_vif_type=ethernet"
         add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
         add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
@@ -946,6 +1005,7 @@
 fi
 if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
     add_nova_flag "--volume_group=$VOLUME_GROUP"
+    add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x"
 fi
 add_nova_flag "--my_ip=$HOST_IP"
 add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
@@ -1043,6 +1103,16 @@
     sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA
     # initialize keystone with default users/endpoints
     ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
+
+    if [ "$SYSLOG" != "False" ]; then
+        sed -i -e '/^handlers=devel$/s/=devel/=production/' \
+            $KEYSTONE_DIR/etc/logging.cnf
+        sed -i -e "
+            /^log_file/s/log_file/\#log_file/; \
+            /^log_config/d;/^\[DEFAULT\]/a\
+            log_config=$KEYSTONE_DIR/etc/logging.cnf" \
+            $KEYSTONE_DIR/etc/keystone.conf
+    fi
 fi
 
 
@@ -1074,6 +1144,8 @@
 # create a new named screen to run processes in
 screen -d -m -S stack -t stack
 sleep 1
+# set a reasonable statusbar
+screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
 
 # launch the glance registry service
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
@@ -1110,25 +1182,23 @@
     fi
 fi
 
-# Quantum
+# Quantum service
 if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-    # Install deps
-    # FIXME add to files/apts/quantum, but don't install if not needed!
-    apt_get install openvswitch-switch openvswitch-datapath-dkms
-
-    # Create database for the plugin/agent
     if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        # Install deps
+        # FIXME add to files/apts/quantum, but don't install if not needed!
+        apt_get install openvswitch-switch openvswitch-datapath-dkms
+        # Create database for the plugin/agent
         if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
         else
             echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
             exit 1
         fi
+        QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini
+        # Make sure we're using the openvswitch plugin
+        sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
     fi
-
-    QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini
-    # Make sure we're using the openvswitch plugin
-    sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
     screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf"
 fi
 
@@ -1213,20 +1283,55 @@
     for image_url in ${IMAGE_URLS//,/ }; do
         # Downloads the image (uec ami+aki style), then extracts it.
         IMAGE_FNAME=`basename "$image_url"`
-        IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz`
         if [ ! -f $FILES/$IMAGE_FNAME ]; then
             wget -c $image_url -O $FILES/$IMAGE_FNAME
         fi
 
-        # Extract ami and aki files
-        tar -zxf $FILES/$IMAGE_FNAME -C $FILES/images
+        KERNEL=""
+        RAMDISK=""
+        case "$IMAGE_FNAME" in
+            *.tar.gz|*.tgz)
+                # Extract ami and aki files
+                [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+                    IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+                    IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+                xdir="$FILES/images/$IMAGE_NAME"
+                rm -Rf "$xdir";
+                mkdir "$xdir"
+                tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+                KERNEL=$(for f in "$xdir/"*-vmlinuz*; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                RAMDISK=$(for f in "$xdir/"*-initrd*; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                IMAGE=$(for f in "$xdir/"*.img; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                [ -n "$IMAGE_NAME" ]
+                IMAGE_NAME=$(basename "$IMAGE" ".img")
+                ;;
+            *.img)
+                IMAGE="$FILES/$IMAGE_FNAME";
+                IMAGE_NAME=$(basename "$IMAGE" ".img")
+                ;;
+            *.img.gz)
+                IMAGE="$FILES/${IMAGE_FNAME}"
+                IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+                ;;
+            *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+        esac
 
         # Use glance client to add the kernel the root filesystem.
         # We parse the results of the first upload to get the glance ID of the
         # kernel for use when uploading the root filesystem.
-        RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/$IMAGE_NAME-vmlinuz*`
-        KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        glance add -A $SERVICE_TOKEN name="$IMAGE_NAME" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/$IMAGE_NAME.img
+        KERNEL_ID=""; RAMDISK_ID="";
+        if [ -n "$KERNEL" ]; then
+            RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
+            KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+        fi
+        if [ -n "$RAMDISK" ]; then
+            RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
+            RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+        fi
+        glance add -A $SERVICE_TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
     done
 fi
 
diff --git a/stackrc b/stackrc
index 524cc99..0e700d5 100644
--- a/stackrc
+++ b/stackrc
@@ -44,7 +44,21 @@
 CITEST_BRANCH=master
 
 # Specify a comma-separated list of uec images to download and install into glance.
-IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
+# supported urls here are:
+#  * "uec-style" images:
+#     If the file ends in .tar.gz, uncompress the tarball and select the first
+#     .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
+#     and "*-initrd*" as the ramdisk
+#     example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
+#  * disk image (*.img,*.img.gz)
+#    If the file ends in .img, then it will be uploaded and registered to
+#    glance as a disk image.  If it ends in .gz, it is uncompressed first.
+#    examples:
+#      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
+#      http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
+#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
+#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
+IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz" # uec style cirros image
 
 # allow local overrides of env variables
 if [ -f ./localrc ]; then