Merge "Move the ubuntu template generator to use Squeeze."
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 2fe20f5..84bf9cd 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -40,7 +40,7 @@
admin_password = %SERVICE_PASSWORD%
[filter:swift3]
-use = egg:swift#swift3
+use = egg:swift3#middleware
[filter:tempauth]
use = egg:swift#tempauth
diff --git a/stack.sh b/stack.sh
index 340d4a8..776ff86 100755
--- a/stack.sh
+++ b/stack.sh
@@ -214,6 +214,7 @@
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
NOVNC_DIR=$DEST/noVNC
SWIFT_DIR=$DEST/swift
+SWIFT3_DIR=$DEST/swift3
QUANTUM_DIR=$DEST/quantum
QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
MELANGE_DIR=$DEST/melange
@@ -673,6 +674,7 @@
if is_service_enabled swift; then
# storage service
git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+ git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
fi
if is_service_enabled g-api n-api; then
# image catalog service
@@ -689,7 +691,7 @@
if is_service_enabled quantum; then
git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH
fi
-if is_service_enabled q-svc; then
+if is_service_enabled quantum; then
# quantum
git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
fi
@@ -716,6 +718,7 @@
fi
if is_service_enabled swift; then
cd $SWIFT_DIR; sudo python setup.py develop
+ cd $SWIFT3_DIR; sudo python setup.py develop
fi
if is_service_enabled g-api n-api; then
cd $GLANCE_DIR; sudo python setup.py develop
@@ -727,7 +730,7 @@
if is_service_enabled quantum; then
cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop
fi
-if is_service_enabled q-svc; then
+if is_service_enabled quantum; then
cd $QUANTUM_DIR; sudo python setup.py develop
fi
if is_service_enabled m-svc; then
@@ -1000,6 +1003,7 @@
cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
iniset $GLANCE_API_CONF DEFAULT debug True
inicomment $GLANCE_API_CONF DEFAULT log_file
+ iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
iniset $GLANCE_API_CONF paste_deploy flavor keystone
@@ -1027,128 +1031,111 @@
cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
fi
-# Quantum
+# Quantum (for controller or agent nodes)
# -------
if is_service_enabled quantum; then
# Put config files in /etc/quantum for everyone to find
- QUANTUM_CONF_DIR=/etc/quantum
- if [[ ! -d $QUANTUM_CONF_DIR ]]; then
- sudo mkdir -p $QUANTUM_CONF_DIR
+ if [[ ! -d /etc/quantum ]]; then
+ sudo mkdir -p /etc/quantum
fi
- sudo chown `whoami` $QUANTUM_CONF_DIR
-
- # Set default values when using Linux Bridge plugin
- if [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- # set the config file
- QUANTUM_LB_CONF_DIR=$QUANTUM_CONF_DIR/plugins/linuxbridge
- mkdir -p $QUANTUM_LB_CONF_DIR
- QUANTUM_LB_CONFIG_FILE=$QUANTUM_LB_CONF_DIR/linuxbridge_conf.ini
- # must remove this file from existing location, otherwise Quantum will prefer it
- if [[ -e $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini ]]; then
- sudo mv $QUANTUM_DIR/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini $QUANTUM_LB_CONFIG_FILE
- fi
- #set the default network interface
- QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
- fi
-fi
-# Quantum service
-if is_service_enabled q-svc; then
- QUANTUM_PLUGIN_INI_FILE=$QUANTUM_CONF_DIR/plugins.ini
- # must remove this file from existing location, otherwise Quantum will prefer it
- if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then
- sudo mv $QUANTUM_DIR/etc/plugins.ini $QUANTUM_PLUGIN_INI_FILE
- fi
+ sudo chown `whoami` /etc/quantum
if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- # Install deps
- # FIXME add to files/apts/quantum, but don't install if not needed!
- if [[ "$os_PACKAGE" = "deb" ]]; then
- kernel_version=`cat /proc/version | cut -d " " -f3`
- install_package openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
- else
- ### FIXME(dtroyer): Find RPMs for OpenVSwitch
- echo "OpenVSwitch packages need to be located"
- fi
-
- QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch
- QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini
-
- # Create database for the plugin/agent
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum CHARACTER SET utf8;'
- else
- echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
- exit 1
- fi
- # Make sure we're using the openvswitch plugin
- sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
+ Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini
+ Q_DB_NAME="ovs_quantum"
+ Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin"
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
# Install deps
# FIXME add to files/apts/quantum, but don't install if not needed!
install_package python-configobj
- # Create database for the plugin/agent
- if is_service_enabled mysql; then
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS quantum_linux_bridge;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS quantum_linux_bridge;'
- if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE
- then
- sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE
- else
- sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE
- fi
+ Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
+ Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
+ Q_DB_NAME="quantum_linux_bridge"
+ Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin"
+ else
+ echo "Unknown Quantum plugin '$Q_PLUGIN'.. exiting"
+ exit 1
+ fi
+
+ # if needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum
+ mkdir -p /$Q_PLUGIN_CONF_PATH
+ Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+ if [[ -e $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE ]]; then
+ sudo mv $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+ fi
+ sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE
+
+ OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True}
+ if [[ "$Q_PLUGIN" = "openvswitch" && $OVS_ENABLE_TUNNELING = "True" ]]; then
+ OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
+ if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
+ echo "You are running OVS version $OVS_VERSION."
+ echo "OVS 1.4+ is required for tunneling between multiple hosts."
+ exit 1
+ fi
+ sudo sed -i -e "s/.*enable-tunneling = .*$/enable-tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
+ fi
+fi
+
+# Quantum service (for controller node)
+if is_service_enabled q-svc; then
+ Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini
+ Q_CONF_FILE=/etc/quantum/quantum.conf
+ # must remove this file from existing location, otherwise Quantum will prefer it
+ if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then
+ sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE
+ fi
+
+ if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then
+ sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
+ fi
+
+ if is_service_enabled mysql; then
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;"
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;"
else
echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
exit 1
- fi
- # Make sure we're using the linuxbridge plugin
- sudo sed -i -e "s/^provider =.*$/provider = quantum.plugins.linuxbridge.LinuxBridgePlugin.LinuxBridgePlugin/g" $QUANTUM_PLUGIN_INI_FILE
fi
- if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then
- sudo mv $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF_DIR/quantum.conf
- fi
- screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$QUANTUM_CLIENT_DIR:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_CONF_DIR/quantum.conf"
+ sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE
+
+ screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $Q_CONF_FILE"
fi
# Quantum agent (for compute nodes)
if is_service_enabled q-agt; then
if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ # Install deps
+ # FIXME add to files/apts/quantum, but don't install if not needed!
+ if [[ "$os_PACKAGE" = "deb" ]]; then
+ kernel_version=`cat /proc/version | cut -d " " -f3`
+ install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
+ else
+ ### FIXME(dtroyer): Find RPMs for OpenVSwitch
+ echo "OpenVSwitch packages need to be located"
+ fi
# Set up integration bridge
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+ for PORT in `sudo ovs-vsctl --no-wait list-ports $OVS_BRIDGE`; do
+        if [[ "$PORT" =~ ^tap ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi
+ sudo ovs-vsctl --no-wait del-port $OVS_BRIDGE $PORT
+ done
sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-
- # Start up the quantum <-> openvswitch agent
- QUANTUM_OVS_CONF_DIR=$QUANTUM_CONF_DIR/plugins/openvswitch
- mkdir -p $QUANTUM_OVS_CONF_DIR
- QUANTUM_OVS_CONFIG_FILE=$QUANTUM_OVS_CONF_DIR/ovs_quantum_plugin.ini
- if [[ -e $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini ]]; then
- sudo mv $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini $QUANTUM_OVS_CONFIG_FILE
- fi
- sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum?charset=utf8/g" $QUANTUM_OVS_CONFIG_FILE
- screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
+ sudo sed -i -e "s/.*local-ip = .*/local-ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
+ AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
# Start up the quantum <-> linuxbridge agent
install_package bridge-utils
- sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" $QUANTUM_LB_CONFIG_FILE
- if grep -Fxq "user = " $QUANTUM_LB_CONFIG_FILE
- then
- sudo sed -i -e "s/^connection = sqlite$/#connection = sqlite/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^#connection = mysql$/connection = mysql/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^user = .*$/user = $MYSQL_USER/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^pass = .*$/pass = $MYSQL_PASSWORD/g" $QUANTUM_LB_CONFIG_FILE
- sudo sed -i -e "s/^host = .*$/host = $MYSQL_HOST/g" $QUANTUM_LB_CONFIG_FILE
- else
- sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/quantum_linux_bridge?charset=utf8/g" $QUANTUM_LB_CONFIG_FILE
- fi
-
- screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py $QUANTUM_LB_CONFIG_FILE -v"
+ #set the default network interface
+ QUANTUM_LB_PRIVATE_INTERFACE=${QUANTUM_LB_PRIVATE_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+ sudo sed -i -e "s/^physical_interface = .*$/physical_interface = $QUANTUM_LB_PRIVATE_INTERFACE/g" /$Q_PLUGIN_CONF_FILE
+ AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/linuxbridge/agent/linuxbridge_quantum_agent.py
fi
+ # Start up the quantum agent
+ screen_it q-agt "sudo python $AGENT_BINARY /$Q_PLUGIN_CONF_FILE -v"
fi
# Melange service
@@ -1278,6 +1265,21 @@
fi
fi
+ QEMU_CONF=/etc/libvirt/qemu.conf
+ if is_service_enabled quantum && [[ $Q_PLUGIN = "openvswitch" ]] && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
+ # add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
+ sudo chmod 666 $QEMU_CONF
+    cat <<EOF | sudo tee -a $QEMU_CONF >/dev/null
+cgroup_device_acl = [
+ "/dev/null", "/dev/full", "/dev/zero",
+ "/dev/random", "/dev/urandom",
+ "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+ "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
+EOF
+ sudo chmod 644 $QEMU_CONF
+ fi
+
if [[ "$os_PACKAGE" = "deb" ]]; then
LIBVIRT_DAEMON=libvirt-bin
else
@@ -1618,17 +1620,18 @@
add_nova_opt "melange_host=$M_HOST"
add_nova_opt "melange_port=$M_PORT"
fi
- if is_service_enabled q-svc && [[ "$Q_PLUGIN" = "openvswitch" ]]; then
- add_nova_opt "libvirt_vif_type=ethernet"
- add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
- add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
- add_nova_opt "quantum_use_dhcp=True"
- elif is_service_enabled q-svc && [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
- add_nova_opt "libvirt_vif_type=ethernet"
- add_nova_opt "libvirt_vif_driver=nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"
- add_nova_opt "linuxnet_interface_driver=nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver"
- add_nova_opt "quantum_use_dhcp=True"
+
+ if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+ NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
+ LINUXNET_VIF_DRIVER="nova.network.linux_net.LinuxOVSInterfaceDriver"
+ elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+ NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"
+ LINUXNET_VIF_DRIVER="nova.network.linux_net.QuantumLinuxBridgeInterfaceDriver"
fi
+ add_nova_opt "libvirt_vif_type=ethernet"
+ add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
+ add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
+ add_nova_opt "quantum_use_dhcp=True"
else
add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
fi
@@ -1702,7 +1705,7 @@
# For Example: EXTRA_OPTS=(foo=true bar=2)
for I in "${EXTRA_OPTS[@]}"; do
# Attempt to convert flags to options
- add_nova_opt ${I//-}
+ add_nova_opt ${I//--}
done
@@ -1713,8 +1716,9 @@
read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
add_nova_opt "connection_type=xenapi"
XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
+ XENAPI_USER=${XENAPI_USER:-"root"}
add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL"
- add_nova_opt "xenapi_connection_username=root"
+ add_nova_opt "xenapi_connection_username=$XENAPI_USER"
add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD"
add_nova_opt "flat_injected=False"
# Need to avoid crash due to new firewall support
diff --git a/stackrc b/stackrc
index 41a399a..98e6bd4 100644
--- a/stackrc
+++ b/stackrc
@@ -16,6 +16,9 @@
# storage service
SWIFT_REPO=https://github.com/openstack/swift.git
SWIFT_BRANCH=master
+SWIFT3_REPO=https://github.com/fujita/swift3.git
+SWIFT3_BRANCH=master
+
# image catalog service
GLANCE_REPO=https://github.com/openstack/glance.git
diff --git a/tools/xen/README.md b/tools/xen/README.md
index d102b01..f20ad04 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -81,3 +81,9 @@
* Play with horizon
* Play with the CLI
* Log bugs to devstack and core projects, and submit fixes!
+
+Step 6: Run from snapshot
+-------------------------
+If you want to quickly re-run devstack from a clean state,
+using the same settings you used in your previous run,
+you can revert the DomU to the snapshot called "before_first_boot"
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index edc0db3..fdc6a60 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -1,46 +1,40 @@
#!/bin/bash
-set -e
+# This script is run by install_os_domU.sh
+#
+# It modifies the ubuntu image created by install_os_domU.sh
+# and previously modified by prepare_guest_template.sh
+#
+# This script is responsible for:
+# - pushing in the DevStack code
+# - creating run.sh, to run the code on boot
+# It does this by mounting the disk image of the VM.
+#
+# The resultant image is then templated and started
+# by install_os_domU.sh
-declare -a on_exit_hooks
-
-on_exit()
-{
- for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0)
- do
- eval "${on_exit_hooks[$i]}"
- done
-}
-
-add_on_exit()
-{
- local n=${#on_exit_hooks[*]}
- on_exit_hooks[$n]="$*"
- if [[ $n -eq 0 ]]
- then
- trap on_exit EXIT
- fi
-}
-
-# Abort if localrc is not set
-if [ ! -e ../../localrc ]; then
- echo "You must have a localrc with ALL necessary passwords defined before proceeding."
- echo "See the xen README for required passwords."
- exit 1
-fi
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
# This directory
TOP_DIR=$(cd $(dirname "$0") && pwd)
+# Include onexit commands
+. $TOP_DIR/scripts/on_exit.sh
+
# Source params - override xenrc params in your localrc to suite your taste
source xenrc
-# Echo commands
-set -o xtrace
-
+#
+# Parameters
+#
GUEST_NAME="$1"
-# Directory where we stage the build
+#
+# Mount the VDI
+#
STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
@@ -76,7 +70,7 @@
cat <<EOF >$STAGING_DIR/etc/rc.local
# network restart required for getting the right gateway
/etc/init.d/networking restart
-GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/devstack/tools/xen/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+chown -R stack /opt/stack
su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack
exit 0
EOF
@@ -85,8 +79,12 @@
echo $GUEST_NAME > $STAGING_DIR/etc/hostname
# Hostname must resolve for rabbit
+HOSTS_FILE_IP=$PUB_IP
+if [ $MGT_IP != "dhcp" ]; then
+ HOSTS_FILE_IP=$MGT_IP
+fi
cat <<EOF >$STAGING_DIR/etc/hosts
-$MGT_IP $GUEST_NAME
+$HOSTS_FILE_IP $GUEST_NAME
127.0.0.1 localhost localhost.localdomain
EOF
@@ -120,6 +118,13 @@
sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES
fi
+if [ "$ENABLE_GI" == "true" ]; then
+ cat <<EOF >>$INTERFACES
+auto eth0
+iface eth0 inet dhcp
+EOF
+fi
+
# Gracefully cp only if source file/dir exists
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
@@ -142,8 +147,6 @@
#!/bin/bash
cd /opt/stack/devstack
killall screen
-UPLOAD_LEGACY_TTY=yes HOST_IP=$PUB_IP VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh
+VIRT_DRIVER=xenserver FORCE=yes MULTI_HOST=$MULTI_HOST HOST_IP_IFACE=$HOST_IP_IFACE $STACKSH_PARAMS ./stack.sh
EOF
chmod 755 $STAGING_DIR/opt/stack/run.sh
-
-echo "Done"
diff --git a/tools/xen/install_domU_multi.sh b/tools/xen/install_domU_multi.sh
deleted file mode 100755
index 91129c5..0000000
--- a/tools/xen/install_domU_multi.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-
-# Echo commands
-set -o xtrace
-
-# Head node host, which runs glance, api, keystone
-HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57}
-HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57}
-
-COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58}
-COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58}
-
-# Networking params
-FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30}
-
-# Variables common amongst all hosts in the cluster
-COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE"
-
-# Helper to launch containers
-function install_domU {
- GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_domU.sh
-}
-
-# Launch the head node - headnode uses a non-ip domain name,
-# because rabbit won't launch with an ip addr hostname :(
-install_domU HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
-
-if [ $HEAD_PUB_IP == "dhcp" ]
-then
- guestnet=$(xe vm-list --minimal name-label=HEADNODE params=networks)
- HEAD_PUB_IP=$(echo $guestnet | grep -w -o --only-matching "3/ip: [0-9,.]*;" | cut -d ':' -f2 | cut -d ';' -f 1)
-fi
-# Wait till the head node is up
-while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
- echo "Waiting for head node ($HEAD_PUB_IP) to start..."
- sleep 5
-done
-
-# Build the HA compute host
-install_domU COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 088748f..352f63a 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -1,7 +1,16 @@
#!/bin/bash
+# This script is a top-level script
+# It must be run on a XenServer or XCP machine
+#
+# It creates a DomU VM that runs OpenStack services
+#
+# For more details see: README.md
+
# Exit on errors
set -o errexit
+# Echo commands
+set -o xtrace
# Abort if localrc is not set
if [ ! -e ../../localrc ]; then
@@ -16,12 +25,17 @@
# Source lower level functions
. $TOP_DIR/../../functions
+# Include onexit commands
+. $TOP_DIR/scripts/on_exit.sh
+
+
+#
+# Get Settings
+#
+
# Source params - override xenrc params in your localrc to suit your taste
source xenrc
-# Echo commands
-set -o xtrace
-
xe_min()
{
local cmd="$1"
@@ -29,22 +43,38 @@
xe "$cmd" --minimal "$@"
}
+
+#
+# Prepare Dom0
+# including installing XenAPI plugins
+#
+
cd $TOP_DIR
if [ -f ./master ]
then
rm -rf ./master
rm -rf ./nova
fi
+
+# get nova
wget https://github.com/openstack/nova/zipball/master --no-check-certificate
unzip -o master -d ./nova
-cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d /etc/
-chmod a+x /etc/xapi.d/plugins/*
+
+# install xapi plugins
+XAPI_PLUGIN_DIR=/etc/xapi.d/plugins/
+if [ ! -d $XAPI_PLUGIN_DIR ]; then
+ # the following is needed when using xcp-xapi
+ XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/
+fi
+cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+chmod a+x ${XAPI_PLUGIN_DIR}*
mkdir -p /boot/guest
-GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
-SNAME="ubuntusnapshot"
-TNAME="ubuntuready"
+
+#
+# Configure Networking
+#
# Helper to create networks
# Uses echo trickery to return network uuid
@@ -84,7 +114,7 @@
fi
}
-# Create host, vm, mgmt, pub networks
+# Create host, vm, mgmt, pub networks on XenServer
VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr")
errorcheck
MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr")
@@ -123,28 +153,48 @@
create_vlan $VM_DEV $VM_VLAN $VM_NET
create_vlan $MGT_DEV $MGT_VLAN $MGT_NET
-# dom0 ip
-HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`}
-
-# Set up ip forwarding
-if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
- # FIXME: This doesn't work on reboot!
- echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
+# Get final bridge names
+if [ -z $VM_BR ]; then
+ VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge)
+fi
+if [ -z $MGT_BR ]; then
+ MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge)
+fi
+if [ -z $PUB_BR ]; then
+ PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge)
fi
+# dom0 ip, XenAPI is assumed to be listening
+HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`}
+
+# Set up ip forwarding, but skip on xcp-xapi
+if [ -e /etc/sysconfig/network ]; then
+ if ! grep -q "FORWARD_IPV4=YES" /etc/sysconfig/network; then
+ # FIXME: This doesn't work on reboot!
+ echo "FORWARD_IPV4=YES" >> /etc/sysconfig/network
+ fi
+fi
# Also, enable ip forwarding in rc.local, since the above trick isn't working
if ! grep -q "echo 1 >/proc/sys/net/ipv4/ip_forward" /etc/rc.local; then
echo "echo 1 >/proc/sys/net/ipv4/ip_forward" >> /etc/rc.local
fi
-
# Enable ip forwarding at runtime as well
echo 1 > /proc/sys/net/ipv4/ip_forward
+
+#
# Shutdown previous runs
+#
+
DO_SHUTDOWN=${DO_SHUTDOWN:-1}
+CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
if [ "$DO_SHUTDOWN" = "1" ]; then
# Shutdown all domU's that created previously
- xe_min vm-list name-label="$GUEST_NAME" | xargs ./scripts/uninstall-os-vpx.sh
+ clean_templates_arg=""
+ if $CLEAN_TEMPLATES; then
+ clean_templates_arg="--remove-templates"
+ fi
+ ./scripts/uninstall-os-vpx.sh $clean_templates_arg
# Destroy any instances that were launched
for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
@@ -160,34 +210,18 @@
done
fi
-# Start guest
-if [ -z $VM_BR ]; then
- VM_BR=$(xe_min network-list uuid=$VM_NET params=bridge)
-fi
-if [ -z $MGT_BR ]; then
- MGT_BR=$(xe_min network-list uuid=$MGT_NET params=bridge)
-fi
-if [ -z $PUB_BR ]; then
- PUB_BR=$(xe_min network-list uuid=$PUB_NET params=bridge)
-fi
-templateuuid=$(xe template-list name-label="$TNAME")
-if [ -n "$templateuuid" ]
-then
- vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
-else
- template=$(xe_min template-list name-label="Ubuntu 11.10 (64-bit)")
- if [ -z "$template" ]
- then
- cp $TOP_DIR/devstackubuntupreseed.cfg /opt/xensource/www/
- $TOP_DIR/scripts/xenoneirictemplate.sh "${HOST_IP}/devstackubuntupreseed.cfg"
- MIRROR=${MIRROR:-archive.ubuntu.com}
- sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \
- -i /opt/xensource/www/devstackubuntupreseed.cfg
- fi
- $TOP_DIR/scripts/install-os-vpx.sh -t "Ubuntu 11.10 (64-bit)" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
+#
+# Create Ubuntu VM template
+# and/or create VM from template
+#
- # Wait for install to finish
+GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
+TNAME="devstack_template_folsom_11.10"
+SNAME_PREPARED="template_prepared"
+SNAME_FIRST_BOOT="before_first_boot"
+
+function wait_for_VM_to_halt() {
while true
do
state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
@@ -196,72 +230,199 @@
break
else
echo "Waiting for "$GUEST_NAME" to finish installation..."
- sleep 30
+ sleep 20
fi
done
+}
+templateuuid=$(xe template-list name-label="$TNAME")
+if [ -z "$templateuuid" ]; then
+ #
+ # Install Ubuntu over network
+ #
+
+ # try to find ubuntu template
+ ubuntu_template_name="Ubuntu 11.10 for DevStack (64-bit)"
+ ubuntu_template=$(xe_min template-list name-label="$ubuntu_template_name")
+
+ # remove template, if we are in CLEAN_TEMPLATE mode
+ if [ -n "$ubuntu_template" ]; then
+ if $CLEAN_TEMPLATES; then
+ xe template-param-clear param-name=other-config uuid=$ubuntu_template
+ xe template-uninstall template-uuid=$ubuntu_template force=true
+ ubuntu_template=""
+ fi
+ fi
+
+ # always update the preseed file, incase we have a newer one
+ PRESEED_URL=${PRESEED_URL:-""}
+ if [ -z "$PRESEED_URL" ]; then
+ PRESEED_URL="${HOST_IP}/devstackubuntupreseed.cfg"
+ HTTP_SERVER_LOCATION="/opt/xensource/www"
+ if [ ! -e $HTTP_SERVER_LOCATION ]; then
+ HTTP_SERVER_LOCATION="/var/www/html"
+ mkdir -p $HTTP_SERVER_LOCATION
+ fi
+ cp -f $TOP_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
+ MIRROR=${MIRROR:-""}
+ if [ -n "$MIRROR" ]; then
+ sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \
+ -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
+ fi
+ fi
+
+ if [ -z "$ubuntu_template" ]; then
+ $TOP_DIR/scripts/xenoneirictemplate.sh $PRESEED_URL
+ fi
+
+ # create a new VM with the given template
+ # creating the correct VIFs and metadata
+ $TOP_DIR/scripts/install-os-vpx.sh -t "$ubuntu_template_name" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
+
+ # wait for install to finish
+ wait_for_VM_to_halt
+
+ # set VM to restart after a reboot
vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME")
xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid"
+ #
+ # Prepare VM for DevStack
+ #
+
+ # Install XenServer tools, and other such things
+ $TOP_DIR/prepare_guest_template.sh "$GUEST_NAME"
+
+ # start the VM to run the prepare steps
+ xe vm-start vm="$GUEST_NAME"
+
+ # Wait for prep script to finish and shutdown system
+ wait_for_VM_to_halt
+
# Make template from VM
- snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME")
- template_uuid=$(xe snapshot-clone uuid=$snuuid new-name-label="$TNAME")
+ snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED")
+ xe snapshot-clone uuid=$snuuid new-name-label="$TNAME"
+else
+ #
+ # Template already installed, create VM from template
+ #
+ vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
fi
+
+#
+# Inject DevStack inside VM disk
+#
$TOP_DIR/build_xva.sh "$GUEST_NAME"
+# create a snapshot before the first boot
+# to allow a quick re-run with the same settings
+xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
+
+
+#
+# Run DevStack VM
+#
xe vm-start vm="$GUEST_NAME"
-if [ $PUB_IP == "dhcp" ]; then
- PUB_IP=$(xe_min vm-list name-label=$GUEST_NAME params=networks | sed -ne 's,^.*3/ip: \([0-9.]*\).*$,\1,p')
+
+#
+# Find IP and optionally wait for stack.sh to complete
+#
+
+function find_ip_by_name() {
+ local guest_name="$1"
+ local interface="$2"
+ local period=10
+ max_tries=10
+ i=0
+ while true
+ do
+ if [ $i -ge $max_tries ]; then
+ echo "Timed out waiting for devstack ip address"
+ exit 11
+ fi
+
+ devstackip=$(xe vm-list --minimal \
+ name-label=$guest_name \
+ params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p")
+ if [ -z "$devstackip" ]
+ then
+ sleep $period
+ ((i++))
+ else
+ echo $devstackip
+ break
+ fi
+ done
+}
+
+function ssh_no_check() {
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
+}
+
+# Note the XenServer needs to be on the chosen
+# network, so XenServer can access Glance API
+if [ "$HOST_IP_IFACE" == "eth2" ]; then
+ DOMU_IP=$MGT_IP
+ if [ $MGT_IP == "dhcp" ]; then
+ DOMU_IP=$(find_ip_by_name $GUEST_NAME 2)
+ fi
+else
+ DOMU_IP=$PUB_IP
+ if [ $PUB_IP == "dhcp" ]; then
+ DOMU_IP=$(find_ip_by_name $GUEST_NAME 3)
+ fi
fi
# If we have copied our ssh credentials, use ssh to monitor while the installation runs
WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
+COPYENV=${COPYENV:-1}
if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" = "1" ]; then
- # Done creating the container, let's tail the log
- echo
- echo "============================================================="
- echo " -- YAY! --"
- echo "============================================================="
- echo
echo "We're done launching the vm, about to start tailing the"
echo "stack.sh log. It will take a second or two to start."
echo
echo "Just CTRL-C at any time to stop tailing."
- set +o xtrace
-
- while ! ssh -q stack@$PUB_IP "[ -e run.sh.log ]"; do
- sleep 1
+ # wait for log to appear
+ while ! ssh_no_check -q stack@$DOMU_IP "[ -e run.sh.log ]"; do
+ sleep 10
done
- ssh stack@$PUB_IP 'tail -f run.sh.log' &
-
+ # output the run.sh.log
+ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' &
TAIL_PID=$!
function kill_tail() {
- kill $TAIL_PID
+ kill -9 $TAIL_PID
exit 1
}
-
# Let Ctrl-c kill tail and exit
trap kill_tail SIGINT
- echo "Waiting stack.sh to finish..."
- while ! ssh -q stack@$PUB_IP "grep -q 'stack.sh completed in' run.sh.log"; do
- sleep 1
+ # ensure we kill off the tail if we exit the script early
+ # for other reasons
+ add_on_exit "kill -9 $TAIL_PID || true"
+
+ # wait silently until stack.sh has finished
+ set +o xtrace
+ while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do
+ sleep 10
done
+ set -o xtrace
- kill $TAIL_PID
+ # kill the tail process now stack.sh has finished
+ kill -9 $TAIL_PID
- if ssh -q stack@$PUB_IP "grep -q 'stack.sh failed' run.sh.log"; then
+ # check for a failure
+ if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then
exit 1
fi
+ echo "################################################################################"
echo ""
- echo "Finished - Zip-a-dee Doo-dah!"
- echo "You can then visit the OpenStack Dashboard"
- echo "at http://$PUB_IP, and contact other services at the usual ports."
+ echo "All Finished!"
+ echo "You can visit the OpenStack Dashboard"
+ echo "at http://$DOMU_IP, and contact other services at the usual ports."
else
echo "################################################################################"
echo ""
@@ -269,10 +430,9 @@
echo "Now, you can monitor the progress of the stack.sh installation by "
echo "tailing /opt/stack/run.sh.log from within your domU."
echo ""
- echo "ssh into your domU now: 'ssh stack@$PUB_IP' using your password"
+ echo "ssh into your domU now: 'ssh stack@$DOMU_IP' using your password"
echo "and then do: 'tail -f /opt/stack/run.sh.log'"
echo ""
echo "When the script completes, you can then visit the OpenStack Dashboard"
- echo "at http://$PUB_IP, and contact other services at the usual ports."
-
+ echo "at http://$DOMU_IP, and contact other services at the usual ports."
fi
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 74efaff..89a0169 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -1,6 +1,18 @@
#!/bin/bash
+# This script is run on an Ubuntu VM.
+# This script is inserted into the VM by prepare_guest_template.sh
+# and is run when that VM boots.
+# It customizes a fresh Ubuntu install, so it is ready
+# to run stack.sh
+#
+# This includes installing the XenServer tools,
+# creating the user called "stack",
+# and shuts down the VM to signal the script has completed
+
set -x
+# Echo commands
+set -o xtrace
# Configurable nuggets
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
@@ -13,7 +25,7 @@
chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo
chroot $STAGING_DIR pip install xenapi
-# Install guest utilities
+# Install XenServer guest utilities
XEGUEST=xe-guest-utilities_5.6.100-651_amd64.deb
wget http://images.ansolabs.com/xen/$XEGUEST -O $XEGUEST
cp $XEGUEST $STAGING_DIR/root
@@ -68,3 +80,12 @@
rm -f stage.tgz
tar cfz stage.tgz stage
fi
+
+# remove self from rc.local
+# so this script is not run again
+rm -rf /etc/rc.local
+mv /etc/rc.local.preparebackup /etc/rc.local
+cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup
+
+# shutdown to notify we are done
+shutdown -h now
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
new file mode 100755
index 0000000..7c6dec4
--- /dev/null
+++ b/tools/xen/prepare_guest_template.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# This script is run by install_os_domU.sh
+#
+# Parameters:
+# - $GUEST_NAME - hostname for the DomU VM
+#
+# It modifies the ubuntu image created by install_os_domU.sh
+#
+# This script is responsible for customizing the fresh ubuntu
+# image so on boot it runs the prepare_guest.sh script
+# that modifies the VM so it is ready to run stack.sh.
+# It does this by mounting the disk image of the VM.
+#
+# The resultant image is started by install_os_domU.sh,
+# and once the VM has shutdown, build_xva.sh is run
+
+# Exit on errors
+set -o errexit
+# Echo commands
+set -o xtrace
+
+# This directory
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Include onexit commands
+. $TOP_DIR/scripts/on_exit.sh
+
+# Source params - override xenrc params in your localrc to suit your taste
+source xenrc
+
+#
+# Parameters
+#
+GUEST_NAME="$1"
+
+# Mount the VDI
+STAGING_DIR=$($TOP_DIR/scripts/manage-vdi open $GUEST_NAME 0 1 | grep -o "/tmp/tmp.[[:alnum:]]*")
+add_on_exit "$TOP_DIR/scripts/manage-vdi close $GUEST_NAME 0 1"
+
+# Make sure we have a stage
+if [ ! -d $STAGING_DIR/etc ]; then
+ echo "Stage is not properly set up!"
+ exit 1
+fi
+
+# Copy prepare_guest.sh to VM
+mkdir -p $STAGING_DIR/opt/stack/
+cp $TOP_DIR/prepare_guest.sh $STAGING_DIR/opt/stack/prepare_guest.sh
+
+# backup rc.local
+cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.preparebackup
+
+# run prepare_guest.sh on boot
+cat <<EOF >$STAGING_DIR/etc/rc.local
+GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ DO_TGZ=0 bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+EOF
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index fe5e810..7f2f3e6 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -19,7 +19,12 @@
set -eux
-. /etc/xensource-inventory
+if [ -a /etc/xensource-inventory ]
+then
+ . /etc/xensource-inventory
+else
+ . /etc/xcp/inventory
+fi
NAME="XenServer OpenStack VPX"
DATA_VDI_SIZE="500MiB"
diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi
index a0a27e8..05c4b07 100755
--- a/tools/xen/scripts/manage-vdi
+++ b/tools/xen/scripts/manage-vdi
@@ -7,46 +7,80 @@
device="${3-0}"
part="${4-}"
-xe_min()
-{
+function xe_min() {
local cmd="$1"
shift
xe "$cmd" --minimal "$@"
}
+function run_udev_settle() {
+ which_udev=$(which udevsettle) || true
+ if [ -n "$which_udev" ]; then
+ udevsettle
+ else
+ udevadm settle
+ fi
+}
+
vm_uuid=$(xe_min vm-list name-label="$vm")
vdi_uuid=$(xe_min vbd-list params=vdi-uuid vm-uuid="$vm_uuid" \
userdevice="$device")
dom0_uuid=$(xe_min vm-list is-control-domain=true)
-open_vdi()
-{
+function get_mount_device() {
+ vbd_uuid=$1
+
+ dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
+ if [[ "$dev" =~ "sm/" ]]; then
+ DEBIAN_FRONTEND=noninteractive \
+ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes \
+ install kpartx &> /dev/null || true
+ mapping=$(kpartx -av "/dev/$dev" | sed -ne 's,^add map \([a-z0-9\-]*\).*$,\1,p' | sed -ne "s,^\(.*${part}\)\$,\1,p")
+ if [ -z "$mapping" ]; then
+ echo "Failed to find mapping"
+ exit -1
+ fi
+ echo "/dev/mapper/${mapping}"
+ else
+ echo "/dev/$dev$part"
+ fi
+}
+
+function clean_dev_mappings() {
+ dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
+ if [[ "$dev" =~ "sm/" ]]; then
+ kpartx -dv "/dev/$dev"
+ fi
+}
+
+function open_vdi() {
vbd_uuid=$(xe vbd-create vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid" \
device=autodetect)
mp=$(mktemp -d)
xe vbd-plug uuid="$vbd_uuid"
- udevsettle
- dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
- mount "/dev/$dev$part" "$mp"
+ run_udev_settle
+
+ mount_device=$(get_mount_device "$vbd_uuid")
+ mount "$mount_device" "$mp"
echo "Your vdi is mounted at $mp"
}
-close_vdi()
-{
+function close_vdi() {
vbd_uuid=$(xe_min vbd-list vm-uuid="$dom0_uuid" vdi-uuid="$vdi_uuid")
- dev=$(xe_min vbd-list params=device uuid="$vbd_uuid")
- umount "/dev/$dev$part"
+ mount_device=$(get_mount_device "$vbd_uuid")
+ run_udev_settle
+ umount "$mount_device"
+
+ clean_dev_mappings
xe vbd-unplug uuid=$vbd_uuid
xe vbd-destroy uuid=$vbd_uuid
}
-if [ "$action" == "open" ]
-then
+if [ "$action" == "open" ]; then
open_vdi
-elif [ "$action" == "close" ]
-then
+elif [ "$action" == "close" ]; then
close_vdi
fi
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
new file mode 100755
index 0000000..a4db39c
--- /dev/null
+++ b/tools/xen/scripts/on_exit.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+set -o xtrace
+
+declare -a on_exit_hooks
+
+on_exit()
+{
+ for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0)
+ do
+ eval "${on_exit_hooks[$i]}"
+ done
+}
+
+add_on_exit()
+{
+ local n=${#on_exit_hooks[*]}
+ on_exit_hooks[$n]="$*"
+ if [[ $n -eq 0 ]]
+ then
+ trap on_exit EXIT
+ fi
+}
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
index a82f3a0..0feaec7 100755
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ b/tools/xen/scripts/uninstall-os-vpx.sh
@@ -17,19 +17,19 @@
# under the License.
#
-remove_data=
-if [ "$1" = "--remove-data" ]
-then
- remove_data=1
-fi
+set -ex
-set -eu
+# By default, don't remove the templates
+REMOVE_TEMPLATES=${REMOVE_TEMPLATES:-"false"}
+if [ "$1" = "--remove-templates" ]; then
+ REMOVE_TEMPLATES=true
+fi
xe_min()
{
local cmd="$1"
shift
- /opt/xensource/bin/xe "$cmd" --minimal "$@"
+ xe "$cmd" --minimal "$@"
}
destroy_vdi()
@@ -39,11 +39,8 @@
local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
- if [ "$type" = 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]
- then
- echo -n "Destroying data disk... "
+ if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
xe vdi-destroy uuid=$vdi_uuid
- echo "done."
fi
}
@@ -52,50 +49,36 @@
local vm_uuid="$1"
local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
- if [ "$power_state" != "halted" ]
- then
- echo -n "Shutting down VM... "
+ if [ "$power_state" != "halted" ]; then
xe vm-shutdown vm=$vm_uuid force=true
- echo "done."
fi
- if [ "$remove_data" = "1" ]
- then
- for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g')
- do
- destroy_vdi "$v"
- done
- fi
+ for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
+ destroy_vdi "$v"
+ done
- echo -n "Deleting VM... "
xe vm-uninstall vm=$vm_uuid force=true >/dev/null
- echo "done."
}
uninstall_template()
{
local vm_uuid="$1"
- if [ "$remove_data" = "1" ]
- then
- for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g')
- do
- destroy_vdi "$v"
- done
- fi
+ for v in $(xe_min vbd-list vm-uuid=$vm_uuid | sed -e 's/,/ /g'); do
+ destroy_vdi "$v"
+ done
- echo -n "Deleting template... "
xe template-uninstall template-uuid=$vm_uuid force=true >/dev/null
- echo "done."
}
-
-for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g')
-do
+# remove the VMs and their disks
+for u in $(xe_min vm-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
uninstall "$u"
done
-for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g')
-do
- uninstall_template "$u"
-done
+# remove the templates
+if [ "$REMOVE_TEMPLATES" == "true" ]; then
+ for u in $(xe_min template-list other-config:os-vpx=true | sed -e 's/,/ /g'); do
+ uninstall_template "$u"
+ done
+fi
diff --git a/tools/xen/scripts/xenoneirictemplate.sh b/tools/xen/scripts/xenoneirictemplate.sh
index 9e1e9de..7f10c33 100755
--- a/tools/xen/scripts/xenoneirictemplate.sh
+++ b/tools/xen/scripts/xenoneirictemplate.sh
@@ -3,7 +3,7 @@
## on Xenserver 6.0.2 Net install only
## Original Author: David Markey <david.markey@citrix.com>
## Author: Renuka Apte <renuka.apte@citrix.com>
-## This is not an officially supported guest OS on XenServer 6.02
+## This is not an officially supported guest OS on XenServer 6.0.2
BASE_DIR=$(cd $(dirname "$0") && pwd)
source $BASE_DIR/../../../localrc
@@ -15,11 +15,15 @@
exit 1
fi
-distro="Ubuntu 11.10"
+distro="Ubuntu 11.10 for DevStack"
arches=("32-bit" "64-bit")
preseedurl=${1:-"http://images.ansolabs.com/devstackubuntupreseed.cfg"}
+NETINSTALL_LOCALE=${NETINSTALL_LOCALE:-en_US}
+NETINSTALL_KEYBOARD=${NETINSTALL_KEYBOARD:-us}
+NETINSTALL_IFACE=${NETINSTALL_IFACE:-eth3}
+
for arch in ${arches[@]} ; do
echo "Attempting $distro ($arch)"
if [[ -n $(xe template-list name-label="$distro ($arch)" params=uuid --minimal) ]] ; then
@@ -30,7 +34,11 @@
echo "NETINSTALLIP not set in localrc"
exit 1
fi
- pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=en_US console-setup/ask_detect=false keyboard-configuration/layoutcode=us netcfg/choose_interface=eth3 netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}"
+ # Some of these settings can be found in example preseed files
+ # however these need to be answered before the netinstall
+ # is ready to fetch the preseed file, and as such must be here
+ # to get a fully automated install
+ pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 locale=${NETINSTALL_LOCALE} console-setup/ask_detect=false keyboard-configuration/layoutcode=${NETINSTALL_KEYBOARD} netcfg/choose_interface=${NETINSTALL_IFACE} netcfg/get_hostname=os netcfg/get_domain=os auto url=${preseedurl}"
if [ "$NETINSTALLIP" != "dhcp" ]
then
netcfgargs="netcfg/disable_autoconfig=true netcfg/get_nameservers=${NAMESERVERS} netcfg/get_ipaddress=${NETINSTALLIP} netcfg/get_netmask=${NETMASK} netcfg/get_gateway=${GATEWAY} netcfg/confirm_static=true"
diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in
index e315a8c..74b41cc 100644
--- a/tools/xen/templates/interfaces.in
+++ b/tools/xen/templates/interfaces.in
@@ -21,6 +21,3 @@
iface eth2 inet static
address @ETH2_IP@
netmask @ETH2_NETMASK@
-
-auto eth0
-iface eth0 inet dhcp
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index f434b11..102a492 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -5,12 +5,15 @@
# Size of image
VDI_MB=${VDI_MB:-5000}
+OSDOMU_MEM_MB=1024
# VM Password
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
-# Host Interface, i.e. the public facing interface on the nova vm
-HOST_IP_IFACE=${HOST_IP_IFACE:-eth0}
+# Host Interface, i.e. the interface on the nova vm you want to expose the services on
+# Usually either eth2 (management network) or eth3 (public network)
+# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
+HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}
# Our nova host's network info
VM_IP=${VM_IP:-10.255.255.255} # A host-only ip that let's the interface come up, otherwise unused
@@ -35,7 +38,8 @@
MGT_VLAN=${MGT_VLAN:-101}
MGT_DEV=${MGT_DEV:-eth0}
-OSDOMU_MEM_MB=1024
+# Guest installer network
+ENABLE_GI=true
# Source params
cd ../.. && source ./stackrc && cd $TOP_DIR