Merge "xenapi - use management network to reach OS VM"
diff --git a/README.md b/README.md
index 6570a14..1987db8 100644
--- a/README.md
+++ b/README.md
@@ -153,3 +153,23 @@
MYSQL_HOST=$SERVICE_HOST
RABBIT_HOST=$SERVICE_HOST
Q_HOST=$SERVICE_HOST
+
+# Cells
+
+Cells is a new scaling option with a full spec at http://wiki.openstack.org/blueprint-nova-compute-cells.
+
+To set up a cells environment, add the following to your `localrc`:
+
+ enable_service n-cell
+ enable_service n-api-meta
+ MULTI_HOST=True
+
    # The following have not been tested with cells; they may or may not work.
+ disable_service n-obj
+ disable_service cinder
+ disable_service c-sch
+ disable_service c-api
+ disable_service c-vol
+ disable_service n-xvnc
+
+Be aware that some features are currently missing in cells; one notable example is security groups.
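For reference, a complete minimal `localrc` for a single-host cells run might look like the sketch below; the password values are placeholders and are not part of this change:

    ADMIN_PASSWORD=my_super_secret
    MYSQL_PASSWORD=my_super_secret
    RABBIT_PASSWORD=my_super_secret
    SERVICE_PASSWORD=my_super_secret
    SERVICE_TOKEN=my_super_secret
    MULTI_HOST=True
    enable_service n-cell
    enable_service n-api-meta
    # Untested with cells (see above), disabled to be safe
    disable_service n-obj
    disable_service cinder
    disable_service c-sch
    disable_service c-api
    disable_service c-vol
    disable_service n-xvnc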
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index fbb1b77..34f4f62 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -43,13 +43,12 @@
# Import configuration
source $TOP_DIR/openrc
-# If quantum is not enabled we exit with exitcode 55 which mean
-# exercise is skipped.
-is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55
-
-# Import quantum fucntions
+# Import quantum functions
source $TOP_DIR/lib/quantum
+# If quantum is not enabled, we exit with exit code 55, which means the exercise is skipped.
+quantum_plugin_check_adv_test_requirements || exit 55
+
# Import exercise configuration
source $TOP_DIR/exerciserc
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 151e7e2..d50482e 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,7 +16,8 @@
python-migrate
python-mox
python-netaddr
-python-nose
+# RHEL6's python-nose is incompatible with Tempest
+python-nose #dist:f16,f17,f18
python-paste #dist:f16,f17,f18
python-paste-deploy #dist:f16,f17,f18
python-pep8
diff --git a/files/rpms/swift b/files/rpms/swift
index 1b36e34..c626d8e 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -7,7 +7,8 @@
python-eventlet
python-greenlet
python-netifaces
-python-nose
+# RHEL6's python-nose is incompatible with Tempest
+python-nose # dist:f16,f17,f18
python-paste-deploy # dist:f16,f17,f18
python-setuptools # dist:f16,f17,f18
python-simplejson
diff --git a/functions b/functions
index 0b2710c..669fa69 100644
--- a/functions
+++ b/functions
@@ -1415,6 +1415,35 @@
fi
}
+# Path permissions sanity check
+# check_path_perm_sanity path
+function check_path_perm_sanity() {
+ # Ensure no element of the path has 0700 permissions, which is very
+ # likely to cause issues for daemons. Inspired by default 0700
+ # homedir permissions on RHEL and common practice of making DEST in
+ # the stack user's homedir.
+
+ local real_path=$(readlink -f $1)
+ local rebuilt_path=""
+ for i in $(echo ${real_path} | tr "/" " "); do
+ rebuilt_path=$rebuilt_path"/"$i
+
+ if [[ $(stat -c '%a' ${rebuilt_path}) = 700 ]]; then
+ echo "*** DEST path element"
+ echo "*** ${rebuilt_path}"
+ echo "*** appears to have 0700 permissions."
+ echo "*** This is very likely to cause fatal issues for devstack daemons."
+
+ if [[ -n "$SKIP_PATH_SANITY" ]]; then
+ return
+ else
+ echo "*** Set SKIP_PATH_SANITY to skip this check"
+ die $LINENO "Invalid path permissions"
+ fi
+ fi
+ done
+}
+
# Restore xtrace
$XTRACE
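A quick illustration of how the new permission check behaves (a sketch using throwaway paths, not part of this change): any 0700 element in the resolved path aborts the run unless `SKIP_PATH_SANITY` is set.

    source functions                    # run from a devstack checkout
    mkdir -m 700 /tmp/stackhome         # simulate a locked-down homedir
    mkdir /tmp/stackhome/dest
    check_path_perm_sanity /tmp/stackhome/dest
    # prints the warning and dies with "Invalid path permissions"
    export SKIP_PATH_SANITY=1
    check_path_perm_sanity /tmp/stackhome/dest
    # still prints the warning, but returns so stack.sh can continue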
diff --git a/lib/ceilometer b/lib/ceilometer
index 1c289fd..90a1884 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,8 +1,8 @@
# lib/ceilometer
# Install and start **Ceilometer** service
-# To enable, add the following to localrc
-# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api
+# To enable Ceilometer services, add the following to localrc
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
# Dependencies:
# - functions
@@ -70,7 +70,7 @@
iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
- iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
+ iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
iniset $CEILOMETER_CONF DEFAULT verbose True
# Install the policy file for the API server
diff --git a/lib/cinder b/lib/cinder
index 82e7454..7e9c2ba 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -58,6 +58,14 @@
# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
+# Cinder reports allocations back to the scheduler at periodic intervals.
+# It turns out we can hit an "out of space" issue when we run tests too
+# quickly, simply because Cinder hasn't yet noticed that we freed up resources.
+# Make this configurable so that devstack-gate/tempest can set it to
+# less than the 60-second default.
+# https://bugs.launchpad.net/cinder/+bug/1180976
+CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
+
# Name of the lvm volume groups to use/create for iscsi volumes
# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
@@ -197,6 +205,7 @@
iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
+ iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
if is_service_enabled tls-proxy; then
# Set the service port for a proxy to take the original
diff --git a/lib/heat b/lib/heat
index cd0a204..0c95ebb 100644
--- a/lib/heat
+++ b/lib/heat
@@ -159,7 +159,6 @@
recreate_database heat utf8
$HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD
- $HEAT_DIR/tools/nova_create_flavors.sh
create_heat_cache_dir
}
diff --git a/lib/horizon b/lib/horizon
index 3d8b3e6..1ee530e 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -38,6 +38,18 @@
APACHE_USER=${APACHE_USER:-$USER}
APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
+# Set up service name and configuration path
+if is_ubuntu; then
+ APACHE_NAME=apache2
+ APACHE_CONF=sites-available/horizon
+elif is_fedora; then
+ APACHE_NAME=httpd
+ APACHE_CONF=conf.d/horizon.conf
+elif is_suse; then
+ APACHE_NAME=apache2
+ APACHE_CONF=vhosts.d/horizon.conf
+fi
+
# Functions
# ---------
@@ -135,8 +147,6 @@
HORIZON_REQUIRE=''
if is_ubuntu; then
- APACHE_NAME=apache2
- APACHE_CONF=sites-available/horizon
# Clean up the old config name
sudo rm -f /etc/apache2/sites-enabled/000-default
# Be a good citizen and use the distro tools here
@@ -145,9 +155,6 @@
# WSGI isn't enabled by default, enable it
sudo a2enmod wsgi
elif is_fedora; then
- APACHE_NAME=httpd
- APACHE_CONF=conf.d/horizon.conf
-
if [[ "$os_RELEASE" -ge "18" ]]; then
# fedora 18 has Require all denied in its httpd.conf
# and requires explicit Require all granted
@@ -155,14 +162,16 @@
fi
sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
elif is_suse; then
- APACHE_NAME=apache2
- APACHE_CONF=vhosts.d/horizon.conf
# WSGI isn't enabled by default, enable it
sudo a2enmod wsgi
else
exit_distro_not_supported "apache configuration"
fi
+ # Remove old log files that could mess with how devstack detects whether Horizon
+ # has been successfully started (see start_horizon() and functions::screen_it())
+ sudo rm -f /var/log/$APACHE_NAME/horizon_*
+
# Configure apache to run horizon
sudo sh -c "sed -e \"
s,%USER%,$APACHE_USER,g;
@@ -219,12 +228,8 @@
# stop_horizon() - Stop running processes (non-screen)
function stop_horizon() {
- if is_ubuntu; then
- stop_service apache2
- elif is_fedora; then
- stop_service httpd
- elif is_suse; then
- stop_service apache2
+ if [ -n "$APACHE_NAME" ]; then
+ stop_service $APACHE_NAME
else
exit_distro_not_supported "apache configuration"
fi
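For orientation, once `APACHE_NAME`/`APACHE_CONF` are set up front, the distro-specific paths used later resolve roughly as follows (illustrative, assuming the usual `/etc/$APACHE_NAME/$APACHE_CONF` and `/var/log/$APACHE_NAME` layout):

    # Ubuntu: /etc/apache2/sites-available/horizon,  logs /var/log/apache2/horizon_*
    # Fedora: /etc/httpd/conf.d/horizon.conf,        logs /var/log/httpd/horizon_*
    # SUSE:   /etc/apache2/vhosts.d/horizon.conf,    logs /var/log/apache2/horizon_*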
diff --git a/lib/nova b/lib/nova
index 6fa1db4..9fc4ded 100644
--- a/lib/nova
+++ b/lib/nova
@@ -37,6 +37,9 @@
NOVA_CONF_DIR=/etc/nova
NOVA_CONF=$NOVA_CONF_DIR/nova.conf
+NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
+NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
+
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
# Public facing bits
@@ -125,10 +128,6 @@
# Functions
# ---------
-function add_nova_opt {
- echo "$1" >>$NOVA_CONF
-}
-
# Helper to clean iptables rules
function clean_iptables() {
# Delete rules
@@ -152,7 +151,7 @@
instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 sudo virsh destroy || true
- echo $instances | xargs -n1 sudo virsh undefine || true
+ echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
fi
# Logout and delete iscsi sessions
@@ -415,7 +414,6 @@
# (Re)create ``nova.conf``
rm -f $NOVA_CONF
- add_nova_opt "[DEFAULT]"
iniset $NOVA_CONF DEFAULT verbose "True"
iniset $NOVA_CONF DEFAULT debug "True"
iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
@@ -539,6 +537,32 @@
iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
}
+function init_nova_cells() {
+ if is_service_enabled n-cell; then
+ cp $NOVA_CONF $NOVA_CELLS_CONF
+ iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB`
+ iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
+ iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
+ iniset $NOVA_CELLS_CONF cells enable True
+ iniset $NOVA_CELLS_CONF cells name child
+
+ iniset $NOVA_CONF DEFAULT scheduler_topic cells
+ iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI
+ iniset $NOVA_CONF cells enable True
+ iniset $NOVA_CONF cells name region
+
+ if is_service_enabled n-api-meta; then
+ NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
+ iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
+ iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
+ fi
+
+ $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
+ $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
+ $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
+ fi
+}
+
# create_nova_cache_dir() - Part of the init_nova() process
function create_nova_cache_dir() {
# Create cache dir
@@ -578,6 +602,10 @@
# Migrate nova database
$NOVA_BIN_DIR/nova-manage db sync
+ if is_service_enabled n-cell; then
+ recreate_database $NOVA_CELLS_DB latin1
+ fi
+
# (Re)create nova baremetal database
if is_baremetal; then
recreate_database nova_bm latin1
@@ -648,14 +676,26 @@
# start_nova() - Start running processes, including screen
function start_nova() {
- # The group **$LIBVIRT_GROUP** is added to the current user in this script.
- # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
+ NOVA_CONF_BOTTOM=$NOVA_CONF
+
# ``screen_it`` checks ``is_service_enabled``, it is not needed here
screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
- screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP $NOVA_BIN_DIR/nova-compute"
+
+ if is_service_enabled n-cell; then
+ NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
+ screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
+ screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
+ screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
+ fi
+
+ # The group **$LIBVIRT_GROUP** is added to the current user in this script.
+ # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
+ screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP \"$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM\""
screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
- screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
- screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
+ screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM"
+ screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM"
+ screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM"
+
screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
@@ -670,7 +710,9 @@
# stop_nova() - Stop running processes (non-screen)
function stop_nova() {
# Kill the nova screen windows
- for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do
+ # Some services are listed here twice since more than one instance
+ # of a service may be running in certain configs.
+ for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do
screen -S $SCREEN_NAME -p $serv -X kill
done
}
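To summarize the split that `init_nova_cells()` and `start_nova()` above set up when `n-cell` is enabled (a recap of the code, not additional behavior):

    # /etc/nova/nova.conf        parent ("region") cell: API services, scheduler_topic=cells,
    #                            compute_api_class=nova.compute.cells_api.ComputeCellsAPI
    # /etc/nova/nova-cells.conf  child cell: its own database (nova_cell), rabbit vhost
    #                            child_cell; n-cpu, n-net, n-sch and n-api-meta run against it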
diff --git a/lib/quantum b/lib/quantum
index 293ef3a..c36a743 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -75,7 +75,7 @@
# Default Quantum Port
Q_PORT=${Q_PORT:-9696}
# Default Quantum Host
-Q_HOST=${Q_HOST:-$HOST_IP}
+Q_HOST=${Q_HOST:-$SERVICE_HOST}
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
# Default auth strategy
@@ -86,7 +86,7 @@
Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
# Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
# Allow Overlapping IP among subnets
Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
# Use quantum-debug command
@@ -397,12 +397,23 @@
pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
[ ! -z "$pid" ] && sudo kill -9 $pid
fi
+ if is_service_enabled q-meta; then
+ pid=$(ps aux | awk '/quantum-ns-metadata-proxy/ { print $2 }')
+ [ ! -z "$pid" ] && sudo kill -9 $pid
+ fi
}
# cleanup_quantum() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_quantum() {
- :
+ if is_quantum_ovs_base_plugin; then
+ quantum_ovs_base_cleanup
+ fi
+
+ # delete all namespaces created by quantum
+ for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do
+ sudo ip netns delete ${ns}
+ done
}
# _configure_quantum_common()
@@ -498,7 +509,6 @@
# for l3-agent, only use per tenant router if we have namespaces
Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
- PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -577,10 +587,6 @@
iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
_quantum_setup_keystone $QUANTUM_CONF keystone_authtoken
- # Comment out keystone authtoken configuration in api-paste.ini
- # It is required to avoid any breakage in Quantum where the sample
- # api-paste.ini has authtoken configurations.
- _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken
# Configure plugin
quantum_plugin_configure_service
@@ -651,21 +657,6 @@
rm -f $QUANTUM_AUTH_CACHE_DIR/*
}
-function _quantum_commentout_keystone_authtoken() {
- local conf_file=$1
- local section=$2
-
- inicomment $conf_file $section auth_host
- inicomment $conf_file $section auth_port
- inicomment $conf_file $section auth_protocol
- inicomment $conf_file $section auth_url
-
- inicomment $conf_file $section admin_tenant_name
- inicomment $conf_file $section admin_user
- inicomment $conf_file $section admin_password
- inicomment $conf_file $section signing_dir
-}
-
function _quantum_setup_interface_driver() {
# ovs_use_veth needs to be set before the plugin configuration
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
index 05bfb85..e829940 100644
--- a/lib/quantum_plugins/README.md
+++ b/lib/quantum_plugins/README.md
@@ -34,3 +34,5 @@
* ``quantum_plugin_setup_interface_driver``
* ``has_quantum_plugin_security_group``:
return 0 if the plugin support quantum security group otherwise return 1
+* ``quantum_plugin_check_adv_test_requirements``:
+ return 0 if requirements are satisfied otherwise return 1
diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight
index 4857f49..edee0eb 100644
--- a/lib/quantum_plugins/bigswitch_floodlight
+++ b/lib/quantum_plugins/bigswitch_floodlight
@@ -56,5 +56,9 @@
return 1
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade
index 6e26ad7..fc86deb 100644
--- a/lib/quantum_plugins/brocade
+++ b/lib/quantum_plugins/brocade
@@ -50,5 +50,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$BRCD_XTRACE
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 324e255..b4b52e9 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -86,5 +86,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
index f61f50b..411f5a4 100644
--- a/lib/quantum_plugins/nec
+++ b/lib/quantum_plugins/nec
@@ -17,8 +17,6 @@
OFC_RETRY_MAX=${OFC_RETRY_MAX:-0}
OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1}
-OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-
# Main logic
# ---------------------------
@@ -118,5 +116,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index 6eefb02..fc06b55 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -8,7 +8,6 @@
source $TOP_DIR/lib/quantum_plugins/ovs_base
function setup_integration_bridge() {
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
_quantum_ovs_base_setup_bridge $OVS_BRIDGE
# Set manager to NVP controller (1st of list)
if [[ "$NVP_CONTROLLERS" != "" ]]; then
@@ -146,5 +145,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index ab16483..3741720 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -41,7 +41,6 @@
function quantum_plugin_configure_plugin_agent() {
# Setup integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
_quantum_ovs_base_setup_bridge $OVS_BRIDGE
_quantum_ovs_base_configure_firewall_driver
@@ -72,10 +71,13 @@
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- # Nova will always be installed along with quantum for a domU
- # devstack install, so it should be safe to rely on nova.conf
- # for xenapi configuration.
- Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF"
+ Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
+
+ # For now, duplicate the xen configuration already found in nova.conf
+ iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_url "$XENAPI_CONNECTION_URL"
+ iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_username "$XENAPI_USER"
+ iniset $Q_RR_CONF_FILE XENAPI xenapi_connection_password "$XENAPI_PASSWORD"
+
# Under XS/XCP, the ovs agent needs to target the dom0
# integration bridge. This is enabled by using a root wrapper
# that executes commands on dom0 via a XenAPI plugin.
@@ -144,5 +146,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index 2ada0db..a5e03ac 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -5,6 +5,9 @@
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
+OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+
function is_quantum_ovs_base_plugin() {
# Yes, we use OVS.
return 0
@@ -17,6 +20,18 @@
sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
}
+function quantum_ovs_base_cleanup() {
+ # remove all OVS ports that look like Quantum created ports
+ for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+ sudo ovs-vsctl del-port ${port}
+ done
+
+ # remove all OVS bridges created by Quantum
+ for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+ sudo ovs-vsctl del-br ${bridge}
+ done
+}
+
function _quantum_ovs_base_install_agent_packages() {
local kernel_version
# Install deps
diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid
index 912aa7e..1456710 100644
--- a/lib/quantum_plugins/plumgrid
+++ b/lib/quantum_plugins/plumgrid
@@ -35,5 +35,8 @@
:
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index 1139232..c482747 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -45,7 +45,6 @@
function quantum_plugin_configure_plugin_agent() {
# Set up integration bridge
- OVS_BRIDGE=${OVS_BRIDGE:-br-int}
_quantum_ovs_base_setup_bridge $OVS_BRIDGE
if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
@@ -71,5 +70,9 @@
return 0
}
+function quantum_plugin_check_adv_test_requirements() {
+ is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight
index 60e3924..385bd0d 100644
--- a/lib/quantum_thirdparty/bigswitch_floodlight
+++ b/lib/quantum_thirdparty/bigswitch_floodlight
@@ -7,7 +7,6 @@
BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633}
-OVS_BRIDGE=${OVS_BRIDGE:-br-int}
function configure_bigswitch_floodlight() {
:
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 27d3ba3..fc439ec 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -30,7 +30,7 @@
# that can be passed as arguments to is_service_enabled.
# We check for a call to iniset_rpc_backend in these files, meaning
# the service needs a backend.
- rpc_candidates=$(grep -rl iniset_rpc_backend . | awk -F/ '{print $NF}')
+ rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}')
for c in ${rpc_candidates}; do
if is_service_enabled $c; then
rpc_needed=0
@@ -138,6 +138,13 @@
fi
# change the rabbit password since the default is "guest"
sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
+ if is_service_enabled n-cell; then
+ # Add partitioned access for the child cell
+ if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then
+ sudo rabbitmqctl add_vhost child_cell
+ sudo rabbitmqctl set_permissions -p child_cell guest ".*" ".*" ".*"
+ fi
+ fi
elif is_service_enabled qpid; then
echo_summary "Starting qpid"
restart_service qpidd
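To check the child cell's RabbitMQ partition by hand (purely illustrative; `stack.sh` performs this setup automatically when `n-cell` is enabled):

    sudo rabbitmqctl list_vhosts                     # should list both "/" and "child_cell"
    sudo rabbitmqctl list_permissions -p child_cell  # guest should have ".*" ".*" ".*"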
diff --git a/stack.sh b/stack.sh
index 5a6945d..5dea000 100755
--- a/stack.sh
+++ b/stack.sh
@@ -208,6 +208,9 @@
sudo mkdir -p $DEST
sudo chown -R $STACK_USER $DEST
+# a basic test for $DEST path permissions (fatal on error unless skipped)
+check_path_perm_sanity ${DEST}
+
# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
# Internet access. ``stack.sh`` must have been previously run with Internet
# access to install prerequisites and fetch repositories.
@@ -560,6 +563,12 @@
# ============================
if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
+    # Avoid having to configure SELinux to allow things like httpd to
+    # access horizon files or run binaries like nodejs (LP#1175444)
+ if selinuxenabled; then
+ sudo setenforce 0
+ fi
+
# An old version (2.0.1) of python-crypto is probably installed on
# a fresh system, via the dependency chain
# cas->python-paramiko->python-crypto (related to anaconda).
@@ -949,8 +958,6 @@
echo_summary "Using XenServer virtualization driver"
read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
- XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
- XENAPI_USER=${XENAPI_USER:-"root"}
iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL"
iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER"
iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD"
@@ -1031,6 +1038,8 @@
LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
fi
+
+ init_nova_cells
fi
# Extra things to prepare nova for baremetal, before nova starts
@@ -1091,14 +1100,19 @@
create_quantum_initial_network
setup_quantum_debug
elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
+ NM_CONF=${NOVA_CONF}
+ if is_service_enabled n-cell; then
+ NM_CONF=${NOVA_CELLS_CONF}
+ fi
+
# Create a small network
- $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+ $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
# Create some floating ips
- $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
+ $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
# Create a second pool
- $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
+ $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
if is_service_enabled quantum; then
diff --git a/stackrc b/stackrc
index 4a76c3a..871c8a1 100644
--- a/stackrc
+++ b/stackrc
@@ -234,6 +234,10 @@
# Compatibility until it's eradicated from CI
USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
+# Xen config common to nova and quantum
+XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
+XENAPI_USER=${XENAPI_USER:-"root"}
+
# Local variables:
# mode: shell-script
# End:
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 3fadc78..258d7a3 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,5 +1,4 @@
-Getting Started With XenServer 5.6 and Devstack
-===============================================
+# Getting Started With XenServer 5.6 and Devstack
The purpose of the code in this directory is to help developers bootstrap
a XenServer 5.6 (or greater) + Openstack development environment. This file gives
some pointers on how to get started.
@@ -9,8 +8,7 @@
machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack
to communicate with the host.
-Step 1: Install Xenserver
-------------------------
+## Step 1: Install Xenserver
Install XenServer 5.6+ on a clean box. You can get XenServer by signing
up for an account on citrix.com, and then visiting:
https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148
@@ -25,16 +23,14 @@
* XenServer Gateway: 192.168.1.1
* XenServer DNS: 192.168.1.1
-Step 2: Download devstack
---------------------------
+## Step 2: Download devstack
On your XenServer host, run the following commands as root:
wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
unzip -o master -d ./devstack
cd devstack/*/
-Step 3: Configure your localrc inside the devstack directory
-------------------------------------------------------------
+## Step 3: Configure your localrc inside the devstack directory
Devstack uses a localrc for user-specific configuration. Note that
the XENAPI_PASSWORD must be your dom0 root password.
Of course, use real passwords if this machine is exposed.
@@ -43,12 +39,18 @@
MYSQL_PASSWORD=my_super_secret
SERVICE_TOKEN=my_super_secret
ADMIN_PASSWORD=my_super_secret
- SERVICE_PASSWORD=$ADMIN_PASSWORD
+ SERVICE_PASSWORD=my_super_secret
RABBIT_PASSWORD=my_super_secret
- # This is the password for your guest (for both stack and root users)
+ SWIFT_HASH="66a3d6b56c1f479c8b4e70ab5c2000f5"
+ # This is the password for the OpenStack VM (for both stack and root users)
GUEST_PASSWORD=my_super_secret
+
+ # XenAPI parameters
# IMPORTANT: The following must be set to your dom0 root password!
- XENAPI_PASSWORD=my_super_secret
+ XENAPI_PASSWORD=my_xenserver_root_password
+ XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
+ VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
+
# Do not download the usual images yet!
IMAGE_URLS=""
# Explicitly set virt driver here
@@ -60,34 +62,32 @@
# Host Interface, i.e. the interface on the nova vm you want to expose the
# services on. Usually eth2 (management network) or eth3 (public network) and
# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
- # This is also used as the interface for the Ubuntu install
# The default is eth3.
# HOST_IP_IFACE=eth3
+
+ # Settings for netinstalling Ubuntu
+ # UBUNTU_INST_RELEASE=precise
+
# First time Ubuntu network install params
- NETINSTALLIP="dhcp"
- NAMESERVERS=""
- NETMASK=""
- GATEWAY=""
+ # UBUNTU_INST_IFACE="eth3"
+ # UBUNTU_INST_IP="dhcp"
EOF
-Step 4: Run ./install_os_domU.sh from the tools/xen directory
--------------------------------------------------------------
-cd tools/xen
-./install_os_domU.sh
+## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory
-Once this script finishes executing, log into the VM (openstack domU)
-that it installed and tail the run.sh.log file. You will need to wait
-until it run.sh has finished executing.
+ cd tools/xen
+ ./install_os_domU.sh
+Once this script finishes executing, log into the VM (openstack domU) that it
+installed and tail the run.sh.log file. You will need to wait until run.sh has
+finished executing.
-Step 5: Do cloudy stuff!
---------------------------
+## Step 5: Do cloudy stuff!
* Play with horizon
* Play with the CLI
* Log bugs to devstack and core projects, and submit fixes!
-Step 6: Run from snapshot
--------------------------
+## Step 6: Run from snapshot
If you want to quickly re-run devstack from a clean state,
using the same settings you used in your previous run,
-you can revert the DomU to the snapshot called "before_first_boot"
+you can revert the DomU to the snapshot called `before_first_boot`
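As a concrete example of step 4's "tail the run.sh.log file" (the guest address is a placeholder and the log path assumes the default `DEST=/opt/stack`; adjust both to your setup):

    ssh stack@<address_of_the_openstack_vm>    # password is GUEST_PASSWORD from your localrc
    tail -f /opt/stack/run.sh.log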
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 8d46939..0d5e31e 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -1,15 +1,13 @@
#!/bin/bash
-# This script is a level script
-# It must be run on a XenServer or XCP machine
+# This script must be run on a XenServer or XCP machine
#
# It creates a DomU VM that runs OpenStack services
#
# For more details see: README.md
-# Exit on errors
set -o errexit
-# Echo commands
+set -o nounset
set -o xtrace
# Abort if localrc is not set
@@ -31,13 +29,12 @@
# xapi functions
. $THIS_DIR/functions
-
#
# Get Settings
#
# Source params - override xenrc params in your localrc to suit your taste
-source xenrc
+source $THIS_DIR/xenrc
xe_min()
{
@@ -253,11 +250,12 @@
mkdir -p $HTTP_SERVER_LOCATION
fi
cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
- MIRROR=${MIRROR:-""}
- if [ -n "$MIRROR" ]; then
- sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \
- -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
- fi
+
+ sed \
+ -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \
+ -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \
+ -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \
+ -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
fi
# Update the template
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index fe52445..0e11226 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -10,54 +10,51 @@
# creating the user called "stack",
# and shuts down the VM to signal the script has completed
-set -x
-# Echo commands
+set -o errexit
+set -o nounset
set -o xtrace
# Configurable nuggets
-GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
-STAGING_DIR=${STAGING_DIR:-stage}
-DO_TGZ=${DO_TGZ:-1}
-XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"}
-STACK_USER=${STACK_USER:-stack}
+GUEST_PASSWORD="$1"
+XS_TOOLS_PATH="$2"
+STACK_USER="$3"
# Install basics
-chroot $STAGING_DIR apt-get update
-chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo
-chroot $STAGING_DIR pip install xenapi
+apt-get update
+apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
+apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo
+pip install xenapi
# Install XenServer guest utilities
-cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH}
-chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH
-chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove
-chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults
+dpkg -i $XS_TOOLS_PATH
+update-rc.d -f xe-linux-distribution remove
+update-rc.d xe-linux-distribution defaults
# Make a small cracklib dictionary, so that passwd still works, but we don't
# have the big dictionary.
-mkdir -p $STAGING_DIR/usr/share/cracklib
-echo a | chroot $STAGING_DIR cracklib-packer
+mkdir -p /usr/share/cracklib
+echo a | cracklib-packer
# Make /etc/shadow, and set the root password
-chroot $STAGING_DIR "pwconv"
-echo "root:$GUEST_PASSWORD" | chroot $STAGING_DIR chpasswd
+pwconv
+echo "root:$GUEST_PASSWORD" | chpasswd
# Put the VPX into UTC.
-rm -f $STAGING_DIR/etc/localtime
+rm -f /etc/localtime
# Add stack user
-chroot $STAGING_DIR groupadd libvirtd
-chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
-echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
-echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
+groupadd libvirtd
+useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
+echo $STACK_USER:$GUEST_PASSWORD | chpasswd
+echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
# Give ownership of /opt/stack to stack user
-chroot $STAGING_DIR chown -R $STACK_USER /opt/stack
+chown -R $STACK_USER /opt/stack
# Make our ip address hostnames look nice at the command prompt
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/root/.bashrc
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/etc/profile
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /opt/stack/.bashrc
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /root/.bashrc
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /etc/profile
function setup_vimrc {
if [ ! -e $1 ]; then
@@ -72,20 +69,15 @@
}
# Setup simple .vimrcs
-setup_vimrc $STAGING_DIR/root/.vimrc
-setup_vimrc $STAGING_DIR/opt/stack/.vimrc
-
-if [ "$DO_TGZ" = "1" ]; then
- # Compress
- rm -f stage.tgz
- tar cfz stage.tgz stage
-fi
+setup_vimrc /root/.vimrc
+setup_vimrc /opt/stack/.vimrc
# remove self from local.rc
# so this script is not run again
rm -rf /etc/rc.local
-mv /etc/rc.local.preparebackup /etc/rc.local
-cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup
+
+# Restore rc.local file
+cp /etc/rc.local.preparebackup /etc/rc.local
# shutdown to notify we are done
shutdown -h now
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index 19bd2f8..6ea6f63 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -15,9 +15,8 @@
# The resultant image is started by install_os_domU.sh,
# and once the VM has shutdown, build_xva.sh is run
-# Exit on errors
set -o errexit
-# Echo commands
+set -o nounset
set -o xtrace
# This directory
@@ -75,7 +74,8 @@
# run prepare_guest.sh on boot
cat <<EOF >$STAGING_DIR/etc/rc.local
-GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \
- DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \
- bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+#!/bin/sh -e
+bash /opt/stack/prepare_guest.sh \\
+ "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\
+ > /opt/stack/prepare_guest.log 2>&1
EOF
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
index 43b6dec..b7a8eff 100755
--- a/tools/xen/scripts/install_ubuntu_template.sh
+++ b/tools/xen/scripts/install_ubuntu_template.sh
@@ -7,9 +7,8 @@
# Based on a script by: David Markey <david.markey@citrix.com>
#
-# Exit on errors
set -o errexit
-# Echo commands
+set -o nounset
set -o xtrace
# This directory
@@ -54,11 +53,11 @@
pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \
console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \
keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \
-netcfg/choose_interface=${HOST_IP_IFACE} \
+netcfg/choose_interface=${UBUNTU_INST_IFACE} \
netcfg/get_hostname=os netcfg/get_domain=os auto \
url=${preseed_url}"
-if [ "$NETINSTALLIP" != "dhcp" ]; then
+if [ "$UBUNTU_INST_IP" != "dhcp" ]; then
netcfgargs="netcfg/disable_autoconfig=true \
netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \
netcfg/get_ipaddress=${UBUNTU_INST_IP} \
@@ -70,11 +69,16 @@
xe template-param-set uuid=$new_uuid \
other-config:install-methods=http \
- other-config:install-repository="$UBUNTU_INST_REPOSITORY" \
+ other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \
PV-args="$pvargs" \
other-config:debian-release="$UBUNTU_INST_RELEASE" \
other-config:default_template=true \
other-config:disks='<provision><disk device="0" size="'$disk_size'" sr="" bootable="true" type="system"/></provision>' \
other-config:install-arch="$UBUNTU_INST_ARCH"
+if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then
+ xe template-param-set uuid=$new_uuid \
+ other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY"
+fi
+
echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index e4d8ac9..e50f954 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -8,6 +8,9 @@
# Name of this guest
GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
+# Template cleanup
+CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
+
# Size of image
VDI_MB=${VDI_MB:-5000}
OSDOMU_MEM_MB=1024
@@ -19,7 +22,6 @@
# Host Interface, i.e. the interface on the nova vm you want to expose the
# services on. Usually eth2 (management network) or eth3 (public network) and
# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
-# This is also used as the interface for the Ubuntu install
HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}
#
@@ -62,15 +64,16 @@
# XenServer 6.1 and later or XCP 1.6 or later
# 11.10 is only really supported with XenServer 6.0.2 and later
UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu"
+UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.net"
+UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
+UBUNTU_INST_HTTP_PROXY=""
UBUNTU_INST_LOCALE="en_US"
UBUNTU_INST_KEYBOARD="us"
-# network configuration for HOST_IP_IFACE during install
+# network configuration for ubuntu netinstall
+UBUNTU_INST_IFACE="eth3"
UBUNTU_INST_IP="dhcp"
UBUNTU_INST_NAMESERVERS=""
UBUNTU_INST_NETMASK=""
UBUNTU_INST_GATEWAY=""
-# Load stackrc defaults
-# then override with settings from localrc
-cd ../.. && source ./stackrc && cd $TOP_DIR
+source ../../stackrc
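With the variables renamed above, pointing the Ubuntu netinstall at a local mirror or proxy from `localrc` would look roughly like this (host names are placeholders):

    UBUNTU_INST_HTTP_HOSTNAME="mirror.example.com"
    UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
    UBUNTU_INST_HTTP_PROXY="http://proxy.example.com:3128"   # leave empty for no proxy
    UBUNTU_INST_IFACE="eth3"
    UBUNTU_INST_IP="dhcp"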
diff --git a/unstack.sh b/unstack.sh
index 3ac2985..d1d0349 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -109,4 +109,5 @@
if is_service_enabled quantum; then
stop_quantum
stop_quantum_third_party
+ cleanup_quantum
fi