Merge "enable apache2 server as front end for swift"
diff --git a/README.md b/README.md
index 23200e2..46d3f96 100644
--- a/README.md
+++ b/README.md
@@ -155,6 +155,32 @@
Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472)
Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan)
+devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ML2 plugin is shown below:
+
+ # VLAN configuration
+ Q_PLUGIN=ml2
+ ENABLE_TENANT_VLANS=True
+
+ # GRE tunnel configuration
+ Q_PLUGIN=ml2
+ ENABLE_TENANT_TUNNELS=True
+
+ # VXLAN tunnel configuration
+ Q_PLUGIN=ml2
+ Q_ML2_TENANT_NETWORK_TYPE=vxlan
+
+By default, devstack will use the OVS agent on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. linuxbridge).
+
+ Variable Name Notes
+ -------------------------------------------------------------------------------------
+ Q_AGENT This specifies which agent to run with the ML2 Plugin (either `openvswitch` or `linuxbridge`).
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default is none. Note that ML2 works with the OVS and LinuxBridge agents by default.
+ Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to all available TypeDrivers.
+ Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none.
+ Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none.
+ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none.
+ Q_AGENT_EXTRA_AGENT_OPTS Extra configuration options to pass to the OVS or LinuxBridge Agent.
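+
+For example, to run the ML2 plugin with the LinuxBridge agent and VXLAN tenant networks, add the following to `localrc`:
+
+ Q_PLUGIN=ml2
+ Q_AGENT=linuxbridge
+ Q_ML2_TENANT_NETWORK_TYPE=vxlan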
+
# Tempest
If tempest has been successfully configured, a basic set of smoke tests can be run as follows:
@@ -195,15 +221,5 @@
To setup a cells environment add the following to your `localrc`:
enable_service n-cell
- enable_service n-api-meta
- MULTI_HOST=True
- # The following have not been tested with cells, they may or may not work.
- disable_service n-obj
- disable_service cinder
- disable_service c-sch
- disable_service c-api
- disable_service c-vol
- disable_service n-xvnc
-
-Be aware that there are some features currently missing in cells, one notable one being security groups.
+Be aware that some features are currently missing in cells, one notable example being security groups. The exercises have been patched to disable functionality not supported by cells.
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 3c83725..e2baecd 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -42,6 +42,8 @@
# Test as the admin user
. $TOP_DIR/openrc admin admin
+# Cells does not support aggregates.
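+# (exit code 55 tells the exercise runner to count this as skipped rather than failed)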
+is_service_enabled n-cell && exit 55
# Create an aggregate
# ===================
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 1814732..a3a14eb 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -80,12 +80,18 @@
# List security groups
nova secgroup-list
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
- nova secgroup-create $SECGROUP "$SECGROUP description"
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
+if is_service_enabled n-cell; then
+ # Cells does not support security groups, so force the use of "default"
+ SECGROUP="default"
+ echo "Using the default security group because of Cells."
+else
+ # Create a secgroup
+ if ! nova secgroup-list | grep -q $SECGROUP; then
+ nova secgroup-create $SECGROUP "$SECGROUP description"
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+ echo "Security group not created"
+ exit 1
+ fi
fi
fi
@@ -200,8 +206,12 @@
end_time=$(date +%s)
echo "Completed cinder delete in $((end_time - start_time)) seconds"
-# Delete secgroup
-nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ $SECGROUP = "default" ]] ; then
+ echo "Skipping deleting default security group"
+else
+ # Delete secgroup
+ nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index eec8636..5b0d1ba 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -58,11 +58,17 @@
IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
-# Add a secgroup
-if ! euca-describe-groups | grep -q $SECGROUP; then
- euca-add-group -d "$SECGROUP description" $SECGROUP
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
- die $LINENO "Security group not created"
+if is_service_enabled n-cell; then
+ # Cells does not support security groups, so force the use of "default"
+ SECGROUP="default"
+ echo "Using the default security group because of Cells."
+else
+ # Add a secgroup
+ if ! euca-describe-groups | grep -q $SECGROUP; then
+ euca-add-group -d "$SECGROUP description" $SECGROUP
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
+ die $LINENO "Security group not created"
+ fi
fi
fi
@@ -77,7 +83,7 @@
# Volumes
# -------
-if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
+if is_service_enabled c-vol && ! is_service_enabled n-cell; then
VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
@@ -117,41 +123,45 @@
echo "Volume Tests Skipped"
fi
-# Allocate floating address
-FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
+if is_service_enabled n-cell; then
+ echo "Floating IP Tests Skipped because of Cells."
+else
+ # Allocate floating address
+ FLOATING_IP=`euca-allocate-address | cut -f2`
+ die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
-# Associate floating address
-euca-associate-address -i $INSTANCE $FLOATING_IP || \
- die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
+ # Associate floating address
+ euca-associate-address -i $INSTANCE $FLOATING_IP || \
+ die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
-# Authorize pinging
-euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
- die $LINENO "Failure authorizing rule in $SECGROUP"
+ # Authorize pinging
+ euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+ die $LINENO "Failure authorizing rule in $SECGROUP"
-# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+ # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+ ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
-# Revoke pinging
-euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
- die $LINENO "Failure revoking rule in $SECGROUP"
+ # Revoke pinging
+ euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+ die $LINENO "Failure revoking rule in $SECGROUP"
-# Release floating address
-euca-disassociate-address $FLOATING_IP || \
- die $LINENO "Failure disassociating address $FLOATING_IP"
+ # Release floating address
+ euca-disassociate-address $FLOATING_IP || \
+ die $LINENO "Failure disassociating address $FLOATING_IP"
-# Wait just a tick for everything above to complete so release doesn't fail
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
- die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
-fi
+ # Wait just a tick for everything above to complete so release doesn't fail
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
+ die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
+ fi
-# Release floating address
-euca-release-address $FLOATING_IP || \
- die $LINENO "Failure releasing address $FLOATING_IP"
+ # Release floating address
+ euca-release-address $FLOATING_IP || \
+ die $LINENO "Failure releasing address $FLOATING_IP"
-# Wait just a tick for everything above to complete so terminate doesn't fail
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
- die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+ # Wait just a tick for everything above to complete so terminate doesn't fail
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
+ die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+ fi
fi
# Terminate instance
@@ -166,8 +176,12 @@
die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
fi
-# Delete secgroup
-euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ "$SECGROUP" = "default" ]] ; then
+ echo "Skipping deleting default security group"
+else
+ # Delete secgroup
+ euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index b22ef11..ac65cf7 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -56,6 +56,8 @@
# Instance name
VM_NAME="ex-float"
+# Cells does not support the floating IP API calls
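+# (exit code 55 tells the exercise runner to count this as skipped rather than failed)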
+is_service_enabled n-cell && exit 55
# Launching a server
# ==================
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index f574bb3..b2b391c 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -78,12 +78,18 @@
# List security groups
nova secgroup-list
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
- nova secgroup-create $SECGROUP "$SECGROUP description"
- if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
+if is_service_enabled n-cell; then
+ # Cells does not support security groups, so force the use of "default"
+ SECGROUP="default"
+ echo "Using the default security group because of Cells."
+else
+ # Create a secgroup
+ if ! nova secgroup-list | grep -q $SECGROUP; then
+ nova secgroup-create $SECGROUP "$SECGROUP description"
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+ echo "Security group not created"
+ exit 1
+ fi
fi
fi
@@ -201,8 +207,12 @@
die $LINENO "Server $VM_NAME not deleted"
fi
-# Delete secgroup
-nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ $SECGROUP = "default" ]] ; then
+ echo "Skipping deleting default security group"
+else
+ # Delete secgroup
+ nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
set +o xtrace
echo "*********************************************************************"
diff --git a/files/apts/swift b/files/apts/swift
index c52c68b..1c283cf 100644
--- a/files/apts/swift
+++ b/files/apts/swift
@@ -1,5 +1,6 @@
curl
gcc
+libffi-dev
memcached
python-configobj
python-coverage
diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf
new file mode 100644
index 0000000..66a3751
--- /dev/null
+++ b/files/dnsmasq-for-baremetal-from-nova-network.conf
@@ -0,0 +1,3 @@
+enable-tftp
+tftp-root=/tftpboot
+dhcp-boot=pxelinux.0
diff --git a/files/rpms/swift b/files/rpms/swift
index ee1fad8..2cc4a0b 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,5 +1,6 @@
curl
gcc
+libffi-devel
memcached
python-configobj
python-coverage
diff --git a/functions b/functions
index eb83dfb..262f70f 100644
--- a/functions
+++ b/functions
@@ -745,12 +745,17 @@
# For backward compatibility if we have **swift** in ENABLED_SERVICES all the
# **s-** services will be enabled. This will be deprecated in the future.
#
+# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
+# We also need to make sure to treat **n-cell-region** and **n-cell-child**
+# as enabled in this case.
+#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled() {
services=$@
for service in ${services}; do
[[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+ [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
@@ -1214,6 +1219,14 @@
return
fi
+ # vmdk format images
+ if [[ "$image_url" =~ '.vmdk' ]]; then
+ IMAGE="$FILES/${IMAGE_FNAME}"
+ IMAGE_NAME="${IMAGE_FNAME%.vmdk}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}"
+ return
+ fi
+
# XenServer-ovf-format images are provided as .vhd.tgz as well
# and should not be decompressed prior to loading
if [[ "$image_url" =~ '.vhd.tgz' ]]; then
@@ -1283,9 +1296,9 @@
if [ "$CONTAINER_FORMAT" = "bare" ]; then
if [ "$UNPACK" = "zcat" ]; then
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
else
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -1293,12 +1306,12 @@
# kernel for use when uploading the root filesystem.
KERNEL_ID=""; RAMDISK_ID="";
if [ -n "$KERNEL" ]; then
- KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+ KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
fi
if [ -n "$RAMDISK" ]; then
- RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+ RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+ glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
fi
}
@@ -1478,11 +1491,7 @@
# Get the path to the pip command.
# get_pip_command
function get_pip_command() {
- if is_fedora; then
- which pip-python
- else
- which pip
- fi
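+    # Prefer pip; fall back to pip-python, the name Fedora installs it under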
+ which pip || which pip-python
if [ $? -ne 0 ]; then
die $LINENO "Unable to find pip; cannot continue"
diff --git a/lib/baremetal b/lib/baremetal
index bed3c09..145544d 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -80,6 +80,15 @@
# change the virtualization type: --engine qemu
BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
+# To provide PXE, configure nova-network's dnsmasq rather than running the one
+# dedicated to baremetal. When enabling this, make sure these conditions are
+# fulfilled:
+# 1) nova-compute and nova-network run on the same host
+# 2) nova-network uses FlatDHCPManager
+# NOTE: the other BM_DNSMASQ_* options have no effect if this option is enabled.
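+# For example, add to localrc: BM_DNSMASQ_FROM_NOVA_NETWORK=True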
+BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
+
# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
if [ "$BM_USE_FAKE_ENV" ]; then
BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
@@ -239,14 +248,14 @@
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $BM_DEPLOY_KERNEL \
- --public --disk-format=aki \
+ --is-public True --disk-format=aki \
< $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
BM_DEPLOY_RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $BM_DEPLOY_RAMDISK \
- --public --disk-format=ari \
+ --is-public True --disk-format=ari \
< $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
}
@@ -294,14 +303,14 @@
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $image_name-kernel \
- --public --disk-format=aki \
+ --is-public True --disk-format=aki \
< $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
--name $image_name-initrd \
- --public --disk-format=ari \
+ --is-public True --disk-format=ari \
< $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
}
@@ -371,14 +380,14 @@
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
- --name "$IMAGE_NAME-kernel" --public \
+ --name "$IMAGE_NAME-kernel" --is-public True \
--container-format aki \
--disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
RAMDISK_ID=$(glance \
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
- --name "$IMAGE_NAME-ramdisk" --public \
+ --name "$IMAGE_NAME-ramdisk" --is-public True \
--container-format ari \
--disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
else
@@ -390,7 +399,7 @@
--os-auth-token $token \
--os-image-url http://$GLANCE_HOSTPORT \
image-create \
- --name "${IMAGE_NAME%.img}" --public \
+ --name "${IMAGE_NAME%.img}" --is-public True \
--container-format $CONTAINER_FORMAT \
--disk-format $DISK_FORMAT \
${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
diff --git a/lib/ceilometer b/lib/ceilometer
index 548496e..8768122 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,8 +1,11 @@
# lib/ceilometer
# Install and start **Ceilometer** service
-# To enable Ceilometer services, add the following to localrc
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+# To enable a minimal set of Ceilometer services, add the following to localrc:
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+#
+# To also enable the Ceilometer alarm services, add the following to localrc:
+# enable_service ceilometer-alarm-notify ceilometer-alarm-eval
# Dependencies:
# - functions
@@ -81,7 +84,6 @@
iniset $CEILOMETER_CONF DEFAULT os_username ceilometer
iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD
iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME
- iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL
iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http
iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer
@@ -136,12 +138,14 @@
screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF"
}
# stop_ceilometer() - Stop running processes
function stop_ceilometer() {
# Kill the ceilometer screen windows
- for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do
+ for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do
screen -S $SCREEN_NAME -p $serv -X kill
done
}
diff --git a/lib/horizon b/lib/horizon
index b06ea1c..89bd659 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -88,6 +88,9 @@
local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
cp $HORIZON_SETTINGS $local_settings
+ if is_service_enabled neutron; then
+ _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP
+ fi
# enable loadbalancer dashboard in case service is enabled
if is_service_enabled q-lbaas; then
_horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
diff --git a/lib/infra b/lib/infra
new file mode 100644
index 0000000..0b73259
--- /dev/null
+++ b/lib/infra
@@ -0,0 +1,56 @@
+# lib/infra
+#
+# Functions to install infrastructure projects needed by other projects
+# early in the cycle. We need this so we can do things like gate on
+# requirements as a global list.
+
+# Dependencies:
+# ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# unfubar_setuptools
+# install_infra
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+PBR_DIR=$DEST/pbr
+REQUIREMENTS_DIR=$DEST/requirements
+
+# Entry Points
+# ------------
+
+# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
+function unfubar_setuptools() {
+ # this is a giant game of who's on first, but it does consistently work
+ # there is hope that upstream python packaging fixes this in the future
+ echo_summary "Unbreaking setuptools"
+ pip_install -U setuptools
+ pip_install -U pip
+ uninstall_package python-setuptools
+ pip_install -U setuptools
+ pip_install -U pip
+}
+
+
+# install_infra() - Collect source and prepare
+function install_infra() {
+ # bring down global requirements
+ git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+
+ # Install pbr
+ git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
+ setup_develop $PBR_DIR
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
new file mode 100644
index 0000000..4d343f5
--- /dev/null
+++ b/lib/neutron_plugins/midonet
@@ -0,0 +1,82 @@
+# Neutron MidoNet plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function is_neutron_ovs_base_plugin() {
+ # MidoNet does not use l3-agent
+ # 0 means True here
+ return 1
+}
+
+function neutron_plugin_create_nova_conf() {
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"midonet.nova.virt.libvirt.vif.MidonetVifDriver"}
+}
+
+function neutron_plugin_install_agent_packages() {
+ :
+}
+
+function neutron_plugin_configure_common() {
+ Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
+ Q_PLUGIN_CONF_FILENAME=midonet.ini
+ Q_DB_NAME="neutron_midonet"
+ Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
+}
+
+function neutron_plugin_configure_debug_command() {
+ :
+}
+
+function neutron_plugin_configure_dhcp_agent() {
+ die $LINENO "q-dhcp must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_l3_agent() {
+ die $LINENO "q-l3 must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_plugin_agent() {
+ die $LINENO "q-agt must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_service() {
+ if [[ "$MIDONET_API_URI" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI
+ fi
+ if [[ "$MIDONET_USERNAME" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
+ fi
+ if [[ "$MIDONET_PASSWORD" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD
+ fi
+ if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
+ fi
+ if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
+ fi
+ if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID
+ fi
+}
+
+function neutron_plugin_setup_interface_driver() {
+ # May change in the future
+ :
+}
+
+function has_neutron_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
+function neutron_plugin_check_adv_test_requirements() {
+ # 0 means True here
+ return 1
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index fcff870..ff49d8e 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -5,10 +5,42 @@
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace
+# Set this to quickly enable tunneling with ML2.
+# Select either 'gre', 'vxlan', or '(gre vxlan)'
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-}
+# This has to be set here since the agent will set this in the config file
+if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+ Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
+elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+ Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre)
+fi
+
# Default openvswitch L2 agent
Q_AGENT=${Q_AGENT:-openvswitch}
source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
+# List of MechanismDrivers to load
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-}
+# List of Type Drivers to load
+Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan}
+# Default GRE TypeDriver options
+Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES}
+# Default VXLAN TypeDriver options
+Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000}
+# Default VLAN TypeDriver options
+Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-}
+
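+# Take a list of name=value options ($1) and set each one in section $3 of
+# config file $2. For example:
+#   populate_ml2_config tenant_network_types=gre /$Q_PLUGIN_CONF_FILE ml2
+# runs: iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre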
+function populate_ml2_config() {
+ OPTS=$1
+ CONF=$2
+ SECTION=$3
+
+ for I in "${OPTS[@]}"; do
+ # Replace the first '=' with ' ' for iniset syntax
+ iniset $CONF $SECTION ${I/=/ }
+ done
+}
+
function neutron_plugin_configure_common() {
Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
@@ -17,26 +49,31 @@
}
function neutron_plugin_configure_service() {
- if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre
- iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES
+ if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+ Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
+ elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+ # This assumes you want a simple configuration, and will overwrite
+ # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS.
+ Q_SRV_EXTRA_OPTS=(tenant_network_types=gre)
+ Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES)
elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types vlan
+ Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan)
else
echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts."
fi
- # Override ``ML2_VLAN_RANGES`` and any needed agent configuration
- # variables in ``localrc`` for more complex physical network
- # configurations.
- if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
- ML2_VLAN_RANGES=$PHYSICAL_NETWORK
- if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
- ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE
+ # Allow overriding the VLAN configuration (for example, to configure provider
+ # VLANs) by first checking if Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is set.
+ if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then
+ if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+ ML2_VLAN_RANGES=$PHYSICAL_NETWORK
+ if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+ ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE
+ fi
fi
- fi
- if [[ "$ML2_VLAN_RANGES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE ml2_type_vlan network_vlan_ranges $ML2_VLAN_RANGES
+ if [[ "$ML2_VLAN_RANGES" != "" ]]; then
+ Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=$ML2_VLAN_RANGES)
+ fi
fi
# REVISIT(rkukura): Setting firewall_driver here for
@@ -52,6 +89,20 @@
iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver
fi
+ # Since we enable the tunnel TypeDrivers, also enable a local_ip
+ iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+
+ populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+
+ populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2
+
+ populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre
+
+ populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan
+
+ if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
+ populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan
+ fi
}
function has_neutron_plugin_security_group() {
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
new file mode 100644
index 0000000..b3c726f
--- /dev/null
+++ b/lib/neutron_thirdparty/midonet
@@ -0,0 +1,64 @@
+# MidoNet
+# -------
+
+# This file implements functions required to configure MidoNet as the third-party
+# system used with devstack's Neutron. To include this file, add the following
+# to localrc:
+#
+# * enable_service midonet
+#
+
+# MidoNet devstack destination dir
+MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
+
+# MidoNet client repo
+MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
+MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
+MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient
+
+# MidoNet OpenStack repo
+MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git}
+MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master}
+MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack
+MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py}
+
+
+MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log}
+MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log}
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_midonet() {
+ :
+}
+
+function init_midonet() {
+
+ # Initialize DB. Evaluate the output of setup_midonet_topology.py to set
+ # env variables for provider router ID and metadata router ID
+ eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices`
+ die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set."
+ die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set."
+
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id
+ iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id
+}
+
+function install_midonet() {
+ git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
+ git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH
+ export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH
+}
+
+function start_midonet() {
+ :
+}
+
+function stop_midonet() {
+ :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/nova b/lib/nova
index 617fb08..7a5ff1f 100644
--- a/lib/nova
+++ b/lib/nova
@@ -568,11 +568,11 @@
iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
iniset $NOVA_CELLS_CONF cells enable True
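+ # cell_type distinguishes the child compute cell from the top-level API cell below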
+ iniset $NOVA_CELLS_CONF cells cell_type compute
iniset $NOVA_CELLS_CONF cells name child
- iniset $NOVA_CONF DEFAULT scheduler_topic cells
- iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI
iniset $NOVA_CONF cells enable True
+ iniset $NOVA_CONF cells cell_type api
iniset $NOVA_CONF cells name region
if is_service_enabled n-api-meta; then
@@ -714,8 +714,8 @@
if is_service_enabled n-cell; then
NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
- screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
- screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
+ screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
+ screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
fi
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
diff --git a/lib/oslo b/lib/oslo
new file mode 100644
index 0000000..1eb13db
--- /dev/null
+++ b/lib/oslo
@@ -0,0 +1,42 @@
+# lib/oslo
+#
+# Functions to install oslo libraries from git
+#
+# We need this to handle the fact that projects would like to use
+# pre-released versions of oslo libraries.
+
+# Dependencies:
+# ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_oslo
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+OSLOCFG_DIR=$DEST/oslo.config
+OSLOMSG_DIR=$DEST/oslo.messaging
+
+# Entry Points
+# ------------
+
+# install_oslo() - Collect source and prepare
+function install_oslo() {
+ git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
+ setup_develop $OSLOCFG_DIR
+
+ git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
+ setup_develop $OSLOMSG_DIR
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/tempest b/lib/tempest
index 3831c28..aaa7281 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -42,6 +42,7 @@
TEMPEST_DIR=$DEST/tempest
TEMPEST_CONF_DIR=$TEMPEST_DIR/etc
TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf
+TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest}
NOVA_SOURCE_DIR=$DEST/nova
@@ -195,6 +196,12 @@
fi
fi
+ # Oslo
+ iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH
+ mkdir -p $TEMPEST_STATE_PATH
+ iniset $TEMPEST_CONF DEFAULT use_stderr False
+ iniset $TEMPEST_CONF DEFAULT log_file tempest.log
+
# Timeouts
iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT
@@ -275,11 +282,15 @@
iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2"
fi
+ # Dashboard
+ iniset $TEMPEST_CONF dashboard dashboard_url "http://$SERVICE_HOST/"
+ iniset $TEMPEST_CONF dashboard login_url "http://$SERVICE_HOST/auth/login/"
+
# cli
iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR
# service_available
- for service in nova cinder glance neutron swift heat ; do
+ for service in nova cinder glance neutron swift heat horizon ; do
if is_service_enabled $service ; then
iniset $TEMPEST_CONF service_available $service "True"
else
diff --git a/stack.sh b/stack.sh
index c4d414e..5ba60d2 100755
--- a/stack.sh
+++ b/stack.sh
@@ -300,6 +300,8 @@
# Source project function libraries
source $TOP_DIR/lib/apache
source $TOP_DIR/lib/tls
+source $TOP_DIR/lib/infra
+source $TOP_DIR/lib/oslo
source $TOP_DIR/lib/horizon
source $TOP_DIR/lib/keystone
source $TOP_DIR/lib/glance
@@ -314,8 +316,6 @@
# Set the destination directories for other OpenStack projects
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-PBR_DIR=$DEST/pbr
-
# Interactive Configuration
# -------------------------
@@ -588,6 +588,8 @@
install_neutron_agent_packages
fi
+# Unbreak the giant mess that is the current state of setuptools
+unfubar_setuptools
# System-specific preconfigure
# ============================
@@ -658,9 +660,11 @@
echo_summary "Installing OpenStack project source"
-# Install pbr
-git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
-setup_develop $PBR_DIR
+# Install required infra support libraries
+install_infra
+
+# Install oslo libraries that have graduated
+install_oslo
# Install clients libraries
install_keystoneclient
@@ -1050,6 +1054,11 @@
iniset $NOVA_CONF baremetal driver $BM_DRIVER
iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER
iniset $NOVA_CONF baremetal tftp_root /tftpboot
+ if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then
+ BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf
+ sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF"
+ iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF"
+ fi
# Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
@@ -1295,15 +1304,16 @@
create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
# otherwise user can manually add it later by calling nova-baremetal-manage
- # otherwise user can manually add it later by calling nova-baremetal-manage
[[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
- # NOTE: we do this here to ensure that our copy of dnsmasq is running
- sudo pkill dnsmasq || true
- sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
- --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
- --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
- ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
+ if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "False" ]]; then
+ # NOTE: we do this here to ensure that our copy of dnsmasq is running
+ sudo pkill dnsmasq || true
+ sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
+ --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
+ --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
+ ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
+ fi
# ensure callback daemon is running
sudo pkill nova-baremetal-deploy-helper || true
screen_it baremetal "nova-baremetal-deploy-helper"
diff --git a/stackrc b/stackrc
index 50774e4..74a399c 100644
--- a/stackrc
+++ b/stackrc
@@ -116,6 +116,14 @@
OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
+# oslo.config
+OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
+OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
+
+# oslo.messaging
+OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
+OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master}
+
# pbr drives the setuptools configs
PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
PBR_BRANCH=${PBR_BRANCH:-master}
@@ -128,6 +136,10 @@
NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master}
+# consolidated openstack requirements
+REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
+REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
+
# storage service
SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
SWIFT_BRANCH=${SWIFT_BRANCH:-master}
@@ -215,8 +227,8 @@
# ``IMAGE_URLS`` to be set directly in ``localrc``.
case "$VIRT_DRIVER" in
openvz)
- DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
- IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
+ IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
libvirt)
case "$LIBVIRT_TYPE" in
lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
@@ -228,7 +240,8 @@
esac
;;
vsphere)
- IMAGE_URLS="";;
+ DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
+ IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
*) # Default to Cirros with kernel, ramdisk and disk image
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
diff --git a/tools/xen/functions b/tools/xen/functions
index 7146858..7616a5f 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -53,18 +53,18 @@
find $1 -path '*/xapi.d/plugins' -type d -print
}
-function install_xapi_plugins_from_zipball {
+function install_xapi_plugins_from {
local XAPI_PLUGIN_DIR
local EXTRACTED_FILES
local EXTRACTED_PLUGINS_DIR
+ EXTRACTED_FILES="$1"
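+ # Callers extract the zipball beforehand and are responsible for removing it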
+
XAPI_PLUGIN_DIR=$(xapi_plugin_location)
- EXTRACTED_FILES=$(extract_remote_zipball $1)
EXTRACTED_PLUGINS_DIR=$(find_xapi_plugins_dir $EXTRACTED_FILES)
cp -pr $EXTRACTED_PLUGINS_DIR/* $XAPI_PLUGIN_DIR
- rm -rf $EXTRACTED_FILES
chmod a+x ${XAPI_PLUGIN_DIR}*
}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 6eb3013..e762f6d 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -63,12 +63,25 @@
## Nova plugins
NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)}
-install_xapi_plugins_from_zipball $NOVA_ZIPBALL_URL
+EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL")
+install_xapi_plugins_from "$EXTRACTED_NOVA"
+
+LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print)
+if [ -n "$LOGROT_SCRIPT" ]; then
+ mkdir -p "/var/log/xen/guest"
+ cp "$LOGROT_SCRIPT" /root/consolelogrotate
+ chmod +x /root/consolelogrotate
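+ # Rotate the guest console logs every minute via root's crontab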
+ echo "* * * * * /root/consolelogrotate" | crontab
+fi
+
+rm -rf "$EXTRACTED_NOVA"
## Install the netwrap xapi plugin to support agent control of dom0 networking
if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)}
- install_xapi_plugins_from_zipball $NEUTRON_ZIPBALL_URL
+ EXTRACTED_NEUTRON=$(extract_remote_zipball "$NEUTRON_ZIPBALL_URL")
+ install_xapi_plugins_from "$EXTRACTED_NEUTRON"
+ rm -rf "$EXTRACTED_NEUTRON"
fi
create_directory_for_kernels
@@ -138,9 +151,7 @@
# Destroy any instances that were launched
for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
echo "Shutting down nova instance $uuid"
- xe vm-unpause uuid=$uuid || true
- xe vm-shutdown uuid=$uuid || true
- xe vm-destroy uuid=$uuid
+ xe vm-uninstall uuid=$uuid force=true
done
# Destroy orphaned vdis
diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi
index 381e671..909ce32 100755
--- a/tools/xen/scripts/manage-vdi
+++ b/tools/xen/scripts/manage-vdi
@@ -41,7 +41,17 @@
echo "Failed to find mapping"
exit -1
fi
- echo "/dev/mapper/${mapping}"
+
+ local device="/dev/mapper/${mapping}"
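+ # Wait up to 5 seconds for the device-mapper node to appear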
+ for (( i = 0; i < 5; i++ )) ; do
+ if [ -b $device ] ; then
+ echo $device
+ return
+ fi
+ sleep 1
+ done
+ echo "ERROR: timed out waiting for dev-mapper"
+ exit 1
else
echo "/dev/$dev$part"
fi