Merge "Hide output when configuring apt retry"
diff --git a/.mailmap b/.mailmap
index 29be995..43e4e6e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -4,3 +4,4 @@
Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
+Sean Dague <sean.dague@samsung.com> <sdague@linux.vnet.ibm.com> <sean@dague.net>
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 04bff48..0000000
--- a/AUTHORS
+++ /dev/null
@@ -1,51 +0,0 @@
-Aaron Lee <aaron.lee@rackspace.com>
-Aaron Rosen <arosen@nicira.com>
-Adam Gandelman <adamg@canonical.com>
-Akihiro MOTOKI <motoki@da.jp.nec.com>
-Andrew Laski <andrew.laski@rackspace.com>
-Andy Smith <github@anarkystic.com>
-Anthony Young <sleepsonthefloor@gmail.com>
-Armando Migliaccio <armando.migliaccio@eu.citrix.com>
-Brad Hall <brad@nicira.com>
-Chmouel Boudjnah <chmouel@chmouel.com>
-Dan Prince <dprince@redhat.com>
-Dean Troyer <dtroyer@gmail.com>
-Devin Carlen <devin.carlen@gmail.com>
-Doug hellmann <doug.hellmann@dreamhost.com>
-Eddie Hebert <edhebert@gmail.com>
-Edgar Magana <emagana@gmail.com>
-Eoghan Glynn <eglynn@redhat.com>
-Eric Windisch <ewindisch@cloudscaling.com>
-Gabriel Hurley <gabriel@strikeawe.com>
-Gary Kotton <gkotton@redhat.com>
-Hengqing Hu <hudayou@hotmail.com>
-Hua ZHANG <zhuadl@cn.ibm.com>
-Isaku Yamahata <yamahata@private.email.ne.jp>
-Jake Dahn <admin@jakedahn.com>
-James E. Blair <james.blair@rackspace.com>
-Jason Cannavale <jason.cannavale@rackspace.com>
-Jay Pipes <jaypipes@gmail.com>
-Jesse Andrews <anotherjesse@gmail.com>
-Jian Wen <jian.wen@canonical.com>
-Joe Gordon <jogo@cloudscaling.com>
-Johannes Erdfelt <johannes.erdfelt@rackspace.com>
-John Postlethwait <john.postlethwait@nebula.com>
-Josh Kearney <josh@jk0.org>
-Justin Shepherd <galstrom21@gmail.com>
-Ken Pepple <ken.pepple@rabbityard.com>
-Kiall Mac Innes <kiall@managedit.ie>
-Matt Joyce <matt.joyce@cloudscaling.com>
-Osamu Habuka <xiu.yushen@gmail.com>
-Russell Bryant <rbryant@redhat.com>
-Scott Moser <smoser@ubuntu.com>
-Sean Dague <sdague@linux.vnet.ibm.com>
-Sumit Naiksatam <sumitnaiksatam@gmail.com>
-Thierry Carrez <thierry@openstack.org>
-Todd Willey <xtoddx@gmail.com>
-Tres Henry <tres@treshenry.net>
-Vincent Untz <vuntz@suse.com>
-Vishvananda Ishaya <vishvananda@gmail.com>
-Yun Mao <yunmao@gmail.com>
-Yong Sheng Gong <gongysh@cn.ibm.com>
-Zhongyue Luo <lzyeval@gmail.com>
-Zhenguo Niu <niu.zglinux@gmail.com>
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
new file mode 100644
index 0000000..0891d02
--- /dev/null
+++ b/MAINTAINERS.rst
@@ -0,0 +1,47 @@
+MAINTAINERS
+===========
+
+
+Overview
+--------
+
+The following is a list of people known to have interests in
+particular areas or sub-systems of devstack.
+
+It is a rather general guide intended to help seed the initial
+reviewers list of a change. A +1 on a review from someone identified
+as a maintainer of the affected area is a strong positive signal to
+the core team that the change is sound.
+
+The ``devstack-core`` group can still be added to all reviews.
+
+
+Format
+~~~~~~
+
+The format of the file is the name of the maintainer and their
+gerrit-registered email.
+
+
+Maintainers
+-----------
+
+.. contents:: :local:
+
+Fedora/CentOS/RHEL
+~~~~~~~~~~~~~~~~~~
+
+ * Ian Wienand <iwienand@redhat.com>
+
+Xen
+~~~
+
+Cinder
+~~~~~~
+
+Neutron
+~~~~~~~
+
+tempest
+~~~~~~~
+
diff --git a/clean.sh b/clean.sh
index e2374e7..db1a1e4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -84,7 +84,6 @@
fi
# Clean projects
-cleanup_oslo
cleanup_cinder
cleanup_glance
cleanup_keystone
diff --git a/files/apts/neutron b/files/apts/neutron
index 648716a..92e0a06 100644
--- a/files/apts/neutron
+++ b/files/apts/neutron
@@ -23,3 +23,4 @@
qpidd # NOPRIME
sqlite3
vlan
+radvd # NOPRIME
diff --git a/files/apts/q-l3 b/files/apts/q-l3
new file mode 100644
index 0000000..b98b628
--- /dev/null
+++ b/files/apts/q-l3
@@ -0,0 +1,2 @@
+conntrackd
+keepalived
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index d4841b1..f3bafc7 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -20,6 +20,7 @@
sqlite3
sudo
vlan
+radvd # NOPRIME
# FIXME: qpid is not part of openSUSE, those names are tentative
python-qpid # NOPRIME
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 15ed973..f02c02b 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -22,3 +22,4 @@
qpid-cpp-server # NOPRIME
sqlite
sudo
+radvd # NOPRIME
diff --git a/files/rpms/q-l3 b/files/rpms/q-l3
new file mode 100644
index 0000000..a7a190c
--- /dev/null
+++ b/files/rpms/q-l3
@@ -0,0 +1,2 @@
+conntrack-tools
+keepalived
diff --git a/functions b/functions
index 1fa6346..76f7047 100644
--- a/functions
+++ b/functions
@@ -85,7 +85,7 @@
# OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
if [[ "$image_url" =~ 'openvz' ]]; then
image_name="${image_fname%.tar.gz}"
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name" --is-public=True --container-format ami --disk-format ami < "${image}"
+ openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
return
fi
@@ -196,7 +196,7 @@
vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
+ openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
return
fi
@@ -212,11 +212,11 @@
# directly from volume.
force_vm_mode="--property vm_mode=xen"
fi
- glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name "$image_name" --is-public=True \
+ openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ "$image_name" --public \
--container-format=ovf --disk-format=vhd \
$force_vm_mode < "${image}"
return
@@ -227,11 +227,11 @@
# Setting metadata, so PV mode is used.
if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
image_name="${image_fname%.xen-raw.tgz}"
- glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name "$image_name" --is-public=True \
+ openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ "$image_name" --public \
--container-format=tgz --disk-format=raw \
--property vm_mode=xen < "${image}"
return
@@ -289,6 +289,15 @@
disk_format=iso
container_format=bare
;;
+ *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz)
+ local extension="${image_fname#*.}"
+ image_name=$(basename "$image" ".$extension")
+ disk_format=vhd
+ container_format=bare
+ if [ "${image_fname##*.}" == "gz" ]; then
+ unpack=zcat
+ fi
+ ;;
*) echo "Do not know what to do with $image_fname"; false;;
esac
@@ -298,9 +307,9 @@
if [ "$container_format" = "bare" ]; then
if [ "$unpack" = "zcat" ]; then
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name" $img_property --is-public True --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+ openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
else
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name" $img_property --is-public True --container-format=$container_format --disk-format $disk_format < "${image}"
+ openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -308,12 +317,12 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name-kernel" $img_property --is-public True --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$image_name-ramdisk" $img_property --is-public True --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
fi
- glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${image_name%.img}" $img_property --is-public True --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+ openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
fi
}
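
The hunks above move image uploads from the deprecated glance CLI to the unified openstack client: token auth becomes --os-token/--os-url, and "image-create --name X --is-public=True" becomes "image create X --public". A minimal sketch of the two forms side by side, using a placeholder image file and assuming $token and $GLANCE_HOSTPORT are already set, as they are at this point in the script:

    # old style (removed above)
    glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
        image-create --name "cirros-test" --is-public=True \
        --container-format bare --disk-format qcow2 < cirros.img

    # new style (added above)
    openstack --os-token $token --os-url http://$GLANCE_HOSTPORT \
        image create "cirros-test" --public \
        --container-format bare --disk-format qcow2 < cirros.img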
diff --git a/functions-common b/functions-common
index 5284056..c096664 100644
--- a/functions-common
+++ b/functions-common
@@ -466,6 +466,13 @@
[[ "$(uname -m)" == "$1" ]]
}
+# Quick check for a Rackspace host; n.b. Rackspace-provided images
+# have the Xen tools installed, but a custom image may not.
+function is_rackspace {
+ [ -f /usr/bin/xenstore-ls ] && \
+ sudo /usr/bin/xenstore-ls vm-data | grep -q "Rackspace"
+}
+
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
# is_fedora
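
is_rackspace is deliberately safe to call on any host: the file test short-circuits the && chain, so the sudo xenstore-ls probe only runs where the Xen tools are actually installed. A minimal usage sketch (the echo is illustrative; stack.sh uses the check to gate its pip cleanup):

    if is_rackspace; then
        echo "Detected a Rackspace-provided image"
    fi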
diff --git a/lib/baremetal b/lib/baremetal
index adcbe4c..e3b2b9a 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -229,19 +229,19 @@
fi
# load them into glance
- BM_DEPLOY_KERNEL_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $BM_DEPLOY_KERNEL \
- --is-public True --disk-format=aki \
+ BM_DEPLOY_KERNEL_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $BM_DEPLOY_KERNEL \
+ --public --disk-format=aki \
< $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2)
- BM_DEPLOY_RAMDISK_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $BM_DEPLOY_RAMDISK \
- --is-public True --disk-format=ari \
+ BM_DEPLOY_RAMDISK_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $BM_DEPLOY_RAMDISK \
+ --public --disk-format=ari \
< $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2)
}
@@ -284,19 +284,19 @@
OUT_RAMDISK=${out##*,}
# load them into glance
- KERNEL_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $image_name-kernel \
- --is-public True --disk-format=aki \
+ KERNEL_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $image_name-kernel \
+ --public --disk-format=aki \
< $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
- RAMDISK_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $image_name-initrd \
- --is-public True --disk-format=ari \
+ RAMDISK_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $image_name-initrd \
+ --public --disk-format=ari \
< $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
}
@@ -362,18 +362,18 @@
if [ "$CONTAINER_FORMAT" = "bare" ]; then
extract_and_upload_k_and_r_from_image $token $IMAGE
elif [ "$CONTAINER_FORMAT" = "ami" ]; then
- KERNEL_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name "$IMAGE_NAME-kernel" --is-public True \
+ KERNEL_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ "$IMAGE_NAME-kernel" --public \
--container-format aki \
--disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
- RAMDISK_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name "$IMAGE_NAME-ramdisk" --is-public True \
+ RAMDISK_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ "$IMAGE_NAME-ramdisk" --public \
--container-format ari \
--disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
else
@@ -381,11 +381,11 @@
return
fi
- glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name "${IMAGE_NAME%.img}" --is-public True \
+ openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ "${IMAGE_NAME%.img}" --public \
--container-format $CONTAINER_FORMAT \
--disk-format $DISK_FORMAT \
${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
diff --git a/lib/ceilometer b/lib/ceilometer
index 54d95c5..7bd1421 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -223,16 +223,19 @@
# start_ceilometer() - Start running processes, including screen
function start_ceilometer {
+ screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+
+ # Start the compute agent last to allow time for the collector to
+ # fully wake up and connect to the message bus. See bug #1355809
if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'"
fi
if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF"
fi
- screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
- screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
- screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
- screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
# only die on API if it was actually intended to be turned on
if is_service_enabled ceilometer-api; then
diff --git a/lib/glance b/lib/glance
index 92577d9..78e5e88 100644
--- a/lib/glance
+++ b/lib/glance
@@ -96,6 +96,10 @@
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry
+ if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+ iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
+ fi
+ iniset_rpc_backend glance $GLANCE_REGISTRY_CONF DEFAULT
cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
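
With notification_driver set and the RPC backend wired in, glance-registry emits image notifications onto the same message bus the other services use. A quick, illustrative sanity check of the rendered file after stack.sh runs (path per the default $GLANCE_REGISTRY_CONF):

    grep notification_driver /etc/glance/glance-registry.conf
    # expected: notification_driver = messaging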
diff --git a/lib/heat b/lib/heat
index 510b683..b6124c0 100644
--- a/lib/heat
+++ b/lib/heat
@@ -190,6 +190,7 @@
# stop_heat() - Stop running processes
function stop_heat {
# Kill the screen windows
+ local serv
for serv in h-eng h-api h-api-cfn h-api-cw; do
screen_stop $serv
done
@@ -213,26 +214,26 @@
# create_heat_accounts() - Set up common required heat accounts
function create_heat_accounts {
# migrated from files/keystone_data.sh
- SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
- ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+ local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
- HEAT_USER=$(get_or_create_user "heat" \
- "$SERVICE_PASSWORD" $SERVICE_TENANT)
- get_or_add_user_role $ADMIN_ROLE $HEAT_USER $SERVICE_TENANT
+ local heat_user=$(get_or_create_user "heat" \
+ "$SERVICE_PASSWORD" $service_tenant)
+ get_or_add_user_role $admin_role $heat_user $service_tenant
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- HEAT_SERVICE=$(get_or_create_service "heat" \
+ local heat_service=$(get_or_create_service "heat" \
"orchestration" "Heat Orchestration Service")
- get_or_create_endpoint $HEAT_SERVICE \
+ get_or_create_endpoint $heat_service \
"$REGION_NAME" \
"$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
"$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
"$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
- HEAT_CFN_SERVICE=$(get_or_create_service "heat-cfn" \
+ local heat_cfn_service=$(get_or_create_service "heat-cfn" \
"cloudformation" "Heat CloudFormation Service")
- get_or_create_endpoint $HEAT_CFN_SERVICE \
+ get_or_create_endpoint $heat_cfn_service \
"$REGION_NAME" \
"$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
"$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
@@ -247,36 +248,36 @@
# heat_stack_owner role is given to users who create Heat stacks,
# it's the default role used by heat to delegate to the heat service
# user (for performing deferred operations via trusts), see heat.conf
- HEAT_OWNER_ROLE=$(get_or_create_role "heat_stack_owner")
+ local heat_owner_role=$(get_or_create_role "heat_stack_owner")
# Give the role to the demo and admin users so they can create stacks
# in either of the projects created by devstack
- get_or_add_user_role $HEAT_OWNER_ROLE demo demo
- get_or_add_user_role $HEAT_OWNER_ROLE admin demo
- get_or_add_user_role $HEAT_OWNER_ROLE admin admin
+ get_or_add_user_role $heat_owner_role demo demo
+ get_or_add_user_role $heat_owner_role admin demo
+ get_or_add_user_role $heat_owner_role admin admin
iniset $HEAT_CONF DEFAULT deferred_auth_method trusts
fi
if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
# Note we have to pass token/endpoint here because the current endpoint and
# version negotiation in OSC means just --os-identity-api-version=3 won't work
- KS_ENDPOINT_V3="$KEYSTONE_SERVICE_URI/v3"
+ local ks_endpoint_v3="$KEYSTONE_SERVICE_URI/v3"
- D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ D_ID=$(openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
--os-identity-api-version=3 domain list | grep ' heat ' | get_field 1)
if [[ -z "$D_ID" ]]; then
- D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ D_ID=$(openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
--os-identity-api-version=3 domain create heat \
--description "Owns users and projects created by heat" \
| grep ' id ' | get_field 2)
iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
- openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
--os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
--domain $D_ID heat_domain_admin \
--description "Manages users and projects created by heat"
- openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+ openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
--os-identity-api-version=3 role add \
--user heat_domain_admin --domain ${D_ID} admin
iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
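
The heat hunks above replace upper-case globals (HEAT_USER, HEAT_SERVICE, ...) with lower-case local variables, so sourcing lib/heat no longer leaks setup-time IDs into the caller's environment. A minimal sketch of the difference, with hypothetical names:

    function leaky  { TENANT_ID=abc123; }          # still visible after the call
    function scoped { local tenant_id=abc123; }    # discarded when it returns
    leaky;  echo ${TENANT_ID:-unset}     # prints abc123
    scoped; echo ${tenant_id:-unset}     # prints unset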
diff --git a/lib/ironic b/lib/ironic
index b56abcb..b05edcf 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -102,6 +102,12 @@
# install_ironic() - Collect source and prepare
function install_ironic {
+ # make sure all needed services are enabled
+ for srv in nova glance key; do
+ if ! is_service_enabled "$srv"; then
+ die $LINENO "$srv should be enabled for Ironic."
+ fi
+ done
git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
setup_develop $IRONIC_DIR
}
@@ -119,11 +125,33 @@
sudo rm -rf $IRONIC_AUTH_CACHE_DIR
}
-# configure_ironic() - Set config files, create data dirs, etc
-function configure_ironic {
+# configure_ironic_dirs() - Create all directories required by Ironic and
+# associated services.
+function configure_ironic_dirs {
if [[ ! -d $IRONIC_CONF_DIR ]]; then
sudo mkdir -p $IRONIC_CONF_DIR
fi
+ sudo mkdir -p $IRONIC_DATA_DIR
+ sudo mkdir -p $IRONIC_STATE_PATH
+ sudo mkdir -p $IRONIC_TFTPBOOT_DIR
+ sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+ sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
+ if is_ubuntu; then
+ local pxebin=/usr/lib/syslinux/pxelinux.0
+ elif is_fedora; then
+ local pxebin=/usr/share/syslinux/pxelinux.0
+ fi
+ if [ ! -f $pxebin ]; then
+ die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+ fi
+
+ cp $pxebin $IRONIC_TFTPBOOT_DIR
+ mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+}
+
+# configure_ironic() - Set config files, create data dirs, etc
+function configure_ironic {
+ configure_ironic_dirs
sudo chown $STACK_USER $IRONIC_CONF_DIR
# Copy over ironic configuration file and configure common parameters.
@@ -147,10 +175,6 @@
if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
setup_colorized_logging $IRONIC_CONF_FILE DEFAULT
fi
-
- if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then
- configure_ironic_auxiliary
- fi
}
# configure_ironic_api() - Is used by configure_ironic(). Performs
@@ -175,15 +199,15 @@
function configure_ironic_conductor {
cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
- IRONIC_ROOTWRAP=$(get_rootwrap_location ironic)
- ROOTWRAP_ISUDOER_CMD="$IRONIC_ROOTWRAP $IRONIC_CONF_DIR/rootwrap.conf *"
+ local ironic_rootwrap=$(get_rootwrap_location ironic)
+ local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"
# Set up the rootwrap sudoers for ironic
- TEMPFILE=`mktemp`
- echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_ISUDOER_CMD" >$TEMPFILE
- chmod 0440 $TEMPFILE
- sudo chown root:root $TEMPFILE
- sudo mv $TEMPFILE /etc/sudoers.d/ironic-rootwrap
+ local tempfile=`mktemp`
+ echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
+ chmod 0440 $tempfile
+ sudo chown root:root $tempfile
+ sudo mv $tempfile /etc/sudoers.d/ironic-rootwrap
iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
iniset $IRONIC_CONF_FILE DEFAULT enabled_drivers $IRONIC_ENABLED_DRIVERS
@@ -214,22 +238,22 @@
# service ironic admin # if enabled
function create_ironic_accounts {
- SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
- ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+ local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
# Ironic
if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
# Get ironic user if exists
- IRONIC_USER=$(get_or_create_user "ironic" \
- "$SERVICE_PASSWORD" $SERVICE_TENANT)
- get_or_add_user_role $ADMIN_ROLE $IRONIC_USER $SERVICE_TENANT
+ local ironic_user=$(get_or_create_user "ironic" \
+ "$SERVICE_PASSWORD" $service_tenant)
+ get_or_add_user_role $admin_role $ironic_user $service_tenant
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- IRONIC_SERVICE=$(get_or_create_service "ironic" \
+ local ironic_service=$(get_or_create_service "ironic" \
"baremetal" "Ironic baremetal provisioning service")
- get_or_create_endpoint $IRONIC_SERVICE \
+ get_or_create_endpoint $ironic_service \
"$REGION_NAME" \
"$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
"$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
@@ -294,56 +318,55 @@
return 1
}
-function configure_ironic_dirs {
- sudo mkdir -p $IRONIC_DATA_DIR
- sudo mkdir -p $IRONIC_STATE_PATH
- sudo mkdir -p $IRONIC_TFTPBOOT_DIR
- sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
- sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
- if is_ubuntu; then
- PXEBIN=/usr/lib/syslinux/pxelinux.0
- elif is_fedora; then
- PXEBIN=/usr/share/syslinux/pxelinux.0
- fi
- if [ ! -f $PXEBIN ]; then
- die $LINENO "pxelinux.0 (from SYSLINUX) not found."
- fi
+function create_ovs_taps {
+ local ironic_net_id=$(neutron net-list | grep private | get_field 1)
- cp $PXEBIN $IRONIC_TFTPBOOT_DIR
- mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+ # Workaround: no netns exists on the host until a Neutron port is created. We
+ # need to create one in Neutron to know which netns to tap into before the
+ # first node boots.
+ local port_id=$(neutron port-create private | grep " id " | get_field 2)
+
+ # intentional sleep to make sure the tag has been set on the port
+ sleep 10
+
+ local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
+ local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+
+ # make sure the veth pair does not already exist; if it does, delete its links
+ sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
+ sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
+ # create veth pair for future interconnection between br-int and brbm
+ sudo ip link add brbm-tap1 type veth peer name ovs-tap1
+ sudo ip link set dev brbm-tap1 up
+ sudo ip link set dev ovs-tap1 up
+
+ sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$tag_id
+ sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
+
+ # Remove the port needed only for workaround.
+ neutron port-delete $port_id
}
function create_bridge_and_vms {
# Call libvirt setup scripts in a new shell to ensure any new group membership
sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
- LOG_ARG="$IRONIC_VM_LOG_DIR"
+ local log_arg="$IRONIC_VM_LOG_DIR"
else
- LOG_ARG=""
+ local log_arg=""
fi
sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
$IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \
- $LOG_ARG" >> $IRONIC_VM_MACS_CSV_FILE
+ $log_arg" >> $IRONIC_VM_MACS_CSV_FILE
+ create_ovs_taps
}
function enroll_vms {
-
- CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
- IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1)
+ local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
local idx=0
-
- # work around; need to know what netns neutron uses for private network.
- # Without knowing how to interconnect the networks, PXE won't work properly
- # for fake baremetal instances. The network should be configured prior all
- # the instances operation. If we don't do this, the first port creation
- # only happens in the middle of fake baremetal instance's spawning by nova,
- # so we'll end up with unbootable fake baremetal VM due to broken PXE.
- PORT_ID=$(neutron port-create private | grep " id " | get_field 2)
-
while read MAC; do
-
- NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \
+ local node_id=$(ironic node-create --chassis_uuid $chassis_id --driver pxe_ssh \
-i pxe_deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID \
-i pxe_deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID \
-i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
@@ -357,40 +380,19 @@
-p cpu_arch=x86_64 \
| grep " uuid " | get_field 2)
- ironic port-create --address $MAC --node_uuid $NODE_ID
+ ironic port-create --address $MAC --node_uuid $node_id
idx=$((idx+1))
-
done < $IRONIC_VM_MACS_CSV_FILE
# create the nova flavor
- adjusted_disk=$(($IRONIC_VM_SPECS_DISK - $IRONIC_VM_EPHEMERAL_DISK))
+ local adjusted_disk=$(($IRONIC_VM_SPECS_DISK - $IRONIC_VM_EPHEMERAL_DISK))
nova flavor-create --ephemeral $IRONIC_VM_EPHEMERAL_DISK baremetal auto $IRONIC_VM_SPECS_RAM $adjusted_disk $IRONIC_VM_SPECS_CPU
# TODO(lucasagomes): Remove the 'baremetal:deploy_kernel_id'
# and 'baremetal:deploy_ramdisk_id' parameters
# from the flavor after the completion of
# https://blueprints.launchpad.net/ironic/+spec/add-node-instance-info
nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$IRONIC_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$IRONIC_DEPLOY_RAMDISK_ID"
-
- # intentional sleep to make sure the tag has been set to port
- sleep 10
- TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
- TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
-
- # make sure veth pair is not existing, otherwise delete its links
- sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
- sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
- # create veth pair for future interconnection between br-int and brbm
- sudo ip link add brbm-tap1 type veth peer name ovs-tap1
- sudo ip link set dev brbm-tap1 up
- sudo ip link set dev ovs-tap1 up
-
- sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID
- sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
-
- # Remove the port needed only for workaround. For additional info read the
- # comment at the beginning of this function
- neutron port-delete $PORT_ID
}
function configure_iptables {
@@ -404,11 +406,11 @@
function configure_tftpd {
if is_ubuntu; then
- PXEBIN=/usr/lib/syslinux/pxelinux.0
+ local pxebin=/usr/lib/syslinux/pxelinux.0
elif is_fedora; then
- PXEBIN=/usr/share/syslinux/pxelinux.0
+ local pxebin=/usr/share/syslinux/pxelinux.0
fi
- if [ ! -f $PXEBIN ]; then
+ if [ ! -f $pxebin ]; then
die $LINENO "pxelinux.0 (from SYSLINUX) not found."
fi
@@ -441,33 +443,36 @@
}
function ironic_ssh_check {
- local KEY_FILE=$1
- local FLOATING_IP=$2
- local PORT=$3
- local DEFAULT_INSTANCE_USER=$4
- local ACTIVE_TIMEOUT=$5
- if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
+ local key_file=$1
+ local floating_ip=$2
+ local port=$3
+ local default_instance_user=$4
+ local active_timeout=$5
+ if ! timeout $active_timeout sh -c "while ! ssh -p $port -o StrictHostKeyChecking=no -i $key_file ${default_instance_user}@$floating_ip echo success; do sleep 1; done"; then
die $LINENO "server didn't become ssh-able!"
fi
}
function configure_ironic_auxiliary {
- configure_ironic_dirs
configure_ironic_ssh_keypair
ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
}
# build deploy kernel+ramdisk, then upload them to glance
-# this function sets IRONIC_DEPLOY_KERNEL_ID and IRONIC_DEPLOY_RAMDISK_ID
+# this function sets ``IRONIC_DEPLOY_KERNEL_ID`` and ``IRONIC_DEPLOY_RAMDISK_ID``
function upload_baremetal_ironic_deploy {
- token=$1
+ declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID
+ echo_summary "Creating and uploading baremetal images for ironic"
+
+ # install diskimage-builder
+ git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
- IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
- IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
+ local IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
+ local IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
else
- IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
- IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
+ local IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
+ local IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
fi
if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
@@ -485,60 +490,41 @@
fi
fi
+ local token=$(keystone token-get | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO token "Keystone failed to get token"
+
# load them into glance
- IRONIC_DEPLOY_KERNEL_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
- --is-public True --disk-format=aki \
+ IRONIC_DEPLOY_KERNEL_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
+ --public --disk-format=aki \
< $IRONIC_DEPLOY_KERNEL_PATH | grep ' id ' | get_field 2)
- IRONIC_DEPLOY_RAMDISK_ID=$(glance \
- --os-auth-token $token \
- --os-image-url http://$GLANCE_HOSTPORT \
- image-create \
- --name $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
- --is-public True --disk-format=ari \
+ IRONIC_DEPLOY_RAMDISK_ID=$(openstack \
+ --os-token $token \
+ --os-url http://$GLANCE_HOSTPORT \
+ image create \
+ $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
+ --public --disk-format=ari \
< $IRONIC_DEPLOY_RAMDISK_PATH | grep ' id ' | get_field 2)
}
function prepare_baremetal_basic_ops {
-
- # install diskimage-builder
- git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
-
- # make sure all needed service were enabled
- for srv in nova glance key neutron; do
- if ! is_service_enabled "$srv"; then
- die $LINENO "$srv should be enabled for ironic tests"
- fi
- done
-
- TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
- die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
- echo_summary "Creating and uploading baremetal images for ironic"
-
- # build and upload separate deploy kernel & ramdisk
- upload_baremetal_ironic_deploy $TOKEN
-
+ upload_baremetal_ironic_deploy
create_bridge_and_vms
enroll_vms
configure_tftpd
configure_iptables
-
- # restart nova-compute to ensure its resource tracking is up to
- # date with newly enrolled nodes
- stop_nova_compute || true
- start_nova_compute
+ configure_ironic_auxiliary
}
function cleanup_baremetal_basic_ops {
rm -f $IRONIC_VM_MACS_CSV_FILE
if [ -f $IRONIC_KEY_FILE ]; then
- KEY=`cat $IRONIC_KEY_FILE.pub`
+ local key=$(cat $IRONIC_KEY_FILE.pub)
# remove public key from authorized_keys
- grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
+ grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
fi
sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
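
create_ovs_taps bridges Ironic's VM network into Neutron's private network by copying the DHCP port's VLAN tag onto one end of a veth pair (ovs-tap1 on br-int, brbm-tap1 on $IRONIC_VM_NETWORK_BRIDGE). A quick way to verify the wiring afterwards, assuming the default bridge name brbm:

    sudo ovs-vsctl list-ports br-int | grep ovs-tap1
    sudo ovs-vsctl list-ports brbm | grep brbm-tap1
    ip link show brbm-tap1    # should report state UP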
diff --git a/lib/marconi b/lib/marconi
index 063ed3d..e05518c 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -42,7 +42,7 @@
MARCONI_BIN_DIR=$(get_python_exec_prefix)
# Set up database backend
-MARCONI_BACKEND=${MARCONI_BACKEND:-sqlite}
+MARCONI_BACKEND=${MARCONI_BACKEND:-mongodb}
# Set Marconi repository
@@ -77,6 +77,13 @@
function cleanup_marconi {
if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then
die $LINENO "Mongo DB did not start"
+ else
+ full_version=$(mongo marconi --eval 'db.dropDatabase();')
+ mongo_version=$(echo $full_version | cut -d' ' -f4)
+ required_mongo_version='2.2'
+ if [[ $mongo_version < $required_mongo_version ]]; then
+ die $LINENO "Marconi needs Mongo DB version >= 2.2 to run."
+ fi
fi
}
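
The new mongo check derives the version from the shell banner ("MongoDB shell version: x.y.z") by taking the fourth whitespace-separated field of the flattened output. The extraction can be exercised against a literal banner, no running MongoDB required (banner text illustrative):

    full_version='MongoDB shell version: 2.4.9 connecting to: marconi true'
    echo $full_version | cut -d' ' -f4    # prints 2.4.9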
diff --git a/lib/neutron b/lib/neutron
index 98636b4..f703bec 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -120,6 +120,21 @@
## Provider Network Information
PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"}
+# Use flat providernet for public network
+#
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network
+# for external interface of neutron l3-agent. In that case,
+# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value
+# used for the network. In case of openvswitch agent, you should
+# add the corresponding entry to your OVS_BRIDGE_MAPPINGS.
+#
+# eg.
+# Q_USE_PROVIDERNET_FOR_PUBLIC=True
+# PUBLIC_PHYSICAL_NETWORK=public
+# OVS_BRIDGE_MAPPINGS=public:br-ex
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False}
+PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
+
# The next two variables are configured by plugin
# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/*
#
@@ -446,7 +461,11 @@
fi
neutron router-interface-add $ROUTER_ID $SUBNET_ID
# Create an external network, and a subnet. Configure the external network as router gw
- EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
+ EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+ else
+ EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+ fi
die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP"
@@ -455,6 +474,9 @@
if is_service_enabled q-l3; then
# logic is specific to using the l3-agent for l3
if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+ # Disable in-band as we are going to use local port
+ # to communicate with VMs
+ sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE other_config:disable-in-band=true
CIDR_LEN=${FLOATING_RANGE#*/}
sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
sudo ip link set $PUBLIC_BRIDGE up
@@ -492,6 +514,10 @@
# install_neutron_agent_packages() - Collect source and prepare
function install_neutron_agent_packages {
+ # radvd doesn't come with the OS. Install it if the l3 service is enabled.
+ if is_service_enabled q-l3; then
+ install_package radvd
+ fi
# install packages that are specific to plugin agent(s)
if is_service_enabled q-agt q-dhcp q-l3; then
neutron_plugin_install_agent_packages
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index d270015..42dd57f 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -9,7 +9,7 @@
# Select either 'gre', 'vxlan', or '(gre vxlan)'
Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"}
# This has to be set here since the agent will set this in the config file
-if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "local" ]]; then
+if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then
Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre)
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 616a236..8375bb6 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -71,7 +71,11 @@
}
function _neutron_ovs_base_configure_l3_agent {
- iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ""
+ else
+ iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+ fi
neutron-ovs-cleanup
# --no-wait causes a race condition if $PUBLIC_BRIDGE is not up when ip addr flush is called
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index d920ba6..2478c47 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -7,7 +7,7 @@
AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent"
-VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin"
+VPN_PLUGIN=${VPN_PLUGIN:-"neutron.services.vpn.plugin.VPNDriverPlugin"}
IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"}
function neutron_vpn_install_agent_packages {
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 5802ebf..f4eb82d 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -105,9 +105,6 @@
if [[ "$NSX_PASSWORD" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
fi
- if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT
- fi
if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
fi
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c068c74..344ef04 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -56,6 +56,9 @@
# install_nova_hypervisor() - Install external components
function install_nova_hypervisor {
+ if ! is_service_enabled neutron; then
+ die $LINENO "Neutron should be enabled for usage of the Ironic Nova driver."
+ fi
install_libvirt
}
diff --git a/lib/oslo b/lib/oslo
index 421fbce..025815c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -39,10 +39,6 @@
# install_oslo() - Collect source and prepare
function install_oslo {
- # TODO(sdague): remove this once we get to Icehouse, this just makes
- # for a smoother transition of existing users.
- cleanup_oslo
-
git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
setup_install $CLIFF_DIR
@@ -74,17 +70,6 @@
setup_install $TASKFLOW_DIR
}
-# cleanup_oslo() - purge possibly old versions of oslo
-function cleanup_oslo {
- # this means we've got an old oslo installed, lets get rid of it
- if ! python -c 'import oslo.config' 2>/dev/null; then
- echo "Found old oslo.config... removing to ensure consistency"
- local PIP_CMD=$(get_pip_command)
- pip_install oslo.config
- sudo $PIP_CMD uninstall -y oslo.config
- fi
-}
-
# Restore xtrace
$XTRACE
diff --git a/lib/swift b/lib/swift
index 84304d3..d8e8f23 100644
--- a/lib/swift
+++ b/lib/swift
@@ -115,6 +115,10 @@
CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
+# Enable tempurl feature
+SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
+SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY}
+
# Tell Tempest this project is present
TEMPEST_SERVICES+=,swift
@@ -679,6 +683,10 @@
screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
done
fi
+
+ if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+ swift_configure_tempurls
+ fi
}
# stop_swift() - Stop running processes (non-screen)
@@ -701,6 +709,13 @@
pkill -f swift-
}
+function swift_configure_tempurls {
+ OS_USERNAME=swift \
+ OS_TENANT_NAME=$SERVICE_TENANT_NAME \
+ OS_PASSWORD=$SERVICE_PASSWORD \
+ swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
+}
+
# Restore xtrace
$XTRACE
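
swift_configure_tempurls stores the key as account metadata for the swift service user, which is what the tempurl middleware checks when validating signed URLs. Once stack.sh has set the key, a signed URL can be generated with python-swiftclient; a minimal sketch with placeholder account/container/object names:

    # 1-hour GET URL signed with the shared key
    swift tempurl GET 3600 /v1/AUTH_demo/container/object.img $SWIFT_TEMPURL_KEY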
diff --git a/lib/tempest b/lib/tempest
index 91ede0d..d6d6020 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -52,7 +52,11 @@
NOVA_SOURCE_DIR=$DEST/nova
BUILD_INTERVAL=1
-BUILD_TIMEOUT=196
+
+# How long tempest will wait for a VM to change state (spawn, delete, etc.)
+# before timing out.
+# The default is 196 seconds.
+BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
@@ -371,6 +375,15 @@
# Baremetal
if [ "$VIRT_DRIVER" = "ironic" ] ; then
iniset $TEMPEST_CONFIG baremetal driver_enabled True
+ iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
+ iniset $TEMPEST_CONFIG compute-feature-enabled console_output False
+ iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
+ iniset $TEMPEST_CONFIG compute-feature-enabled live_migration False
+ iniset $TEMPEST_CONFIG compute-feature-enabled pause False
+ iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
+ iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+ iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+ iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
fi
# service_available
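
Making BUILD_TIMEOUT overridable lets slow environments (nested virt, the Ironic fake-baremetal VMs configured above) stretch tempest's state-change wait from local configuration instead of editing lib/tempest. An illustrative localrc snippet:

    # localrc
    BUILD_TIMEOUT=300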
diff --git a/stack.sh b/stack.sh
index b807ab5..7a7655c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -142,7 +142,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -216,10 +216,18 @@
echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null
fi
+# Upstream Rackspace CentOS 7 images have an issue where cloud-init is
+# installed via pip because there were no official packages when the
+# image was created (a fix is in the works). Remove all pip packages
+# before we do anything else.
+if [[ $DISTRO == "rhel7" ]] && is_rackspace; then
+ (sudo pip freeze | xargs sudo pip uninstall -y) || true
+fi
+
# Some distros need to add repos beyond the defaults provided by the vendor
# to pick up required packages.
-if [[ is_fedora && $DISTRO =~ (rhel) ]]; then
+if [[ is_fedora && $DISTRO == "rhel6" ]]; then
# Installing Open vSwitch on RHEL requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-icehouse/rdo-release-icehouse.rpm"}
RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-icehouse"}
@@ -228,10 +236,13 @@
yum_install $RHEL6_RDO_REPO_RPM || \
die $LINENO "Error installing RDO repo, cannot continue"
fi
+fi
+
+if [[ is_fedora && ( $DISTRO == "rhel6" || $DISTRO == "rhel7" ) ]]; then
# RHEL requires EPEL for many Open Stack dependencies
- if [[ $DISTRO =~ (rhel7) ]]; then
+ if [[ $DISTRO == "rhel7" ]]; then
EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-0.2.noarch.rpm"}
- else
+ elif [[ $DISTRO == "rhel6" ]]; then
EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
fi
if ! sudo yum repolist enabled epel | grep -q 'epel'; then
@@ -242,13 +253,12 @@
# ... and also optional to be enabled
is_package_installed yum-utils || install_package yum-utils
- if [[ $DISTRO =~ (rhel7) ]]; then
+ if [[ $DISTRO == "rhel7" ]]; then
OPTIONAL_REPO=rhel-7-server-optional-rpms
- else
+ elif [[ $DISTRO == "rhel6" ]]; then
OPTIONAL_REPO=rhel-6-server-optional-rpms
fi
sudo yum-config-manager --enable ${OPTIONAL_REPO}
-
fi
# Filesystem setup
@@ -472,6 +482,10 @@
# ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
+
+ if [[ -z "$SWIFT_TEMPURL_KEY" ]] && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+ read_password SWIFT_TEMPURL_KEY "ENTER A KEY FOR SWIFT TEMPURLS."
+ fi
fi
@@ -518,7 +532,7 @@
echo $@ >&3
}
-if [[ is_fedora && $DISTRO =~ (rhel) ]]; then
+if [[ is_fedora && $DISTRO == "rhel6" ]]; then
# poor old python2.6 doesn't have argparse by default, which
# outfilter.py uses
is_package_installed python-argparse || install_package python-argparse
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 8b1e4df..50fb31c 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -93,24 +93,6 @@
sudo chmod +r $dir/*
fi
-# Ubuntu 12.04
-# ------------
-
-# We can regularly get kernel crashes on the 12.04 default kernel, so attempt
-# to install a new kernel
-if [[ ${DISTRO} =~ (precise) ]]; then
- # Finally, because we suspect the Precise kernel is problematic, install a new kernel
- UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL)
- if [[ $UPGRADE_KERNEL == "True" ]]; then
- if [[ ! `uname -r` =~ (^3\.11) ]]; then
- apt_get install linux-generic-lts-saucy
- echo "Installing Saucy LTS kernel, please reboot before proceeding"
- exit 1
- fi
- fi
-fi
-
-
if is_fedora; then
# Disable selinux to avoid configuring to allow Apache access
# to Horizon files (LP#1175444)