Merge "Set up downloaded images for testing"
diff --git a/AUTHORS b/AUTHORS
index 84a565e..34e0474 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -10,6 +10,7 @@
Jason Cannavale <jason.cannavale@rackspace.com>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
+Johannes Erdfelt <johannes.erdfelt@rackspace.com>
Justin Shepherd <galstrom21@gmail.com>
Kiall Mac Innes <kiall@managedit.ie>
Scott Moser <smoser@ubuntu.com>
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 2f7a17b..e569196 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -20,6 +20,18 @@
source ./openrc
popd
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+
+# Instance type to create
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
# Find a machine image to boot
IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
@@ -27,10 +39,15 @@
SECGROUP=euca_secgroup
# Add a secgroup
-euca-add-group -d description $SECGROUP
+if ! euca-describe-group | grep -q $SECGROUP; then
+ euca-add-group -d "$SECGROUP description" $SECGROUP
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-group | grep -q $SECGROUP; do sleep 1; done"; then
+ echo "Security group not created"
+ exit 1
+ fi
+fi
# Launch it
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
# Assure it has booted within a reasonable time
@@ -42,15 +59,13 @@
# Allocate floating address
FLOATING_IP=`euca-allocate-address | cut -f2`
-# Release floating address
+# Associate floating address
euca-associate-address -i $INSTANCE $FLOATING_IP
-
# Authorize pinging
euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
echo "Couldn't ping server with floating ip"
exit 1
@@ -65,6 +80,12 @@
# Release floating address
euca-disassociate-address $FLOATING_IP
+# Wait just a tick for everything above to complete so release doesn't fail
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
+ echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
+ exit 1
+fi
+
# Release floating address
euca-release-address $FLOATING_IP
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 135c8c1..f7b5240 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -24,6 +24,30 @@
source ./openrc
popd
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+
+# Instance type to create
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
+# Boot this image, use first AMI image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
+# Security group name
+SECGROUP=${SECGROUP:-test_secgroup}
+
+# Default floating IP pool name
+DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova}
+
+# Additional floating IP pool and range
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+
# Get a token for clients that don't support service catalog
# ==========================================================
@@ -46,28 +70,32 @@
nova image-list
# But we recommend using glance directly
-glance -A $TOKEN index
+glance -f -A $TOKEN index
-# Let's grab the id of the first AMI image to launch
-IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
+# Grab the id of the image to launch
+IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1`
# Security Groups
# ---------------
-SECGROUP=test_secgroup
# List of secgroups:
nova secgroup-list
# Create a secgroup
-nova secgroup-create $SECGROUP "test_secgroup description"
+if ! nova secgroup-list | grep -q $SECGROUP; then
+ nova secgroup-create $SECGROUP "$SECGROUP description"
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+ echo "Security group not created"
+ exit 1
+ fi
+fi
-# determine flavor
-# ----------------
+# determine instance type
+# -----------------------
-# List of flavors:
+# List of instance types:
nova flavor-list
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
@@ -76,7 +104,7 @@
NAME="myserver"
-nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
# Testing
# =======
@@ -88,23 +116,14 @@
# Waiting for boot
# ----------------
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
-
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
-
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
-
# check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
echo "server didn't become active!"
exit 1
fi
# get the IP of the server
-IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
+IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
@@ -124,20 +143,29 @@
# Security Groups & Floating IPs
# ------------------------------
-# allow icmp traffic (ping)
-nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
+ # allow icmp traffic (ping)
+ nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+ if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
+ echo "Security group rule not created"
+ exit 1
+ fi
+fi
# List rules for a secgroup
nova secgroup-list-rules $SECGROUP
-# allocate a floating ip
-nova floating-ip-create
+# allocate a floating ip from default pool
+FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | cut -d '|' -f2`
-# store floating address
-FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
+# list floating addresses
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
+ echo "Floating IP not allocated"
+ exit 1
+fi
# add floating ip to our server
-nova add-floating-ip $NAME $FLOATING_IP
+nova add-floating-ip $VM_UUID $FLOATING_IP
# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
@@ -145,6 +173,15 @@
exit 1
fi
+# Allocate an IP from second floating pool
+TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | cut -d '|' -f2`
+
+# list floating addresses
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
+ echo "Floating IP not allocated"
+ exit 1
+fi
+
# dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
@@ -161,12 +198,14 @@
# de-allocate the floating ip
nova floating-ip-delete $FLOATING_IP
+# Delete second floating IP
+nova floating-ip-delete $TEST_FLOATING_IP
+
# shutdown the server
-nova delete $NAME
+nova delete $VM_UUID
# Delete a secgroup
nova secgroup-delete $SECGROUP
# FIXME: validate shutdown within 5 seconds
# (nova show $NAME returns 1 or status != ACTIVE)?
-
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 6ea9a51..c2288de 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -19,6 +19,21 @@
source ./openrc
popd
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
+
+# Instance type to create
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
+# Boot this image, use first AMI image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
# Get a token for clients that don't support service catalog
# ==========================================================
@@ -41,10 +56,10 @@
nova image-list
# But we recommend using glance directly
-glance -A $TOKEN index
+glance -f -A $TOKEN index
-# Let's grab the id of the first AMI image to launch
-IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1`
+# Grab the id of the image to launch
+IMAGE=`glance -f -A $TOKEN index | egrep $DEFAULT_IMAGE_NAME | head -1 | cut -d" " -f1`
# determinine instance type
# -------------------------
@@ -52,7 +67,6 @@
# List of instance types:
nova flavor-list
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
if [[ -z "$INSTANCE_TYPE" ]]; then
# grab the first flavor in the list to launch if default doesn't exist
@@ -73,24 +87,14 @@
# Waiting for boot
# ----------------
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
-
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
-
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
-
# check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $BOOT_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
echo "server didn't become active!"
exit 1
fi
# get the IP of the server
IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
-#VM_UUID=`nova list | grep $NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'`
# for single node deployments, we can ping private ips
MULTI_HOST=${MULTI_HOST:-0}
diff --git a/files/apts/n-vnc b/files/apts/n-novnc
similarity index 100%
rename from files/apts/n-vnc
rename to files/apts/n-novnc
diff --git a/files/apts/n-vol b/files/apts/n-vol
index edaee2c..5db06ea 100644
--- a/files/apts/n-vol
+++ b/files/apts/n-vol
@@ -1,3 +1,2 @@
-iscsitarget # NOPRIME
-iscsitarget-dkms # NOPRIME
+tgt
lvm2
diff --git a/files/glance-api-paste.ini b/files/glance-api-paste.ini
new file mode 100644
index 0000000..b8832ad
--- /dev/null
+++ b/files/glance-api-paste.ini
@@ -0,0 +1,44 @@
+[pipeline:glance-api]
+#pipeline = versionnegotiation context apiv1app
+# NOTE: use the following pipeline for keystone
+pipeline = versionnegotiation authtoken auth-context apiv1app
+
+# To enable Image Cache Management API replace pipeline with below:
+# pipeline = versionnegotiation context imagecache apiv1app
+# NOTE: use the following pipeline for keystone auth (with caching)
+# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
+
+[app:apiv1app]
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.api.v1.router:API
+
+[filter:versionnegotiation]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
+
+[filter:cache]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache:CacheFilter
+
+[filter:cachemanage]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
+
+[filter:context]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_host = %KEYSTONE_SERVICE_HOST%
+service_port = %KEYSTONE_SERVICE_PORT%
+service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
+auth_host = %KEYSTONE_AUTH_HOST%
+auth_port = %KEYSTONE_AUTH_PORT%
+auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
+auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
+admin_token = %SERVICE_TOKEN%
+
+[filter:auth-context]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/glance-api.conf b/files/glance-api.conf
index 6c670b5..b4ba098 100644
--- a/files/glance-api.conf
+++ b/files/glance-api.conf
@@ -137,48 +137,3 @@
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
-
-[pipeline:glance-api]
-#pipeline = versionnegotiation context apiv1app
-# NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken auth-context apiv1app
-
-# To enable Image Cache Management API replace pipeline with below:
-# pipeline = versionnegotiation context imagecache apiv1app
-# NOTE: use the following pipeline for keystone auth (with caching)
-# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
-
-[app:apiv1app]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.api.v1.router:API
-
-[filter:versionnegotiation]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
-
-[filter:cache]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache:CacheFilter
-
-[filter:cachemanage]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
-
-[filter:context]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_protocol = http
-service_host = 127.0.0.1
-service_port = 5000
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
-auth_uri = http://127.0.0.1:5000/
-admin_token = %SERVICE_TOKEN%
-
-[filter:auth-context]
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/glance-registry-paste.ini b/files/glance-registry-paste.ini
new file mode 100644
index 0000000..f4130ec
--- /dev/null
+++ b/files/glance-registry-paste.ini
@@ -0,0 +1,29 @@
+[pipeline:glance-registry]
+#pipeline = context registryapp
+# NOTE: use the following pipeline for keystone
+pipeline = authtoken auth-context context registryapp
+
+[app:registryapp]
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.registry.api.v1:API
+
+[filter:context]
+context_class = glance.registry.context.RequestContext
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+service_host = %KEYSTONE_SERVICE_HOST%
+service_port = %KEYSTONE_SERVICE_PORT%
+service_protocol = %KEYSTONE_SERVICE_PROTOCOL%
+auth_host = %KEYSTONE_AUTH_HOST%
+auth_port = %KEYSTONE_AUTH_PORT%
+auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
+auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
+admin_token = %SERVICE_TOKEN%
+
+[filter:auth-context]
+context_class = glance.registry.context.RequestContext
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
index e732e86..2c32745 100644
--- a/files/glance-registry.conf
+++ b/files/glance-registry.conf
@@ -42,33 +42,3 @@
# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25
-
-[pipeline:glance-registry]
-#pipeline = context registryapp
-# NOTE: use the following pipeline for keystone
-pipeline = authtoken auth-context context registryapp
-
-[app:registryapp]
-paste.app_factory = glance.common.wsgi:app_factory
-glance.app_factory = glance.registry.api.v1:API
-
-[filter:context]
-context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = glance.common.context:ContextMiddleware
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_protocol = http
-service_host = 127.0.0.1
-service_port = 5000
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
-auth_uri = http://127.0.0.1:5000/
-admin_token = %SERVICE_TOKEN%
-
-[filter:auth-context]
-context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.wsgi:filter_factory
-glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/keystone.conf b/files/keystone.conf
index a646513..6d0fd7e 100644
--- a/files/keystone.conf
+++ b/files/keystone.conf
@@ -64,7 +64,7 @@
cert_required = True
#Role that allows to perform admin operations.
-keystone-admin-role = Admin
+keystone-admin-role = admin
#Role that allows to perform service admin operations.
keystone-service-admin-role = KeystoneServiceAdmin
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index a25ba20..77f6b93 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -10,19 +10,19 @@
$BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD%
# Roles
-$BIN_DIR/keystone-manage role add Admin
+$BIN_DIR/keystone-manage role add admin
$BIN_DIR/keystone-manage role add Member
$BIN_DIR/keystone-manage role add KeystoneAdmin
$BIN_DIR/keystone-manage role add KeystoneServiceAdmin
$BIN_DIR/keystone-manage role add sysadmin
$BIN_DIR/keystone-manage role add netadmin
-$BIN_DIR/keystone-manage role grant Admin admin admin
+$BIN_DIR/keystone-manage role grant admin admin admin
$BIN_DIR/keystone-manage role grant Member demo demo
$BIN_DIR/keystone-manage role grant sysadmin demo demo
$BIN_DIR/keystone-manage role grant netadmin demo demo
$BIN_DIR/keystone-manage role grant Member demo invisible_to_admin
-$BIN_DIR/keystone-manage role grant Admin admin demo
-$BIN_DIR/keystone-manage role grant Admin admin
+$BIN_DIR/keystone-manage role grant admin admin demo
+$BIN_DIR/keystone-manage role grant admin admin
$BIN_DIR/keystone-manage role grant KeystoneAdmin admin
$BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin
@@ -39,7 +39,7 @@
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%SERVICE_HOST%:5000/v2.0 http://%SERVICE_HOST%:35357/v2.0 http://%SERVICE_HOST%:5000/v2.0 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 %KEYSTONE_AUTH_PROTOCOL%://%KEYSTONE_AUTH_HOST%:%KEYSTONE_AUTH_PORT%/v2.0 %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/v2.0 1 1
if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1
fi
diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini
deleted file mode 100644
index 7f27fdc..0000000
--- a/files/nova-api-paste.ini
+++ /dev/null
@@ -1,138 +0,0 @@
-############
-# Metadata #
-############
-[composite:metadata]
-use = egg:Paste#urlmap
-/: metaversions
-/latest: meta
-/2007-01-19: meta
-/2007-03-01: meta
-/2007-08-29: meta
-/2007-10-10: meta
-/2007-12-15: meta
-/2008-02-01: meta
-/2008-09-01: meta
-/2009-04-04: meta
-
-[pipeline:metaversions]
-pipeline = ec2faultwrap logrequest metaverapp
-
-[pipeline:meta]
-pipeline = ec2faultwrap logrequest metaapp
-
-[app:metaverapp]
-paste.app_factory = nova.api.metadata.handler:Versions.factory
-
-[app:metaapp]
-paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
-
-#######
-# EC2 #
-#######
-
-[composite:ec2]
-use = egg:Paste#urlmap
-/services/Cloud: ec2cloud
-/services/Admin: ec2admin
-
-[pipeline:ec2cloud]
-pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
-
-[pipeline:ec2admin]
-pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
-
-[pipeline:ec2metadata]
-pipeline = ec2faultwrap logrequest ec2md
-
-[pipeline:ec2versions]
-pipeline = ec2faultwrap logrequest ec2ver
-
-[filter:ec2faultwrap]
-paste.filter_factory = nova.api.ec2:FaultWrapper.factory
-
-[filter:logrequest]
-paste.filter_factory = nova.api.ec2:RequestLogging.factory
-
-[filter:ec2lockout]
-paste.filter_factory = nova.api.ec2:Lockout.factory
-
-[filter:totoken]
-paste.filter_factory = keystone.middleware.ec2_token:EC2Token.factory
-
-[filter:ec2noauth]
-paste.filter_factory = nova.api.ec2:NoAuth.factory
-
-[filter:authenticate]
-paste.filter_factory = nova.api.ec2:Authenticate.factory
-
-[filter:cloudrequest]
-controller = nova.api.ec2.cloud.CloudController
-paste.filter_factory = nova.api.ec2:Requestify.factory
-
-[filter:adminrequest]
-controller = nova.api.ec2.admin.AdminController
-paste.filter_factory = nova.api.ec2:Requestify.factory
-
-[filter:authorizer]
-paste.filter_factory = nova.api.ec2:Authorizer.factory
-
-[app:ec2executor]
-paste.app_factory = nova.api.ec2:Executor.factory
-
-#############
-# Openstack #
-#############
-
-[composite:osapi]
-use = call:nova.api.openstack.v2.urlmap:urlmap_factory
-/: osversions
-/v1.1: openstack_api_v2
-/v2: openstack_api_v2
-
-[pipeline:openstack_api_v2]
-pipeline = faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2
-
-[filter:faultwrap]
-paste.filter_factory = nova.api.openstack.v2:FaultWrapper.factory
-
-[filter:auth]
-paste.filter_factory = nova.api.openstack.v2.auth:AuthMiddleware.factory
-
-[filter:noauth]
-paste.filter_factory = nova.api.openstack.v2.auth:NoAuthMiddleware.factory
-
-[filter:ratelimit]
-paste.filter_factory = nova.api.openstack.v2.limits:RateLimitingMiddleware.factory
-
-[filter:serialize]
-paste.filter_factory = nova.api.openstack.wsgi:LazySerializationMiddleware.factory
-
-[filter:extensions]
-paste.filter_factory = nova.api.openstack.v2.extensions:ExtensionMiddleware.factory
-
-[app:osapi_app_v2]
-paste.app_factory = nova.api.openstack.v2:APIRouter.factory
-
-[pipeline:osversions]
-pipeline = faultwrap osversionapp
-
-[app:osversionapp]
-paste.app_factory = nova.api.openstack.v2.versions:Versions.factory
-
-##########
-# Shared #
-##########
-
-[filter:keystonecontext]
-paste.filter_factory = keystone.middleware.nova_keystone_context:NovaKeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_protocol = http
-service_host = 127.0.0.1
-service_port = 5000
-auth_host = 127.0.0.1
-auth_port = 35357
-auth_protocol = http
-auth_uri = http://127.0.0.1:5000/
-admin_token = %SERVICE_TOKEN%
diff --git a/files/pips/horizon b/files/pips/horizon
index 893efb7..44bf6db 100644
--- a/files/pips/horizon
+++ b/files/pips/horizon
@@ -1,4 +1,2 @@
django-nose-selenium
pycrypto==2.3
-
--e git+https://github.com/jacobian/openstack.compute.git#egg=openstack
diff --git a/files/sudo/nova b/files/sudo/nova
index 0a79c21..bde1519 100644
--- a/files/sudo/nova
+++ b/files/sudo/nova
@@ -41,6 +41,7 @@
/usr/bin/socat, \
/sbin/parted, \
/usr/sbin/dnsmasq, \
+ /usr/bin/ovs-vsctl, \
/usr/sbin/arping
%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS
diff --git a/openrc b/openrc
index 4395975..0f327d2 100644
--- a/openrc
+++ b/openrc
@@ -53,13 +53,13 @@
# export NOVACLIENT_DEBUG=1
# Max time till the vm is bootable
-export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+export BOOT_TIMEOUT=${BOOT_TIMEOUT:-30}
# Max time to wait while vm goes from build to active state
-export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
# Max time from run instance command until it is running
export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
# Max time to wait for proper IP association and dis-association.
-export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-15}
diff --git a/stack.sh b/stack.sh
index 420fc25..d6a7869 100755
--- a/stack.sh
+++ b/stack.sh
@@ -66,6 +66,12 @@
# We try to have sensible defaults, so you should be able to run ``./stack.sh``
# in most cases.
#
+# We support HTTP and HTTPS proxy servers via the usual environment variables
+# http_proxy and https_proxy. They can be set in localrc if necessary or
+# on the command line::
+#
+# http_proxy=http://proxy.example.com:3128/ ./stack.sh
+#
# We source our settings from ``stackrc``. This file is distributed with devstack
# and contains locations for what repositories to use. If you want to use other
# repositories and branches, you can add your own settings with another file called
@@ -85,8 +91,9 @@
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[ "$(id -u)" = "0" ] && sudo="env"
- $sudo DEBIAN_FRONTEND=noninteractive apt-get \
- --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+ $sudo DEBIAN_FRONTEND=noninteractive \
+ http_proxy=$http_proxy https_proxy=$https_proxy \
+ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
# Check to see if we are already running a stack.sh
@@ -186,7 +193,7 @@
Q_HOST=${Q_HOST:-localhost}
# Specify which services to launch. These generally correspond to screen tabs
-ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx}
+ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit,openstackx}
# Name of the lvm volume group to use/create for iscsi volumes
VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
@@ -283,6 +290,11 @@
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100}
VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
+# Test floating pool and range are used for testing. They are defined
+# here until the admin APIs can replace nova-manage
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
+
# Multi-host is a mode where each compute node runs its own network node. This
# allows network operations and routing for a VM to occur on the server that is
# running the VM - removing a SPOF and bandwidth bottleneck.
@@ -390,8 +402,39 @@
# Horizon currently truncates usernames and passwords at 20 characters
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
-LOGFILE=${LOGFILE:-"$PWD/stack.sh.$$.log"}
-(
+# Set Keystone interface configuration
+KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
+KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
+KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
+KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
+KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
+
+# Log files
+# ---------
+
+# Set up logging for stack.sh
+# Set LOGFILE to turn on logging
+# We append '.xxxxxxxx' to the given name to maintain history
+# where xxxxxxxx is a representation of the date the file was created
+if [[ -n "$LOGFILE" ]]; then
+ # First clean up old log files. Use the user-specified LOGFILE
+ # as the template to search for, appending '.*' to match the date
+ # we added on earlier runs.
+ LOGDAYS=${LOGDAYS:-7}
+ LOGDIR=$(dirname "$LOGFILE")
+ LOGNAME=$(basename "$LOGFILE")
+ find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \;
+
+ TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+ LOGFILE=$LOGFILE.$(date "+$TIMESTAMP_FORMAT")
+ # Redirect stdout/stderr to tee to write the log file
+ exec 1> >( tee "${LOGFILE}" ) 2>&1
+ echo "stack.sh log $LOGFILE"
+ # Specified logfile name always links to the most recent log
+ ln -sf $LOGFILE $LOGDIR/$LOGNAME
+fi
+
# So that errors don't compound we exit on any errors so you see only the
# first error that occurred.
trap failed ERR
@@ -477,7 +520,10 @@
function pip_install {
[[ "$OFFLINE" = "True" ]] && return
- sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors $@
+ sudo PIP_DOWNLOAD_CACHE=/var/cache/pip \
+ HTTP_PROXY=$http_proxy \
+ HTTPS_PROXY=$https_proxy \
+ pip install --use-mirrors $@
}
# install apt requirements
@@ -553,7 +599,7 @@
# image catalog service
git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
fi
-if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then
# a websockets/html5 or flash powered VNC console for vm instances
git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH
fi
@@ -737,31 +783,70 @@
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance;'
+ function glance_config {
+ sudo sed -e "
+ s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
+ s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
+ s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
+ s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g;
+ s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
+ s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
+ s,%SQL_CONN%,$BASE_SQL_CONN/glance,g;
+ s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
+ s,%DEST%,$DEST,g;
+ s,%SYSLOG%,$SYSLOG,g;
+ " -i $1
+ }
+
# Copy over our glance configurations and update them
- GLANCE_CONF=$GLANCE_DIR/etc/glance-registry.conf
- cp $FILES/glance-registry.conf $GLANCE_CONF
- sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/glance,g" -i $GLANCE_CONF
- sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_CONF
- sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_CONF
- sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_CONF
+ GLANCE_REGISTRY_CONF=$GLANCE_DIR/etc/glance-registry.conf
+ cp $FILES/glance-registry.conf $GLANCE_REGISTRY_CONF
+ glance_config $GLANCE_REGISTRY_CONF
+
+ if [[ -e $FILES/glance-registry-paste.ini ]]; then
+ GLANCE_REGISTRY_PASTE_INI=$GLANCE_DIR/etc/glance-registry-paste.ini
+ cp $FILES/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
+ glance_config $GLANCE_REGISTRY_PASTE_INI
+ # During the transition for Glance to the split config files
+ # we cat them together to handle both pre- and post-merge
+ cat $GLANCE_REGISTRY_PASTE_INI >>$GLANCE_REGISTRY_CONF
+ fi
GLANCE_API_CONF=$GLANCE_DIR/etc/glance-api.conf
cp $FILES/glance-api.conf $GLANCE_API_CONF
- sudo sed -e "s,%DEST%,$DEST,g" -i $GLANCE_API_CONF
- sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $GLANCE_API_CONF
- sudo sed -e "s,%SYSLOG%,$SYSLOG,g" -i $GLANCE_API_CONF
+ glance_config $GLANCE_API_CONF
+
+ if [[ -e $FILES/glance-api-paste.ini ]]; then
+ GLANCE_API_PASTE_INI=$GLANCE_DIR/etc/glance-api-paste.ini
+ cp $FILES/glance-api-paste.ini $GLANCE_API_PASTE_INI
+ glance_config $GLANCE_API_PASTE_INI
+ # During the transition for Glance to the split config files
+ # we cat them together to handle both pre- and post-merge
+ cat $GLANCE_API_PASTE_INI >>$GLANCE_API_CONF
+ fi
fi
# Nova
# ----
-
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
# We are going to use a sample http middleware configuration based on the
# one from the keystone project to launch nova. This paste config adds
- # the configuration required for nova to validate keystone tokens. We add
- # our own service token to the configuration.
- cp $FILES/nova-api-paste.ini $NOVA_DIR/bin
+ # the configuration required for nova to validate keystone tokens.
+
+ # First we add a some extra data to the default paste config from nova
+ cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_DIR/bin/nova-api-paste.ini
+
+ # Then we add our own service token to the configuration
sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
+
+ # Finally, we change the pipelines in nova to use keystone
+ function replace_pipeline() {
+ sed "/\[pipeline:$1\]/,/\[/s/^pipeline = .*/pipeline = $2/" -i $NOVA_DIR/bin/nova-api-paste.ini
+ }
+ replace_pipeline "ec2cloud" "ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer validator ec2executor"
+ replace_pipeline "ec2admin" "ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor"
+ replace_pipeline "openstack_compute_api_v2" "faultwrap authtoken keystonecontext ratelimit osapi_compute_app_v2"
+ replace_pipeline "openstack_volume_api_v1" "faultwrap authtoken keystonecontext ratelimit osapi_volume_app_v1"
fi
# Helper to clean iptables rules
@@ -844,7 +929,7 @@
# Destroy old instances
instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
- if [ ! $instances = "" ]; then
+ if [ ! "$instances" = "" ]; then
echo $instances | xargs -n1 virsh destroy || true
echo $instances | xargs -n1 virsh undefine || true
fi
@@ -931,7 +1016,7 @@
# We need a special version of bin/swift which understand the
# OpenStack api 2.0, we download it until this is getting
# integrated in swift.
- sudo curl -s -o/usr/local/bin/swift \
+ sudo https_proxy=$https_proxy curl -s -o/usr/local/bin/swift \
'https://review.openstack.org/gitweb?p=openstack/swift.git;a=blob_plain;f=bin/swift;hb=48bfda6e2fdf3886c98bd15649887d54b9a2574e'
else
swift_auth_server=tempauth
@@ -975,7 +1060,7 @@
sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
tee /etc/rsyslog.d/10-swift.conf
sudo restart rsyslog
-
+
# We create two helper scripts :
#
# - swift-remakerings
@@ -1013,7 +1098,8 @@
#
# By default, the backing file is 2G in size, and is stored in /opt/stack.
- apt_get install iscsitarget-dkms iscsitarget
+ # install the package
+ apt_get install tgt
if ! sudo vgs $VOLUME_GROUP; then
VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
@@ -1040,9 +1126,10 @@
done
fi
- # Configure iscsitarget
- sudo sed 's/ISCSITARGET_ENABLE=false/ISCSITARGET_ENABLE=true/' -i /etc/default/iscsitarget
- sudo /etc/init.d/iscsitarget restart
+ # tgt in oneiric doesn't restart properly if tgtd isn't running
+ # do it in two steps
+ sudo stop tgt || true
+ sudo start tgt
fi
function add_nova_flag {
@@ -1064,7 +1151,7 @@
add_nova_flag "--libvirt_vif_type=ethernet"
add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
- add_nova_flag "--quantum-use-dhcp"
+ add_nova_flag "--quantum_use_dhcp"
fi
else
add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
@@ -1072,6 +1159,8 @@
if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
add_nova_flag "--volume_group=$VOLUME_GROUP"
add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x"
+ # oneiric no longer supports ietadm
+ add_nova_flag "--iscsi_helper=tgtadm"
fi
add_nova_flag "--my_ip=$HOST_IP"
add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
@@ -1080,14 +1169,23 @@
add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then
- add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions"
- add_nova_flag "--osapi_extension=extensions.admin.Admin"
+ add_nova_flag "--osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions"
+ add_nova_flag "--osapi_compute_extension=extensions.admin.Admin"
fi
-if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then
- VNCPROXY_URL=${VNCPROXY_URL:-"http://$SERVICE_HOST:6080"}
- add_nova_flag "--vncproxy_url=$VNCPROXY_URL"
- add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
+if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ add_nova_flag "--novncproxy_base_url=$NOVNCPROXY_URL"
fi
+if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then
+ XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+ add_nova_flag "--xvpvncproxy_base_url=$XVPVNCPROXY_URL"
+fi
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+else
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+fi
+add_nova_flag "--vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS"
add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini"
add_nova_flag "--image_service=nova.image.glance.GlanceImageService"
add_nova_flag "--ec2_dmz_host=$EC2_DMZ_HOST"
@@ -1121,11 +1219,15 @@
add_nova_flag "--xenapi_connection_url=http://169.254.0.1"
add_nova_flag "--xenapi_connection_username=root"
add_nova_flag "--xenapi_connection_password=$XENAPI_PASSWORD"
- add_nova_flag "--flat_injected=False"
+ add_nova_flag "--noflat_injected"
add_nova_flag "--flat_interface=eth1"
- add_nova_flag "--flat_network_bridge=xenbr1"
+ add_nova_flag "--flat_network_bridge=xapi1"
add_nova_flag "--public_interface=eth3"
+ # Need to avoid crash due to new firewall support
+ XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
+ add_nova_flag "--firewall_driver=$XEN_FIREWALL_DRIVER"
else
+ add_nova_flag "--connection_type=libvirt"
add_nova_flag "--flat_network_bridge=$FLAT_NETWORK_BRIDGE"
if [ -n "$FLAT_INTERFACE" ]; then
add_nova_flag "--flat_interface=$FLAT_INTERFACE"
@@ -1165,9 +1267,17 @@
# keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``.
KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh
cp $FILES/keystone_data.sh $KEYSTONE_DATA
- sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_DATA
- sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA
- sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA
+ sudo sed -e "
+ s,%KEYSTONE_AUTH_HOST%,$KEYSTONE_AUTH_HOST,g;
+ s,%KEYSTONE_AUTH_PORT%,$KEYSTONE_AUTH_PORT,g;
+ s,%KEYSTONE_AUTH_PROTOCOL%,$KEYSTONE_AUTH_PROTOCOL,g;
+ s,%KEYSTONE_SERVICE_HOST%,$KEYSTONE_SERVICE_HOST,g;
+ s,%KEYSTONE_SERVICE_PORT%,$KEYSTONE_SERVICE_PORT,g;
+ s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
+ s,%SERVICE_HOST%,$SERVICE_HOST,g;
+ s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g;
+ s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g;
+ " -i $KEYSTONE_DATA
# initialize keystone with default users/endpoints
ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
@@ -1200,7 +1310,7 @@
# sleep to allow bash to be ready to be send the command - we are
# creating a new window in screen and then sends characters, so if
# bash isn't running by the time we send the command, nothing happens
- sleep 1
+ sleep 1.5
screen -S stack -p $1 -X stuff "$2$NL"
fi
fi
@@ -1221,7 +1331,7 @@
if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=etc/glance-api.conf"
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
echo "g-api did not start"
exit 1
fi
@@ -1231,7 +1341,7 @@
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d"
echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT; do sleep 1; done"; then
echo "keystone did not start"
exit 1
fi
@@ -1241,7 +1351,7 @@
if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
echo "Waiting for nova-api to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
echo "nova-api did not start"
exit 1
fi
@@ -1255,6 +1365,7 @@
apt_get install openvswitch-switch openvswitch-datapath-dkms
# Create database for the plugin/agent
if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS ovs_quantum;'
mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
else
echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
@@ -1275,10 +1386,13 @@
sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
+
+ # Start up the quantum <-> openvswitch agent
+ QUANTUM_OVS_CONFIG_FILE=$QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
+ sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/ovs_quantum/g" $QUANTUM_OVS_CONFIG_FILE
+ screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_OVS_CONFIG_FILE -v"
fi
- # Start up the quantum <-> openvswitch agent
- screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
fi
# If we're using Quantum (i.e. q-svc is enabled), network creation has to
@@ -1292,6 +1406,9 @@
else
# create some floating ips
$NOVA_DIR/bin/nova-manage floating create $FLOATING_RANGE
+
+ # create a second pool
+ $NOVA_DIR/bin/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
fi
fi
@@ -1304,8 +1421,14 @@
screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
screen_it n-net "cd $NOVA_DIR && $NOVA_DIR/bin/nova-network"
screen_it n-sch "cd $NOVA_DIR && $NOVA_DIR/bin/nova-scheduler"
-if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then
- screen_it n-vnc "cd $NOVNC_DIR && ./utils/nova-wsproxy.py --flagfile $NOVA_DIR/bin/nova.conf --web . 6080"
+if [[ "$ENABLED_SERVICES" =~ "n-novnc" ]]; then
+ screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --flagfile $NOVA_DIR/bin/nova.conf --web ."
+fi
+if [[ "$ENABLED_SERVICES" =~ "n-xvnc" ]]; then
+ screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --flagfile $NOVA_DIR/bin/nova.conf"
+fi
+if [[ "$ENABLED_SERVICES" =~ "n-cauth" ]]; then
+ screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
fi
if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/apache2/error.log"
@@ -1403,13 +1526,8 @@
# Fin
# ===
+set +o xtrace
-) 2>&1 | tee "${LOGFILE}"
-
-# Check that the left side of the above pipe succeeded
-for ret in "${PIPESTATUS[@]}"; do [ $ret -eq 0 ] || exit $ret; done
-
-(
# Using the cloud
# ===============
@@ -1425,7 +1543,7 @@
# If keystone is present, you can point nova cli to this server
if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
- echo "keystone is serving at http://$SERVICE_HOST:5000/v2.0/"
+ echo "keystone is serving at $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
echo "examples on using novaclient command line is in exercise.sh"
echo "the default users are: admin and demo"
echo "the password: $ADMIN_PASSWORD"
@@ -1436,5 +1554,3 @@
# Indicate how long this took to run (bash maintained variable 'SECONDS')
echo "stack.sh completed in $SECONDS seconds."
-
-) | tee -a "$LOGFILE"
diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh
index d79d5c3..642b40f 100755
--- a/tools/xen/build_domU.sh
+++ b/tools/xen/build_domU.sh
@@ -33,12 +33,12 @@
# VM network params
VM_NETMASK=${VM_NETMASK:-255.255.255.0}
-VM_BR=${VM_BR:-xenbr1}
+VM_BR=${VM_BR:-xapi1}
VM_VLAN=${VM_VLAN:-100}
# MGMT network params
MGT_NETMASK=${MGT_NETMASK:-255.255.255.0}
-MGT_BR=${MGT_BR:-xenbr2}
+MGT_BR=${MGT_BR:-xapi2}
MGT_VLAN=${MGT_VLAN:-101}
# VM Password
@@ -205,8 +205,9 @@
# Checkout nova
if [ ! -d $TOP_DIR/nova ]; then
- git clone git://github.com/cloudbuilders/nova.git
- git checkout diablo
+ git clone $NOVA_REPO
+ cd $TOP_DIR/nova
+ git checkout $NOVA_BRANCH
fi
# Run devstack on launch
diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in
index 8443dcb..01041e2 100644
--- a/tools/xen/templates/ova.xml.in
+++ b/tools/xen/templates/ova.xml.in
@@ -5,7 +5,7 @@
@PRODUCT_BRAND@ @PRODUCT_VERSION@-@BUILD_NUMBER@
</label>
<shortdesc></shortdesc>
- <config mem_set="671088640" vcpus="1"/>
+ <config mem_set="1073741824" vcpus="1"/>
<hacks is_hvm="false"/>
<vbd device="xvda" function="root" mode="w" vdi="vdi_xvda"/>
</vm>