Merge "XenAPI: Upgrade built-VM to Trusty"
diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template
new file mode 100644
index 0000000..1c57b32
--- /dev/null
+++ b/files/apache-ceilometer.template
@@ -0,0 +1,15 @@
+Listen %PORT%
+
+<VirtualHost *:%PORT%>
+ WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP}
+ WSGIProcessGroup ceilometer-api
+ WSGIScriptAlias / %WSGIAPP%
+ WSGIApplicationGroup %{GLOBAL}
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/%APACHE_NAME%/ceilometer.log
+ CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 0a286b9..88492d3 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -11,6 +11,9 @@
</IfVersion>
ErrorLog /var/log/%APACHE_NAME%/keystone.log
CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
</VirtualHost>
<VirtualHost *:%ADMINPORT%>
@@ -23,6 +26,9 @@
</IfVersion>
ErrorLog /var/log/%APACHE_NAME%/keystone.log
CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+ %SSLENGINE%
+ %SSLCERTFILE%
+ %SSLKEYFILE%
</VirtualHost>
# Workaround for missing path on RHEL6, see
diff --git a/files/apts/horizon b/files/apts/horizon
index 8969046..03df3cb 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -9,13 +9,11 @@
python-xattr
python-sqlalchemy
python-webob
-python-kombu
pylint
python-eventlet
python-nose
python-sphinx
python-mox
-python-kombu
python-coverage
python-cherrypy3 # why?
python-migrate
diff --git a/files/apts/neutron b/files/apts/neutron
index 381c758..a48a800 100644
--- a/files/apts/neutron
+++ b/files/apts/neutron
@@ -11,7 +11,6 @@
python-suds
python-pastedeploy
python-greenlet
-python-kombu
python-eventlet
python-sqlalchemy
python-mysqldb
diff --git a/files/apts/nova b/files/apts/nova
index b1b969a..a3b0cb1 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -42,7 +42,6 @@
python-suds
python-lockfile
python-m2crypto
-python-kombu
python-feedparser
python-iso8601
python-qpid # NOPRIME
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index ff00e38..9016355 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -12,12 +12,6 @@
catalog.RegionOne.compute.name = Compute Service
-catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.name = Compute Service V3
-
-
catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index d3bde26..fa7e439 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -12,7 +12,6 @@
python-coverage
python-dateutil
python-eventlet
-python-kombu
python-mox
python-nose
python-pylint
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
index a734cb9..4c37ade 100644
--- a/files/rpms-suse/keystone
+++ b/files/rpms-suse/keystone
@@ -10,6 +10,6 @@
python-greenlet
python-lxml
python-mysql
-python-mysql.connector
+python-mysql-connector-python
python-pysqlite
sqlite3
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index 8ad69b0..8431bd1 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -7,9 +7,8 @@
python-eventlet
python-greenlet
python-iso8601
-python-kombu
python-mysql
-python-mysql.connector
+python-mysql-connector-python
python-Paste
python-PasteDeploy
python-pyudev
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 73c0604..b1c4f6a 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -28,13 +28,12 @@
python-feedparser
python-greenlet
python-iso8601
-python-kombu
python-libxml2
python-lockfile
python-lxml # needed for glance which is needed for nova --- this shouldn't be here
python-mox
python-mysql
-python-mysql.connector
+python-mysql-connector-python
python-numpy # needed by websockify for spice console
python-paramiko
python-sqlalchemy-migrate
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 8ecb030..fe3a2f4 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -9,7 +9,6 @@
python-eventlet
python-greenlet
python-httplib2
-python-kombu
python-migrate
python-mox
python-nose
diff --git a/files/rpms/keystone b/files/rpms/keystone
index e1873b7..ce41ee5 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -9,5 +9,6 @@
python-sqlalchemy
python-webob
sqlite
+mod_ssl
# Deps installed via pip for RHEL
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 7020d33..2c9dd3d 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,7 +11,6 @@
python-eventlet
python-greenlet
python-iso8601
-python-kombu
#rhel6 gets via pip
python-paste # dist:f19,f20,rhel7
python-paste-deploy # dist:f19,f20,rhel7
diff --git a/files/rpms/nova b/files/rpms/nova
index 695d814..dc1944b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -25,7 +25,6 @@
python-feedparser
python-greenlet
python-iso8601
-python-kombu
python-lockfile
python-migrate
python-mox
diff --git a/functions b/functions
index 0194acf..376aff0 100644
--- a/functions
+++ b/functions
@@ -73,7 +73,7 @@
# OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
if [[ "$image_url" =~ 'openvz' ]]; then
image_name="${image_fname%.tar.gz}"
- openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
+ openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
return
fi
@@ -184,7 +184,7 @@
vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
- openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
+ openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
return
fi
@@ -202,7 +202,7 @@
fi
openstack \
--os-token $token \
- --os-url http://$GLANCE_HOSTPORT \
+ --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
image create \
"$image_name" --public \
--container-format=ovf --disk-format=vhd \
@@ -217,7 +217,7 @@
image_name="${image_fname%.xen-raw.tgz}"
openstack \
--os-token $token \
- --os-url http://$GLANCE_HOSTPORT \
+ --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
image create \
"$image_name" --public \
--container-format=tgz --disk-format=raw \
@@ -295,9 +295,9 @@
if [ "$container_format" = "bare" ]; then
if [ "$unpack" = "zcat" ]; then
- openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
+ openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
else
- openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
+ openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
fi
else
# Use glance client to add the kernel the root filesystem.
@@ -305,12 +305,12 @@
# kernel for use when uploading the root filesystem.
local kernel_id="" ramdisk_id="";
if [ -n "$kernel" ]; then
- kernel_id=$(openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+ kernel_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
fi
if [ -n "$ramdisk" ]; then
- ramdisk_id=$(openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+ ramdisk_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
fi
- openstack --os-token $token --os-url http://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
+ openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
fi
}
@@ -339,7 +339,7 @@
function wait_for_service {
local timeout=$1
local url=$2
- timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+ timeout $timeout sh -c "while ! curl -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
}
diff --git a/lib/ceilometer b/lib/ceilometer
index 00fc0d3..9bb3121 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -41,6 +41,7 @@
CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
+CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}
# Support potential entry-points console scripts
CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
@@ -52,6 +53,7 @@
CEILOMETER_SERVICE_PROTOCOL=http
CEILOMETER_SERVICE_HOST=$SERVICE_HOST
CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
+CEILOMETER_USE_MOD_WSGI=$(trueorfalse False $CEILOMETER_USE_MOD_WSGI)
# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
@@ -105,12 +107,39 @@
}
+# _cleanup_ceilometer_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_ceilometer_apache_wsgi {
+ sudo rm -f $CEILOMETER_WSGI_DIR/*
+ sudo rm -f $(apache_site_config_for ceilometer)
+}
+
# cleanup_ceilometer() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceilometer {
if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
mongo ceilometer --eval "db.dropDatabase();"
fi
+ if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+ _cleanup_ceilometer_apache_wsgi
+ fi
+}
+
+function _config_ceilometer_apache_wsgi {
+ sudo mkdir -p $CEILOMETER_WSGI_DIR
+
+ local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
+ local apache_version=$(get_apache_version)
+
+ # copy proxy vhost and wsgi file
+ sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
+
+ sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf
+ sudo sed -e "
+ s|%PORT%|$CEILOMETER_SERVICE_PORT|g;
+ s|%APACHE_NAME%|$APACHE_NAME|g;
+ s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g;
+ s|%USER%|$STACK_USER|g
+ " -i $ceilometer_apache_conf
}
# configure_ceilometer() - Set config files, create data dirs, etc
@@ -163,6 +192,11 @@
iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER"
iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
fi
+
+ if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+ iniset $CEILOMETER_CONF api pecan_debug "False"
+ _config_ceilometer_apache_wsgi
+ fi
}
function configure_mongodb {
@@ -223,7 +257,16 @@
run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
- run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+
+ if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
+ run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+ else
+ enable_apache_site ceilometer
+ restart_apache_server
+ tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log
+ tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log
+ fi
+
# Start the compute agent last to allow time for the collector to
# fully wake up and connect to the message bus. See bug #1355809
@@ -248,6 +291,10 @@
# stop_ceilometer() - Stop running processes
function stop_ceilometer {
+ if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+ disable_apache_site ceilometer
+ restart_apache_server
+ fi
# Kill the ceilometer screen windows
for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
stop_process $serv
diff --git a/lib/cinder b/lib/cinder
index cbca9c0..b30a036 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -46,6 +46,9 @@
CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
# Public facing bits
+if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+ CINDER_SERVICE_PROTOCOL="https"
+fi
CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
@@ -299,6 +302,20 @@
fi
iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS"
+
+ iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
+ if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT glance_protocol https
+ fi
+
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service cinder; then
+ ensure_certificates CINDER
+
+ iniset $CINDER_CONF DEFAULT ssl_cert_file "$CINDER_SSL_CERT"
+ iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY"
+ fi
+
}
# create_cinder_accounts() - Set up common required cinder accounts
@@ -399,6 +416,12 @@
# start_cinder() - Start running processes, including screen
function start_cinder {
+ local service_port=$CINDER_SERVICE_PORT
+ local service_protocol=$CINDER_SERVICE_PROTOCOL
+ if is_service_enabled tls-proxy; then
+ service_port=$CINDER_SERVICE_PORT_INT
+ service_protocol="http"
+ fi
if is_service_enabled c-vol; then
# Delete any old stack.conf
sudo rm -f /etc/tgt/conf.d/stack.conf
@@ -425,7 +448,7 @@
run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
echo "Waiting for Cinder API to start..."
- if ! wait_for_service $SERVICE_TIMEOUT $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT; then
+ if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then
die $LINENO "c-api did not start"
fi
diff --git a/lib/glance b/lib/glance
index 6ca2fb5..4194842 100644
--- a/lib/glance
+++ b/lib/glance
@@ -51,8 +51,18 @@
GLANCE_BIN_DIR=$(get_python_exec_prefix)
fi
+if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then
+ GLANCE_SERVICE_PROTOCOL="https"
+fi
+
# Glance connection info. Note the port must be specified.
-GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
+GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST}
+GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292}
+GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
+GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
+GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
# Tell Tempest this project is present
TEMPEST_SERVICES+=,glance
@@ -148,6 +158,26 @@
iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
fi
+ if is_service_enabled tls-proxy; then
+ iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
+ iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
+ fi
+
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service glance; then
+ ensure_certificates GLANCE
+
+ iniset $GLANCE_API_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
+ iniset $GLANCE_API_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
+
+ iniset $GLANCE_REGISTRY_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
+ iniset $GLANCE_REGISTRY_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
+ fi
+
+ if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+ iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https
+ fi
+
cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
@@ -176,6 +206,14 @@
cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
+
+ if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+ CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+ CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+
+ iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+ iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+ fi
}
# create_glance_accounts() - Set up common required glance accounts
@@ -206,9 +244,9 @@
"image" "Glance Image Service")
get_or_create_endpoint $glance_service \
"$REGION_NAME" \
- "http://$GLANCE_HOSTPORT" \
- "http://$GLANCE_HOSTPORT" \
- "http://$GLANCE_HOSTPORT"
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
fi
fi
}
@@ -265,10 +303,17 @@
# start_glance() - Start running processes, including screen
function start_glance {
+ local service_protocol=$GLANCE_SERVICE_PROTOCOL
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT &
+ start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT &
+ fi
+
run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+
echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then
die $LINENO "g-api did not start"
fi
}
diff --git a/lib/heat b/lib/heat
index f64cc90..ff3b307 100644
--- a/lib/heat
+++ b/lib/heat
@@ -113,7 +113,7 @@
configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR
if is_ssl_enabled_service "key"; then
- iniset $HEAT_CONF clients_keystone ca_file $KEYSTONE_SSL_CA
+ iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
fi
# ec2authtoken
@@ -131,6 +131,18 @@
# Cloudwatch API
iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
+ if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
+ iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
+ fi
+
+ if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
+ iniset $HEAT_CONF clients_nova ca_file $SSL_BUNDLE_FILE
+ fi
+
+ if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+ iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE
+ fi
+
# heat environment
sudo mkdir -p $HEAT_ENV_DIR
sudo chown $STACK_USER $HEAT_ENV_DIR
diff --git a/lib/horizon b/lib/horizon
index 4dd12da..755be18 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -92,24 +92,6 @@
local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
cp $HORIZON_SETTINGS $local_settings
- if is_service_enabled neutron; then
- _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP
- fi
- # enable loadbalancer dashboard in case service is enabled
- if is_service_enabled q-lbaas; then
- _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
- fi
-
- # enable firewall dashboard in case service is enabled
- if is_service_enabled q-fwaas; then
- _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True
- fi
-
- # enable VPN dashboard in case service is enabled
- if is_service_enabled q-vpn; then
- _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True
- fi
-
_horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
_horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
if [[ -n "$KEYSTONE_TOKEN_HASH_ALGORITHM" ]]; then
diff --git a/lib/keystone b/lib/keystone
index 9eca80a..1c67835 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -95,7 +95,7 @@
KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
# if we are running with SSL use https protocols
-if is_ssl_enabled_service "key"; then
+if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
KEYSTONE_AUTH_PROTOCOL="https"
KEYSTONE_SERVICE_PROTOCOL="https"
fi
@@ -123,7 +123,21 @@
sudo mkdir -p $KEYSTONE_WSGI_DIR
local keystone_apache_conf=$(apache_site_config_for keystone)
- local apache_version=$(get_apache_version)
+ local keystone_ssl=""
+ local keystone_certfile=""
+ local keystone_keyfile=""
+ local keystone_service_port=$KEYSTONE_SERVICE_PORT
+ local keystone_auth_port=$KEYSTONE_AUTH_PORT
+
+ if is_ssl_enabled_service key; then
+ keystone_ssl="SSLEngine On"
+ keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT"
+ keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY"
+ fi
+ if is_service_enabled tls-proxy; then
+ keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
+ keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
+ fi
# copy proxy vhost and wsgi file
sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main
@@ -131,11 +145,14 @@
sudo cp $FILES/apache-keystone.template $keystone_apache_conf
sudo sed -e "
- s|%PUBLICPORT%|$KEYSTONE_SERVICE_PORT|g;
- s|%ADMINPORT%|$KEYSTONE_AUTH_PORT|g;
+ s|%PUBLICPORT%|$keystone_service_port|g;
+ s|%ADMINPORT%|$keystone_auth_port|g;
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g;
s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g;
+ s|%SSLENGINE%|$keystone_ssl|g;
+ s|%SSLCERTFILE%|$keystone_certfile|g;
+ s|%SSLKEYFILE%|$keystone_keyfile|g;
s|%USER%|$STACK_USER|g
" -i $keystone_apache_conf
}
@@ -200,8 +217,13 @@
fi
# Set the URL advertised in the ``versions`` structure returned by the '/' route
- iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
- iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+ if is_service_enabled tls-proxy; then
+ iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/"
+ iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/"
+ else
+ iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
+ iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+ fi
iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
# Register SSL certificates if provided
@@ -280,7 +302,7 @@
fi
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
- iniset $KEYSTONE_CONF DEFAULT debug "True"
+ iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
# Eliminate the %(asctime)s.%(msecs)03d from the log format strings
iniset $KEYSTONE_CONF DEFAULT logging_context_format_string "%(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s"
iniset $KEYSTONE_CONF DEFAULT logging_default_format_string "%(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s"
@@ -412,7 +434,7 @@
iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT
iniset $conf_file $section auth_protocol $KEYSTONE_AUTH_PROTOCOL
iniset $conf_file $section identity_uri $KEYSTONE_AUTH_URI
- iniset $conf_file $section cafile $KEYSTONE_SSL_CA
+ iniset $conf_file $section cafile $SSL_BUNDLE_FILE
configure_API_version $conf_file $IDENTITY_API_VERSION $section
iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
iniset $conf_file $section admin_user $admin_user
@@ -489,6 +511,9 @@
setup_develop $KEYSTONE_DIR
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
install_apache_wsgi
+ if is_ssl_enabled_service "key"; then
+ enable_mod_ssl
+ fi
fi
}
@@ -496,8 +521,10 @@
function start_keystone {
# Get right service port for testing
local service_port=$KEYSTONE_SERVICE_PORT
+ local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
if is_service_enabled tls-proxy; then
service_port=$KEYSTONE_SERVICE_PORT_INT
+ auth_protocol="http"
fi
if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
@@ -506,15 +533,19 @@
tail_log key /var/log/$APACHE_NAME/keystone.log
tail_log key-access /var/log/$APACHE_NAME/keystone_access.log
else
+ local EXTRA_PARAMS=""
+ if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
+ EXTRA_PARAMS="--debug"
+ fi
# Start Keystone in a screen window
- run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
+ run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $EXTRA_PARAMS"
fi
echo "Waiting for keystone to start..."
# Check that the keystone service is running. Even if the tls tunnel
# should be enabled, make sure the internal port is checked using
# unencryted traffic at this point.
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s http://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
die $LINENO "keystone did not start"
fi
diff --git a/lib/neutron b/lib/neutron
index 96cd47b..81f2697 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -69,6 +69,11 @@
PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
+if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
+ Q_PROTOCOL="https"
+fi
+
+
# Set up default directories
NEUTRON_DIR=$DEST/neutron
NEUTRONCLIENT_DIR=$DEST/python-neutronclient
@@ -105,8 +110,12 @@
Q_PLUGIN=${Q_PLUGIN:-ml2}
# Default Neutron Port
Q_PORT=${Q_PORT:-9696}
+# Default Neutron Internal Port when using TLS proxy
+Q_PORT_INT=${Q_PORT_INT:-19696}
# Default Neutron Host
Q_HOST=${Q_HOST:-$SERVICE_HOST}
+# Default protocol
+Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
# Default admin username
Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
# Default auth strategy
@@ -409,7 +418,7 @@
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME"
iniset $NOVA_CONF neutron region_name "$REGION_NAME"
- iniset $NOVA_CONF neutron url "http://$Q_HOST:$Q_PORT"
+ iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
if [[ "$Q_USE_SECGROUP" == "True" ]]; then
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
@@ -462,9 +471,9 @@
"network" "Neutron Service")
get_or_create_endpoint $neutron_service \
"$REGION_NAME" \
- "http://$SERVICE_HOST:$Q_PORT/" \
- "http://$SERVICE_HOST:$Q_PORT/" \
- "http://$SERVICE_HOST:$Q_PORT/"
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
fi
fi
}
@@ -590,12 +599,25 @@
# Start running processes, including screen
function start_neutron_service_and_check {
local cfg_file_options="$(determine_config_files neutron-server)"
+ local service_port=$Q_PORT
+ local service_protocol=$Q_PROTOCOL
+ if is_service_enabled tls-proxy; then
+ service_port=$Q_PORT_INT
+ service_protocol="http"
+ fi
# Start the Neutron service
run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
echo "Waiting for Neutron to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
+ if is_ssl_enabled_service "neutron"; then
+ ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
+ fi
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then
die $LINENO "Neutron did not start"
fi
+ # Start proxy if enabled
+ if is_service_enabled tls-proxy; then
+ start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
+ fi
}
# Start running processes, including screen
@@ -730,6 +752,23 @@
setup_colorized_logging $NEUTRON_CONF DEFAULT project_id
fi
+ if is_service_enabled tls-proxy; then
+ # Set the service port for a proxy to take the original
+ iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+ fi
+
+ if is_ssl_enabled_service "nova"; then
+ iniset $NEUTRON_CONF DEFAULT nova_ca_certificates_file "$SSL_BUNDLE_FILE"
+ fi
+
+ if is_ssl_enabled_service "neutron"; then
+ ensure_certificates NEUTRON
+
+ iniset $NEUTRON_CONF DEFAULT use_ssl True
+ iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT"
+ iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY"
+ fi
+
_neutron_setup_rootwrap
}
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index da90ee3..1406e37 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -20,38 +20,12 @@
# Specify the VLAN range
Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094}
-# Specify ncclient package information
-NCCLIENT_DIR=$DEST/ncclient
-NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
-NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git}
-NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
-
# This routine put a prefix on an existing function name
function _prefix_function {
declare -F $1 > /dev/null || die "$1 doesn't exist"
eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
}
-function _has_ovs_subplugin {
- local subplugin
- for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
- if [[ "$subplugin" == "openvswitch" ]]; then
- return 0
- fi
- done
- return 1
-}
-
-function _has_nexus_subplugin {
- local subplugin
- for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
- if [[ "$subplugin" == "nexus" ]]; then
- return 0
- fi
- done
- return 1
-}
-
function _has_n1kv_subplugin {
local subplugin
for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
@@ -62,27 +36,6 @@
return 1
}
-# This routine populates the cisco config file with the information for
-# a particular nexus switch
-function _config_switch {
- local cisco_cfg_file=$1
- local switch_ip=$2
- local username=$3
- local password=$4
- local ssh_port=$5
- shift 5
-
- local section="NEXUS_SWITCH:$switch_ip"
- iniset $cisco_cfg_file $section username $username
- iniset $cisco_cfg_file $section password $password
- iniset $cisco_cfg_file $section ssh_port $ssh_port
-
- while [[ ${#@} != 0 ]]; do
- iniset $cisco_cfg_file $section $1 $2
- shift 2
- done
-}
-
# Prefix openvswitch plugin routines with "ovs" in order to differentiate from
# cisco plugin routines. This means, ovs plugin routines will coexist with cisco
# plugin routines in this script.
@@ -98,73 +51,17 @@
_prefix_function neutron_plugin_setup_interface_driver ovs
_prefix_function has_neutron_plugin_security_group ovs
-# Check the version of the installed ncclient package
-function check_ncclient_version {
-python << EOF
-version = '$NCCLIENT_VERSION'
-import sys
-try:
- import pkg_resources
- import ncclient
- module_version = pkg_resources.get_distribution('ncclient').version
- if version != module_version:
- sys.exit(1)
-except:
- sys.exit(1)
-EOF
-}
-
-# Install the ncclient package
-function install_ncclient {
- git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH
- (cd $NCCLIENT_DIR; sudo python setup.py install)
-}
-
-# Check if the required version of ncclient has been installed
-function is_ncclient_installed {
- # Check if the Cisco ncclient repository exists
- if [[ -d $NCCLIENT_DIR ]]; then
- remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}')
- for remote in $remotes; do
- if [[ $remote == $NCCLIENT_REPO ]]; then
- break;
- fi
- done
- if [[ $remote != $NCCLIENT_REPO ]]; then
- return 1
- fi
- else
- return 1
- fi
-
- # Check if the ncclient is installed with the right version
- if ! check_ncclient_version; then
- return 1
- fi
- return 0
-}
-
function has_neutron_plugin_security_group {
- if _has_ovs_subplugin; then
- ovs_has_neutron_plugin_security_group
- else
- return 1
- fi
+ return 1
}
function is_neutron_ovs_base_plugin {
- # Cisco uses OVS if openvswitch subplugin is deployed
- _has_ovs_subplugin
return
}
# populate required nova configuration parameters
function neutron_plugin_create_nova_conf {
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_create_nova_conf
- else
- _neutron_ovs_base_configure_nova_vif_driver
- fi
+ _neutron_ovs_base_configure_nova_vif_driver
}
function neutron_plugin_install_agent_packages {
@@ -177,32 +74,14 @@
# setup default subplugins
if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then
declare -ga Q_CISCO_PLUGIN_SUBPLUGINS
- Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus)
+ Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv)
fi
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_configure_common
- Q_PLUGIN_EXTRA_CONF_PATH=etc/neutron/plugins/cisco
- Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini)
- # Copy extra config files to /etc so that they can be modified
- # later according to Cisco-specific localrc settings.
- mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
- local f
- local extra_conf_file
- for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
- extra_conf_file=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
- cp $NEUTRON_DIR/$extra_conf_file /$extra_conf_file
- done
- else
- Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco
- Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
- fi
+ Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco
+ Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2"
}
function neutron_plugin_configure_debug_command {
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_configure_debug_command
- fi
}
function neutron_plugin_configure_dhcp_agent {
@@ -210,53 +89,6 @@
}
function neutron_plugin_configure_l3_agent {
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_configure_l3_agent
- fi
-}
-
-function _configure_nexus_subplugin {
- local cisco_cfg_file=$1
-
- # Install a known compatible ncclient from the Cisco repository if necessary
- if ! is_ncclient_installed; then
- # Preserve the two global variables
- local offline=$OFFLINE
- local reclone=$RECLONE
- # Change their values to allow installation
- OFFLINE=False
- RECLONE=yes
- install_ncclient
- # Restore their values
- OFFLINE=$offline
- RECLONE=$reclone
- fi
-
- # Setup default nexus switch information
- if [ ! -v Q_CISCO_PLUGIN_SWITCH_INFO ]; then
- declare -A Q_CISCO_PLUGIN_SWITCH_INFO
- HOST_NAME=$(hostname)
- Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10)
- else
- iniset $cisco_cfg_file CISCO nexus_driver neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver
- fi
-
- # Setup the switch configurations
- local nswitch
- local sw_info
- local segment
- local sw_info_array
- declare -i count=0
- for nswitch in ${!Q_CISCO_PLUGIN_SWITCH_INFO[@]}; do
- sw_info=${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}
- sw_info_array=${sw_info//:/ }
- sw_info_array=( $sw_info_array )
- count=${#sw_info_array[@]}
- if [[ $count < 5 || $(( ($count-3) % 2 )) != 0 ]]; then
- die $LINENO "Incorrect switch configuration: ${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}"
- fi
- _config_switch $cisco_cfg_file $nswitch ${sw_info_array[@]}
- done
}
# Configure n1kv plugin
@@ -279,48 +111,29 @@
}
function neutron_plugin_configure_plugin_agent {
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_configure_plugin_agent
- fi
}
function neutron_plugin_configure_service {
local subplugin
local cisco_cfg_file
- if _has_ovs_subplugin; then
- ovs_neutron_plugin_configure_service
- cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]}
- else
- cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
- fi
+ cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
# Setup the [CISCO_PLUGINS] section
if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then
die $LINENO "At most two subplugins are supported."
fi
- if _has_ovs_subplugin && _has_n1kv_subplugin; then
- die $LINENO "OVS subplugin and n1kv subplugin cannot coexist"
- fi
-
# Setup the subplugins
- inicomment $cisco_cfg_file CISCO_PLUGINS nexus_plugin
inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin
inicomment $cisco_cfg_file CISCO_TEST host
for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
case $subplugin in
- nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;;
- openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2;;
n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2;;
*) die $LINENO "Unsupported cisco subplugin: $subplugin";;
esac
done
- if _has_nexus_subplugin; then
- _configure_nexus_subplugin $cisco_cfg_file
- fi
-
if _has_n1kv_subplugin; then
_configure_n1kv_subplugin $cisco_cfg_file
fi
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 37b9e4c..7950ac0 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -45,8 +45,8 @@
}
function has_neutron_plugin_security_group {
- # False
- return 1
+ # return 0 means enabled
+ return 0
}
function neutron_plugin_check_adv_test_requirements {
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 78e7738..f84b710 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -10,11 +10,8 @@
LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin
function neutron_agent_lbaas_install_agent_packages {
- if is_ubuntu || is_fedora; then
+ if is_ubuntu || is_fedora || is_suse; then
install_package haproxy
- elif is_suse; then
- ### FIXME: Find out if package can be pushed to Factory
- echo "HAProxy packages can be installed from server:http project in OBS"
fi
}
diff --git a/lib/nova b/lib/nova
index 2a3aae1..c24bc2f 100644
--- a/lib/nova
+++ b/lib/nova
@@ -44,11 +44,20 @@
NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
+if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
+ NOVA_SERVICE_PROTOCOL="https"
+ EC2_SERVICE_PROTOCOL="https"
+else
+ EC2_SERVICE_PROTOCOL="http"
+fi
+
# Public facing bits
NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
+EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
# Support entry points installation of console scripts
if [[ -d $NOVA_DIR/bin ]]; then
@@ -349,14 +358,6 @@
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
-
- local nova_v3_service=$(get_or_create_service "novav3" \
- "computev3" "Nova Compute Service V3")
- get_or_create_endpoint $nova_v3_service \
- "$REGION_NAME" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
- "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
fi
fi
@@ -375,9 +376,9 @@
"ec2" "EC2 Compatibility Layer")
get_or_create_endpoint $ec2_service \
"$REGION_NAME" \
- "http://$SERVICE_HOST:8773/services/Cloud" \
- "http://$SERVICE_HOST:8773/services/Admin" \
- "http://$SERVICE_HOST:8773/services/Cloud"
+ "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud" \
+ "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Admin" \
+ "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud"
fi
fi
@@ -412,7 +413,6 @@
iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
- iniset $NOVA_CONF DEFAULT fixed_range ""
iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
@@ -441,6 +441,15 @@
configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
fi
+ if is_service_enabled cinder; then
+ if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+ CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+ CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+ iniset $NOVA_CONF cinder endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+ iniset $NOVA_CONF cinder ca_certificates_file $SSL_BUNDLE_FILE
+ fi
+ fi
+
if [ -n "$NOVA_STATE_PATH" ]; then
iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH"
@@ -508,12 +517,31 @@
fi
iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+ iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
iniset_rpc_backend nova $NOVA_CONF DEFAULT
- iniset $NOVA_CONF glance api_servers "$GLANCE_HOSTPORT"
+ iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
- iniset $NOVA_CONF DEFAULT osci_compute_workers "$API_WORKERS"
+ iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
+
+ if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+ iniset $NOVA_CONF DEFAULT glance_protocol https
+ fi
+
+ # Register SSL certificates if provided
+ if is_ssl_enabled_service nova; then
+ ensure_certificates NOVA
+
+ iniset $NOVA_CONF DEFAULT ssl_cert_file "$NOVA_SSL_CERT"
+ iniset $NOVA_CONF DEFAULT ssl_key_file "$NOVA_SSL_KEY"
+
+ iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS"
+ fi
+
+ if is_service_enabled tls-proxy; then
+ iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
+ fi
}
function init_nova_cells {
@@ -642,19 +670,22 @@
function start_nova_api {
# Get right service port for testing
local service_port=$NOVA_SERVICE_PORT
+ local service_protocol=$NOVA_SERVICE_PROTOCOL
if is_service_enabled tls-proxy; then
service_port=$NOVA_SERVICE_PORT_INT
+ service_protocol="http"
fi
run_process n-api "$NOVA_BIN_DIR/nova-api"
echo "Waiting for nova-api to start..."
- if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
+ if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
die $LINENO "nova-api did not start"
fi
# Start proxies if enabled
if is_service_enabled tls-proxy; then
start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+ start_tls_proxy '*' $EC2_SERVICE_PORT $NOVA_SERVICE_HOST $EC2_SERVICE_PORT_INT &
fi
}
diff --git a/lib/rpc_backend b/lib/rpc_backend
index f2d2859..de82fe1 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -44,7 +44,7 @@
local rpc_backend_cnt=0
for svc in qpid zeromq rabbit; do
is_service_enabled $svc &&
- ((rpc_backend_cnt++))
+ (( rpc_backend_cnt++ )) || true
done
if [ "$rpc_backend_cnt" -gt 1 ]; then
echo "ERROR: only one rpc backend may be enabled,"
diff --git a/lib/swift b/lib/swift
index 3c31dd2..8139552 100644
--- a/lib/swift
+++ b/lib/swift
@@ -29,6 +29,10 @@
# Defaults
# --------
+if is_ssl_enabled_service "s-proxy" || is_service_enabled tls-proxy; then
+ SWIFT_SERVICE_PROTOCOL="https"
+fi
+
# Set up default directories
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
@@ -36,6 +40,9 @@
SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
SWIFT3_DIR=$DEST/swift3
+SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
+
# TODO: add logging to different location.
# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
@@ -334,7 +341,18 @@
iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
- iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+ if is_service_enabled tls-proxy; then
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
+ else
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+ fi
+
+ if is_ssl_enabled_service s-proxy; then
+ ensure_certificates SWIFT
+
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT cert_file "$SWIFT_SSL_CERT"
+ iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY"
+ fi
# Devstack is commonly run in a small slow environment, so bump the
# timeouts up.
@@ -401,7 +419,7 @@
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
-cafile = ${KEYSTONE_SSL_CA}
+cafile = ${SSL_BUNDLE_FILE}
auth_token = ${SERVICE_TOKEN}
admin_token = ${SERVICE_TOKEN}
@@ -560,9 +578,9 @@
"object-store" "Swift Service")
get_or_create_endpoint $swift_service \
"$REGION_NAME" \
- "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
- "http://$SERVICE_HOST:8080" \
- "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
+ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \
+ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
fi
local swift_tenant_test1=$(get_or_create_project swifttenanttest1)
@@ -675,6 +693,10 @@
for type in proxy ${todo}; do
swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
done
+ if is_service_enabled tls-proxy; then
+ local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+ start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
+ fi
run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
if [[ ${SWIFT_REPLICAS} == 1 ]]; then
for type in object container account; do
diff --git a/lib/tempest b/lib/tempest
index 906cb00..d677c7e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -290,15 +290,12 @@
iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
# Compute Features
- iniset $TEMPEST_CONFIG compute-feature-enabled api_v3 ${TEMPEST_NOVA_API_V3:-False}
iniset $TEMPEST_CONFIG compute-feature-enabled resize True
iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions ${COMPUTE_API_EXTENSIONS:-"all"}
iniset $TEMPEST_CONFIG compute-feature-disabled api_extensions ${DISABLE_COMPUTE_API_EXTENSIONS}
- iniset $TEMPEST_CONFIG compute-feature-enabled api_v3_extensions ${COMPUTE_API_V3_EXTENSIONS:-"all"}
- iniset $TEMPEST_CONFIG compute-feature-disabled api_v3_extensions ${DISABLE_COMPUTE_API_V3_EXTENSIONS}
# Compute admin
iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
@@ -317,7 +314,7 @@
iniset $TEMPEST_CONFIG network-feature-disabled api_extensions ${DISABLE_NETWORK_API_EXTENSIONS}
# boto
- iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
+ iniset $TEMPEST_CONFIG boto ec2_url "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud"
iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml
diff --git a/lib/tls b/lib/tls
index 061c1ca..15e8692 100644
--- a/lib/tls
+++ b/lib/tls
@@ -14,6 +14,7 @@
#
# - configure_CA
# - init_CA
+# - cleanup_CA
# - configure_proxy
# - start_tls_proxy
@@ -27,6 +28,7 @@
# - start_tls_proxy HOST_IP 5000 localhost 5000
# - ensure_certificates
# - is_ssl_enabled_service
+# - enable_mod_ssl
# Defaults
# --------
@@ -34,14 +36,9 @@
if is_service_enabled tls-proxy; then
# TODO(dtroyer): revisit this below after the search for HOST_IP has been done
TLS_IP=${TLS_IP:-$SERVICE_IP}
-
- # Set the default ``SERVICE_PROTOCOL`` for TLS
- SERVICE_PROTOCOL=https
fi
-# Make up a hostname for cert purposes
-# will be added to /etc/hosts?
-DEVSTACK_HOSTNAME=secure.devstack.org
+DEVSTACK_HOSTNAME=$(hostname -f)
DEVSTACK_CERT_NAME=devstack-cert
DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem
@@ -209,6 +206,29 @@
# Create the CA bundle
cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem
+ cat $INT_CA_DIR/ca-chain.pem >> $SSL_BUNDLE_FILE
+
+ if is_fedora; then
+ sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+ sudo update-ca-trust
+ elif is_ubuntu; then
+ sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt
+ sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt
+ sudo update-ca-certificates
+ fi
+}
+
+# Clean up the CA files
+# cleanup_CA
+function cleanup_CA {
+ if is_fedora; then
+ sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+ sudo update-ca-trust
+ elif is_ubuntu; then
+ sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
+ sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
+ sudo update-ca-certificates
+ fi
}
# Create an initial server cert
@@ -331,6 +351,9 @@
function is_ssl_enabled_service {
local services=$@
local service=""
+ if [ "$USE_SSL" == "False" ]; then
+ return 1
+ fi
for service in ${services}; do
[[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
done
@@ -345,8 +368,12 @@
# The function expects to find a certificate, key and CA certificate in the
# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
-# KEYSTONE_SSL_CA. If it does not find these certificates the program will
-# quit.
+# KEYSTONE_SSL_CA.
+#
+# If it does not find these certificates then the devstack-issued server
+# certificate, key and CA certificate will be associated with the service.
+#
+# If only some of the variables are provided then the function will quit.
function ensure_certificates {
local service=$1
@@ -358,7 +385,15 @@
local key=${!key_var}
local ca=${!ca_var}
- if [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
+ if [[ -z "$cert" && -z "$key" && -z "$ca" ]]; then
+ local cert="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+ local key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+ local ca="$INT_CA_DIR/ca-chain.pem"
+ eval ${service}_SSL_CERT=\$cert
+ eval ${service}_SSL_KEY=\$key
+ eval ${service}_SSL_CA=\$ca
+ return # the CA certificate is already in the bundle
+ elif [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
"variable to enable SSL for ${service}"
fi
@@ -366,6 +401,21 @@
cat $ca >> $SSL_BUNDLE_FILE
}
+# Enable the mod_ssl plugin in Apache
+function enable_mod_ssl {
+ echo "Enabling mod_ssl"
+
+ if is_ubuntu; then
+ sudo a2enmod ssl
+ elif is_fedora; then
+ # Fedora enables mod_ssl by default
+ :
+ fi
+ if ! sudo `which httpd || which apache2ctl` -M | grep -w -q ssl_module; then
+ die $LINENO "mod_ssl is not enabled in apache2/httpd, please check for it manually and run stack.sh again"
+ fi
+}
+
# Proxy Functions
# ===============
diff --git a/stack.sh b/stack.sh
index c20e610..d23417e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -177,9 +177,6 @@
exit 1
fi
-# Set up logging level
-VERBOSE=$(trueorfalse True $VERBOSE)
-
# Configure sudo
# --------------
@@ -285,6 +282,182 @@
fi
+# Configure Logging
+# -----------------
+
+# Set up logging level
+VERBOSE=$(trueorfalse True $VERBOSE)
+
+# Draw a spinner so the user knows something is happening
+function spinner {
+ local delay=0.75
+ local spinstr='/-\|'
+ printf "..." >&3
+ while [ true ]; do
+ local temp=${spinstr#?}
+ printf "[%c]" "$spinstr" >&3
+ local spinstr=$temp${spinstr%"$temp"}
+ sleep $delay
+ printf "\b\b\b" >&3
+ done
+}
+
+function kill_spinner {
+ if [ ! -z "$LAST_SPINNER_PID" ]; then
+ kill >/dev/null 2>&1 $LAST_SPINNER_PID
+ printf "\b\b\bdone\n" >&3
+ fi
+}
+
+# Echo text to the log file, summary log file and stdout
+# echo_summary "something to say"
+function echo_summary {
+ if [[ -t 3 && "$VERBOSE" != "True" ]]; then
+ kill_spinner
+ echo -n -e $@ >&6
+ spinner &
+ LAST_SPINNER_PID=$!
+ else
+ echo -e $@ >&6
+ fi
+}
+
+# Echo text only to stdout, no log files
+# echo_nolog "something not for the logs"
+function echo_nolog {
+ echo $@ >&3
+}
+
+if [[ is_fedora && $DISTRO == "rhel6" ]]; then
+ # poor old python2.6 doesn't have argparse by default, which
+ # outfilter.py uses
+ is_package_installed python-argparse || install_package python-argparse
+fi
+
+# Set up logging for ``stack.sh``
+# Set ``LOGFILE`` to turn on logging
+# Append '.xxxxxxxx' to the given name to maintain history
+# where 'xxxxxxxx' is a representation of the date the file was created
+TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
+ LOGDAYS=${LOGDAYS:-7}
+ CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
+fi
+
+if [[ -n "$LOGFILE" ]]; then
+ # First clean up old log files. Use the user-specified ``LOGFILE``
+ # as the template to search for, appending '.*' to match the date
+ # we added on earlier runs.
+ LOGDIR=$(dirname "$LOGFILE")
+ LOGFILENAME=$(basename "$LOGFILE")
+ mkdir -p $LOGDIR
+ find $LOGDIR -maxdepth 1 -name $LOGFILENAME.\* -mtime +$LOGDAYS -exec rm {} \;
+ LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
+ SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
+
+ # Redirect output according to config
+
+ # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
+ # stdout later.
+ exec 3>&1
+ if [[ "$VERBOSE" == "True" ]]; then
+ # Set fd 1 and 2 to write the log file
+ exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1
+ # Set fd 6 to summary log file
+ exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
+ else
+ # Set fd 1 and 2 to primary logfile
+ exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
+ # Set fd 6 to summary logfile and stdout
+ exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
+ fi
+
+ echo_summary "stack.sh log $LOGFILE"
+ # Specified logfile name always links to the most recent log
+ ln -sf $LOGFILE $LOGDIR/$LOGFILENAME
+ ln -sf $SUMFILE $LOGDIR/$LOGFILENAME.summary
+else
+ # Set up output redirection without log files
+ # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
+ # stdout later.
+ exec 3>&1
+ if [[ "$VERBOSE" != "True" ]]; then
+ # Throw away stdout and stderr
+ exec 1>/dev/null 2>&1
+ fi
+ # Always send summary fd to original stdout
+ exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
+fi
+
+# Set up logging of screen windows
+# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
+# directory specified in ``SCREEN_LOGDIR``, we will log to the file
+# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
+# ``screen-$SERVICE_NAME.log`` to the latest log file.
+# Logs are kept for as long as specified in ``LOGDAYS``.
+if [[ -n "$SCREEN_LOGDIR" ]]; then
+
+ # We make sure the directory is created.
+ if [[ -d "$SCREEN_LOGDIR" ]]; then
+ # We cleanup the old logs
+ find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
+ else
+ mkdir -p $SCREEN_LOGDIR
+ fi
+fi
+
+
+# Configure Error Traps
+# ---------------------
+
+# Kill background processes on exit
+trap exit_trap EXIT
+function exit_trap {
+ local r=$?
+ jobs=$(jobs -p)
+ # Only do the kill when we're logging through a process substitution,
+ # which currently is only to verbose logfile
+ if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then
+ echo "exit_trap: cleaning up child processes"
+ kill 2>&1 $jobs
+ fi
+
+ # Kill the last spinner process
+ kill_spinner
+
+ if [[ $r -ne 0 ]]; then
+ echo "Error on exit"
+ if [[ -z $LOGDIR ]]; then
+ $TOP_DIR/tools/worlddump.py
+ else
+ $TOP_DIR/tools/worlddump.py -d $LOGDIR
+ fi
+ fi
+
+ exit $r
+}
+
+# Exit on any errors so that errors don't compound
+trap err_trap ERR
+function err_trap {
+ local r=$?
+ set +o xtrace
+ if [[ -n "$LOGFILE" ]]; then
+ echo "${0##*/} failed: full log in $LOGFILE"
+ else
+ echo "${0##*/} failed"
+ fi
+ exit $r
+}
+
+# Begin trapping error exit codes
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
# Common Configuration
# --------------------
@@ -340,6 +513,15 @@
# and the specified rpc backend is available on your platform.
check_rpc_backend
+# Use native SSL for servers in SSL_ENABLED_SERVICES
+USE_SSL=$(trueorfalse False $USE_SSL)
+
+# Service to enable with SSL if USE_SSL is True
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
+
+if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
+ die $LINENO "tls-proxy and SSL are mutually exclusive"
+fi
# Configure Projects
# ==================
@@ -494,179 +676,6 @@
fi
-# Configure logging
-# -----------------
-
-# Draw a spinner so the user knows something is happening
-function spinner {
- local delay=0.75
- local spinstr='/-\|'
- printf "..." >&3
- while [ true ]; do
- local temp=${spinstr#?}
- printf "[%c]" "$spinstr" >&3
- local spinstr=$temp${spinstr%"$temp"}
- sleep $delay
- printf "\b\b\b" >&3
- done
-}
-
-function kill_spinner {
- if [ ! -z "$LAST_SPINNER_PID" ]; then
- kill >/dev/null 2>&1 $LAST_SPINNER_PID
- printf "\b\b\bdone\n" >&3
- fi
-}
-
-# Echo text to the log file, summary log file and stdout
-# echo_summary "something to say"
-function echo_summary {
- if [[ -t 3 && "$VERBOSE" != "True" ]]; then
- kill_spinner
- echo -n -e $@ >&6
- spinner &
- LAST_SPINNER_PID=$!
- else
- echo -e $@ >&6
- fi
-}
-
-# Echo text only to stdout, no log files
-# echo_nolog "something not for the logs"
-function echo_nolog {
- echo $@ >&3
-}
-
-if [[ is_fedora && $DISTRO == "rhel6" ]]; then
- # poor old python2.6 doesn't have argparse by default, which
- # outfilter.py uses
- is_package_installed python-argparse || install_package python-argparse
-fi
-
-# Set up logging for ``stack.sh``
-# Set ``LOGFILE`` to turn on logging
-# Append '.xxxxxxxx' to the given name to maintain history
-# where 'xxxxxxxx' is a representation of the date the file was created
-TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
-if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
- LOGDAYS=${LOGDAYS:-7}
- CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
-fi
-
-if [[ -n "$LOGFILE" ]]; then
- # First clean up old log files. Use the user-specified ``LOGFILE``
- # as the template to search for, appending '.*' to match the date
- # we added on earlier runs.
- LOGDIR=$(dirname "$LOGFILE")
- LOGFILENAME=$(basename "$LOGFILE")
- mkdir -p $LOGDIR
- find $LOGDIR -maxdepth 1 -name $LOGFILENAME.\* -mtime +$LOGDAYS -exec rm {} \;
- LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
- SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
-
- # Redirect output according to config
-
- # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
- # stdout later.
- exec 3>&1
- if [[ "$VERBOSE" == "True" ]]; then
- # Set fd 1 and 2 to write the log file
- exec 1> >( $TOP_DIR/tools/outfilter.py -v -o "${LOGFILE}" ) 2>&1
- # Set fd 6 to summary log file
- exec 6> >( $TOP_DIR/tools/outfilter.py -o "${SUMFILE}" )
- else
- # Set fd 1 and 2 to primary logfile
- exec 1> >( $TOP_DIR/tools/outfilter.py -o "${LOGFILE}" ) 2>&1
- # Set fd 6 to summary logfile and stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v -o "${SUMFILE}" >&3 )
- fi
-
- echo_summary "stack.sh log $LOGFILE"
- # Specified logfile name always links to the most recent log
- ln -sf $LOGFILE $LOGDIR/$LOGFILENAME
- ln -sf $SUMFILE $LOGDIR/$LOGFILENAME.summary
-else
- # Set up output redirection without log files
- # Set fd 3 to a copy of stdout. So we can set fd 1 without losing
- # stdout later.
- exec 3>&1
- if [[ "$VERBOSE" != "True" ]]; then
- # Throw away stdout and stderr
- exec 1>/dev/null 2>&1
- fi
- # Always send summary fd to original stdout
- exec 6> >( $TOP_DIR/tools/outfilter.py -v >&3 )
-fi
-
-# Set up logging of screen windows
-# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
-# directory specified in ``SCREEN_LOGDIR``, we will log to the the file
-# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
-# ``screen-$SERVICE_NAME.log`` to the latest log file.
-# Logs are kept for as long specified in ``LOGDAYS``.
-if [[ -n "$SCREEN_LOGDIR" ]]; then
-
- # We make sure the directory is created.
- if [[ -d "$SCREEN_LOGDIR" ]]; then
- # We cleanup the old logs
- find $SCREEN_LOGDIR -maxdepth 1 -name screen-\*.log -mtime +$LOGDAYS -exec rm {} \;
- else
- mkdir -p $SCREEN_LOGDIR
- fi
-fi
-
-
-# Set Up Script Execution
-# -----------------------
-
-# Kill background processes on exit
-trap exit_trap EXIT
-function exit_trap {
- local r=$?
- jobs=$(jobs -p)
- # Only do the kill when we're logging through a process substitution,
- # which currently is only to verbose logfile
- if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then
- echo "exit_trap: cleaning up child processes"
- kill 2>&1 $jobs
- fi
-
- # Kill the last spinner process
- kill_spinner
-
- if [[ $r -ne 0 ]]; then
- echo "Error on exit"
- if [[ -z $LOGDIR ]]; then
- $TOP_DIR/tools/worlddump.py
- else
- $TOP_DIR/tools/worlddump.py -d $LOGDIR
- fi
- fi
-
- exit $r
-}
-
-# Exit on any errors so that errors don't compound
-trap err_trap ERR
-function err_trap {
- local r=$?
- set +o xtrace
- if [[ -n "$LOGFILE" ]]; then
- echo "${0##*/} failed: full log in $LOGFILE"
- else
- echo "${0##*/} failed"
- fi
- exit $r
-}
-
-
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error. It is also useful for following along as the install occurs.
-set -o xtrace
-
-
# Install Packages
# ================
@@ -822,7 +831,7 @@
configure_heat
fi
-if is_service_enabled tls-proxy; then
+if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
configure_CA
init_CA
init_cert
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 12e861e..9bf8f73 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -171,6 +171,7 @@
echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:"
mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
domid=$(get_domid "$GUEST_NAME")
+ sleep 20 # Wait for the vnc-port to be written
port=$(xenstore-read /local/domain/$domid/console/vnc-port)
echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
while true; do