Merge "Make sure tgt is using config.d approach"
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 2a8d070..3791917 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -101,7 +101,7 @@
--service_id $KEYSTONE_SERVICE \
--publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \
--adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \
- --internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0"
+ --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0"
fi
# Nova
@@ -123,9 +123,9 @@
keystone endpoint-create \
--region RegionOne \
--service_id $NOVA_SERVICE \
- --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
- --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
- --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s"
+ --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
+ --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \
+ --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s"
fi
# Nova needs ResellerAdmin role to download images when accessing
# swift through the s3 api. The admin role in swift allows a user
@@ -197,9 +197,9 @@
keystone endpoint-create \
--region RegionOne \
--service_id $GLANCE_SERVICE \
- --publicurl "http://$SERVICE_HOST:9292/v1" \
- --adminurl "http://$SERVICE_HOST:9292/v1" \
- --internalurl "http://$SERVICE_HOST:9292/v1"
+ --publicurl "http://$SERVICE_HOST:9292" \
+ --adminurl "http://$SERVICE_HOST:9292" \
+ --internalurl "http://$SERVICE_HOST:9292"
fi
fi
@@ -223,7 +223,7 @@
--region RegionOne \
--service_id $SWIFT_SERVICE \
--publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
- --adminurl "http://$SERVICE_HOST:8080/v1" \
+ --adminurl "http://$SERVICE_HOST:8080" \
--internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
fi
fi
diff --git a/functions b/functions
index 8ed9960..664cfa0 100644
--- a/functions
+++ b/functions
@@ -548,6 +548,26 @@
}
+# Distro-agnostic function to tell if a package is installed
+# is_package_installed package [package ...]
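+#
+# Example (mirrors the sudo check in stack.sh below):
+#   is_package_installed sudo || install_package sudo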
+function is_package_installed() {
+ if [[ -z "$@" ]]; then
+ return 1
+ fi
+
+ if [[ -z "$os_PACKAGE" ]]; then
+ GetOSVersion
+ fi
+ if [[ "$os_PACKAGE" = "deb" ]]; then
+ dpkg -l "$@" > /dev/null
+ return $?
+ else
+ rpm --quiet -q "$@"
+ return $?
+ fi
+}
+
+
# Test if the named environment variable is set and not zero length
# is_set env-var
function is_set() {
diff --git a/lib/keystone b/lib/keystone
new file mode 100644
index 0000000..a0cc601
--- /dev/null
+++ b/lib/keystone
@@ -0,0 +1,172 @@
+# lib/keystone
+# Functions to control the configuration and operation of **Keystone**
+
+# Dependencies:
+# ``functions`` file
+# ``BASE_SQL_CONN``
+# ``SERVICE_HOST``
+# ``SERVICE_TOKEN``
+# ``S3_SERVICE_PORT`` (template backend only)
+
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_keystone
+# configure_keystone
+# init_keystone
+# start_keystone
+# stop_keystone
+# cleanup_keystone
+
+# Print the commands being run so that we can see the command that triggers
+# an error. It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+KEYSTONE_DIR=$DEST/keystone
+KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
+KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+
+KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
+
+# Select the backend for Keystone's service catalog
+KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
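+# (set KEYSTONE_CATALOG_BACKEND=sql, e.g. in ``localrc``, to use the sql catalog instead)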
+KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+
+# Set Keystone interface configuration
+KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
+KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
+KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
+KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
+KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
+KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
+
+
+# Entry Points
+# ------------
+
+# cleanup_keystone() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_keystone() {
+ # kill instances (nova)
+ # delete image files (glance)
+ # This function intentionally left blank
+ :
+}
+
+# configure_keystoneclient() - Set config files, create data dirs, etc
+function configure_keystoneclient() {
+ setup_develop $KEYSTONECLIENT_DIR
+}
+
+# configure_keystone() - Set config files, create data dirs, etc
+function configure_keystone() {
+ setup_develop $KEYSTONE_DIR
+
+ if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
+ sudo mkdir -p $KEYSTONE_CONF_DIR
+ sudo chown `whoami` $KEYSTONE_CONF_DIR
+ fi
+
+ if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
+ cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
+ cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
+ fi
+
+ # Rewrite stock ``keystone.conf``
+ iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
+ iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
+ iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
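+ # Insert s3_extension ahead of crud_extension in any paste pipeline
+ # that already lists ec2_extension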
+ sed -e "
+ /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
+ " -i $KEYSTONE_CONF
+
+ # Append the S3 bits
+ iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
+
+ if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+ # Configure ``keystone.conf`` to use sql
+ iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
+ inicomment $KEYSTONE_CONF catalog template_file
+ else
+ cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+
+ # Add swift endpoints to service catalog if swift is enabled
+ if is_service_enabled swift; then
+ echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
+ fi
+
+ # Add quantum endpoints to service catalog if quantum is enabled
+ if is_service_enabled quantum; then
+ echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
+ echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
+ fi
+
+ sudo sed -e "
+ s,%SERVICE_HOST%,$SERVICE_HOST,g;
+ s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+ " -i $KEYSTONE_CATALOG
+
+ # Configure ``keystone.conf`` to use templates
+ iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+ iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+ fi
+
+ # Set up logging
+ LOGGING_ROOT="devel"
+ if [ "$SYSLOG" != "False" ]; then
+ LOGGING_ROOT="$LOGGING_ROOT,production"
+ fi
+ KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
+ cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
+ iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
+ iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
+
+}
+
+# init_keystone() - Initialize databases, etc.
+function init_keystone() {
+ # (Re)create keystone database
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
+ mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
+
+ # Initialize keystone database
+ $KEYSTONE_DIR/bin/keystone-manage db_sync
+
+ # Set up certificates
+ $KEYSTONE_DIR/bin/keystone-manage pki_setup
+}
+
+# install_keystoneclient() - Collect source and prepare
+function install_keystoneclient() {
+ git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
+}
+
+# install_keystone() - Collect source and prepare
+function install_keystone() {
+ git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+}
+
+# start_keystone() - Start running processes, including screen
+function start_keystone() {
+ # Start Keystone in a screen window
+ screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
+}
+
+# stop_keystone() - Stop running processes
+function stop_keystone() {
+ # Kill the Keystone screen window
+ screen -S $SCREEN_NAME -p key -X kill
+}
diff --git a/samples/local.sh b/samples/local.sh
index eb9bc24..5901525 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -62,3 +62,6 @@
# ----------
# Add tcp/22 and icmp to default security group
+nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+
diff --git a/stack.sh b/stack.sh
index 687e5bf..c352a27 100755
--- a/stack.sh
+++ b/stack.sh
@@ -182,11 +182,7 @@
sleep $ROOTSLEEP
# Give the non-root user the ability to run as **root** via ``sudo``
- if [[ "$os_PACKAGE" = "deb" ]]; then
- dpkg -l sudo || install_package sudo
- else
- rpm -qa | grep sudo || install_package sudo
- fi
+ is_package_installed sudo || install_package sudo
if ! getent group stack >/dev/null; then
echo "Creating a group called stack"
groupadd stack
@@ -215,12 +211,7 @@
exit 1
else
# We're not **root**, make sure ``sudo`` is available
- if [[ "$os_PACKAGE" = "deb" ]]; then
- CHECK_SUDO_CMD="dpkg -l sudo"
- else
- CHECK_SUDO_CMD="rpm -q sudo"
- fi
- $CHECK_SUDO_CMD || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo."
+ is_package_installed sudo || die "Sudo is required. Re-run stack.sh as root ONE TIME ONLY to set up sudo."
# UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
@@ -262,63 +253,17 @@
sudo chown `whoami` $DATA_DIR
-# Configure Projects
-# ==================
-
-# Get project function libraries
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/n-vol
-source $TOP_DIR/lib/ceilometer
-source $TOP_DIR/lib/heat
-source $TOP_DIR/lib/quantum
-
-# Set the destination directories for OpenStack projects
-NOVA_DIR=$DEST/nova
-HORIZON_DIR=$DEST/horizon
-GLANCE_DIR=$DEST/glance
-GLANCECLIENT_DIR=$DEST/python-glanceclient
-KEYSTONE_DIR=$DEST/keystone
-NOVACLIENT_DIR=$DEST/python-novaclient
-KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
-OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-NOVNC_DIR=$DEST/noVNC
-SWIFT_DIR=$DEST/swift
-SWIFT3_DIR=$DEST/swift3
-SWIFTCLIENT_DIR=$DEST/python-swiftclient
-QUANTUM_DIR=$DEST/quantum
-QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
-
-# Default Quantum Plugin
-Q_PLUGIN=${Q_PLUGIN:-openvswitch}
-# Default Quantum Port
-Q_PORT=${Q_PORT:-9696}
-# Default Quantum Host
-Q_HOST=${Q_HOST:-localhost}
-# Which Quantum API nova should use
-# Default admin username
-Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
-# Default auth strategy
-Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# Use namespace or not
-Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
-# Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-}
-
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
-
-# Nova supports pluggable schedulers. The default ``FilterScheduler``
-# should work in most cases.
-SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
+# Common Configuration
+# ====================
# Set fixed and floating range here so we can make sure not to use addresses
# from either range when attempting to guess the IP to use for the host.
# Note that setting FIXED_RANGE may be necessary when running DevStack
-# in an OpenStack cloud that uses eith of these address ranges internally.
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+# in an OpenStack cloud that uses either of these address ranges internally.
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
# Find the interface used for the default route
HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')}
@@ -359,6 +304,57 @@
# Service startup timeout
SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
+# Configure Projects
+# ==================
+
+# Get project function libraries
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/n-vol
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/quantum
+
+# Set the destination directories for OpenStack projects
+NOVA_DIR=$DEST/nova
+HORIZON_DIR=$DEST/horizon
+GLANCE_DIR=$DEST/glance
+GLANCECLIENT_DIR=$DEST/python-glanceclient
+NOVACLIENT_DIR=$DEST/python-novaclient
+OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
+NOVNC_DIR=$DEST/noVNC
+SWIFT_DIR=$DEST/swift
+SWIFT3_DIR=$DEST/swift3
+SWIFTCLIENT_DIR=$DEST/python-swiftclient
+QUANTUM_DIR=$DEST/quantum
+QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
+
+# Default Quantum Plugin
+Q_PLUGIN=${Q_PLUGIN:-openvswitch}
+# Default Quantum Port
+Q_PORT=${Q_PORT:-9696}
+# Default Quantum Host
+Q_HOST=${Q_HOST:-localhost}
+# Which Quantum API nova should use
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# Use namespace or not
+Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
+# Meta data IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-}
+
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
+# Nova supports pluggable schedulers. The default ``FilterScheduler``
+# should work in most cases.
+SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
+
# Generic helper to configure passwords
function read_password {
set +o xtrace
@@ -419,8 +415,6 @@
fi
PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
NET_MAN=${NET_MAN:-FlatDHCPManager}
EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
@@ -568,14 +562,6 @@
# Set the tenant for service accounts in Keystone
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-# Set Keystone interface configuration
-KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
-KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
-KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
-KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
-KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST}
-KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000}
-KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http}
# Horizon
@@ -791,10 +777,11 @@
# Check Out Source
# ----------------
+install_keystoneclient
+
git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
# Check out the client libs that are used most
-git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH
@@ -802,7 +789,7 @@
# glance, swift middleware and nova api needs keystone middleware
if is_service_enabled key g-api n-api swift; then
# unified auth system (manages accounts/tokens)
- git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
+ install_keystone
fi
if is_service_enabled swift; then
# storage service
@@ -849,11 +836,11 @@
# Set up our checkouts so they are installed into python path
# allowing ``import nova`` or ``import glance.client``
-setup_develop $KEYSTONECLIENT_DIR
+configure_keystoneclient
setup_develop $NOVACLIENT_DIR
setup_develop $OPENSTACKCLIENT_DIR
if is_service_enabled key g-api n-api swift; then
- setup_develop $KEYSTONE_DIR
+ configure_keystone
fi
if is_service_enabled swift; then
setup_develop $SWIFT_DIR
@@ -984,6 +971,36 @@
screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+# Keystone
+# --------
+
+if is_service_enabled key; then
+ configure_keystone
+ init_keystone
+ start_keystone
+ echo "Waiting for keystone to start..."
+ if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
+ echo "keystone did not start"
+ exit 1
+ fi
+
+ # ``keystone_data.sh`` creates services, admin and demo users, and roles.
+ SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+
+ ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
+ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
+ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
+ DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \
+ bash -x $FILES/keystone_data.sh
+
+ # Set up auth creds now that keystone is bootstrapped
+ export OS_AUTH_URL=$SERVICE_ENDPOINT
+ export OS_TENANT_NAME=admin
+ export OS_USERNAME=admin
+ export OS_PASSWORD=$ADMIN_PASSWORD
+fi
+
+
# Horizon
# -------
@@ -1196,6 +1213,12 @@
# Example: LB_PHYSICAL_INTERFACE=eth1
LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+ # With the openvswitch plugin, set to True in localrc to enable
+ # provider GRE tunnels when ENABLE_TENANT_TUNNELS is False.
+ #
+ # Example: OVS_ENABLE_TUNNELING=True
+ OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+
# Put config files in ``/etc/quantum`` for everyone to find
if [[ ! -d /etc/quantum ]]; then
sudo mkdir -p /etc/quantum
@@ -1272,6 +1295,11 @@
if [[ "$OVS_VLAN_RANGES" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
fi
+
+ # Enable tunnel networks if selected
+ if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+ fi
elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
@@ -1302,7 +1330,7 @@
quantum_setup_ovs_bridge $OVS_BRIDGE
# Setup agent for tunneling
- if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+ if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
# Verify tunnels are supported
# REVISIT - also check kernel module support for GRE and patch ports
OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
@@ -1311,6 +1339,7 @@
echo "OVS 1.4+ is required for tunneling between multiple hosts."
exit 1
fi
+ iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
fi
@@ -1432,28 +1461,23 @@
cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
-# If Nova ships the new rootwrap filters files, deploy them
-# (owned by root) and add a parameter to ``$NOVA_ROOTWRAP``
-ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP"
-if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then
- # Wipe any existing rootwrap.d files first
- if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
- sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
- fi
- # Deploy filters to /etc/nova/rootwrap.d
- sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
- sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
- sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
- sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
- # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
- sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
- sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
- sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
- sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
- # Specify rootwrap.conf as first parameter to nova-rootwrap
- NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf"
- ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *"
+# Deploy new rootwrap filters files (owned by root).
+# Wipe any existing rootwrap.d files first
+if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
+ sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
fi
+# Deploy filters to /etc/nova/rootwrap.d
+sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
+sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
+sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
+sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
+# Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
+sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
+sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
+sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
+sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
+# Specify rootwrap.conf as first parameter to nova-rootwrap
+ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
# Set up the rootwrap sudoers for nova
TEMPFILE=`mktemp`
@@ -1915,7 +1939,7 @@
add_nova_opt "verbose=True"
add_nova_opt "auth_strategy=keystone"
add_nova_opt "allow_resize_to_same_host=True"
-add_nova_opt "root_helper=sudo $NOVA_ROOTWRAP"
+add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf"
add_nova_opt "compute_scheduler_driver=$SCHEDULER"
add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF_DIR/$NOVA_CONF"
add_nova_opt "fixed_range=$FIXED_RANGE"
@@ -2113,118 +2137,16 @@
fi
fi
-if is_service_enabled key; then
- # (Re)create keystone database
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
- mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
-
- KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
- KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
- KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
-
- if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
- sudo mkdir -p $KEYSTONE_CONF_DIR
- sudo chown `whoami` $KEYSTONE_CONF_DIR
- fi
-
- if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
- cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
- cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
- fi
-
- # Rewrite stock ``keystone.conf``
- iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
- iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
- iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
- sed -e "
- /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
- " -i $KEYSTONE_CONF
- # Append the S3 bits
- iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
-
- if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
- # Configure ``keystone.conf`` to use sql
- iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
- inicomment $KEYSTONE_CONF catalog template_file
- else
- KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
- cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
-
- # Add swift endpoints to service catalog if swift is enabled
- if is_service_enabled swift; then
- echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
- fi
-
- # Add quantum endpoints to service catalog if quantum is enabled
- if is_service_enabled quantum; then
- echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:$Q_PORT/" >> $KEYSTONE_CATALOG
- echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
- fi
-
- sudo sed -e "
- s,%SERVICE_HOST%,$SERVICE_HOST,g;
- s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
- " -i $KEYSTONE_CATALOG
-
- # Configure ``keystone.conf`` to use templates
- iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
- iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
- fi
-
- # Set up logging
- LOGGING_ROOT="devel"
- if [ "$SYSLOG" != "False" ]; then
- LOGGING_ROOT="$LOGGING_ROOT,production"
- fi
- KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
- cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
- iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
- iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
-
- # Initialize keystone database
- $KEYSTONE_DIR/bin/keystone-manage db_sync
-
- # Set up certificates
- $KEYSTONE_DIR/bin/keystone-manage pki_setup
-
- # Launch keystone and wait for it to answer before continuing
- screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
- echo "Waiting for keystone to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
- echo "keystone did not start"
- exit 1
- fi
- # ``keystone_data.sh`` creates services, admin and demo users, and roles.
- SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-
- ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
- SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
- S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
- DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \
- bash -x $FILES/keystone_data.sh
-
- # Set up auth creds now that keystone is bootstrapped
- export OS_AUTH_URL=$SERVICE_ENDPOINT
- export OS_TENANT_NAME=admin
- export OS_USERNAME=admin
- export OS_PASSWORD=$ADMIN_PASSWORD
-
- # Create an access key and secret key for nova ec2 register image
- if is_service_enabled swift3 && is_service_enabled nova; then
- NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
- NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
- CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
- ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
- SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
- add_nova_opt "s3_access_key=$ACCESS_KEY"
- add_nova_opt "s3_secret_key=$SECRET_KEY"
- add_nova_opt "s3_affix_tenant=True"
- fi
+# Create an access key and secret key for nova ec2 register image
+if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
+ NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+ NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
+ CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
+ ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
+ SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
+ add_nova_opt "s3_access_key=$ACCESS_KEY"
+ add_nova_opt "s3_secret_key=$SECRET_KEY"
+ add_nova_opt "s3_affix_tenant=True"
fi
screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver"
diff --git a/tests/functions.sh b/tests/functions.sh
index f111a48..3a0f319 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -239,3 +239,44 @@
test_disable_negated_services 'b,a,-a' 'b'
test_disable_negated_services 'a,b,-a' 'b'
test_disable_negated_services 'a,-a,b' 'b'
+
+
+echo "Testing is_package_installed()"
+
+if [[ -z "$os_PACKAGE" ]]; then
+ GetOSVersion
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+ is_package_installed dpkg
+ VAL=$?
+else
+ is_package_installed rpm
+ VAL=$?
+fi
+if [[ "$VAL" -eq 0 ]]; then
+ echo "OK"
+else
+ echo "is_package_installed() on existing package failed"
+fi
+
+if [[ "$os_PACKAGE" = "deb" ]]; then
+ is_package_installed dpkg bash
+ VAL=$?
+else
+ is_package_installed rpm bash
+ VAL=$?
+fi
+if [[ "$VAL" -eq 0 ]]; then
+ echo "OK"
+else
+ echo "is_package_installed() on more than one existing package failed"
+fi
+
+is_package_installed zzzZZZzzz
+VAL=$?
+if [[ "$VAL" -ne 0 ]]; then
+ echo "OK"
+else
+ echo "is_package_installed() on non-existing package failed"
+fi