Merge "Explicitly add cosine and inetorgperson schemas on Fedora"
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index 5c4b16e..a1fb2ad 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -235,7 +235,7 @@
source $TOP_DIR/openrc $TENANT $TENANT
local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
- quantum-debug probe-create $NET_ID
+ quantum-debug probe-create --device-owner compute $NET_ID
source $TOP_DIR/openrc demo demo
}
diff --git a/functions b/functions
index f8f63c5..fe50547 100644
--- a/functions
+++ b/functions
@@ -858,26 +858,69 @@
}
+# _run_process() is designed to be backgrounded by run_process() to simulate a
+# fork. It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it(). The log filename is derived
+# from the service name and the global-and-now-misnamed SCREEN_LOGDIR.
+# _run_process service "command-line"
+function _run_process() {
+ local service=$1
+ local command="$2"
+
+ # Undo logging redirections and close the extra descriptors
+ exec 1>&3
+ exec 2>&3
+ exec 3>&-
+ exec 6>&-
+
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ exec 1>${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1
+ ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
+
+ # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+ export PYTHONUNBUFFERED=1
+ fi
+
+ exec /bin/bash -c "$command"
+ die "$service exec failure: $command"
+}
+
+
+# run_process() launches a child process that closes all file descriptors and
+# then exec's the passed-in command. This is meant to duplicate the semantics
+# of screen_it() without screen. PIDs are written to
+# $SERVICE_DIR/$SCREEN_NAME/$service.pid
+# run_process service "command-line"
+function run_process() {
+ local service=$1
+ local command="$2"
+
+ # Spawn the child process
+ _run_process "$service" "$command" &
+ echo $!
+}
+
+
# Helper to launch a service in a named screen
# screen_it service "command-line"
function screen_it {
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
- SCREEN_DEV=`trueorfalse True $SCREEN_DEV`
+ USE_SCREEN=$(trueorfalse True $USE_SCREEN)
if is_service_enabled $1; then
# Append the service to the screen rc file
screen_rc "$1" "$2"
- screen -S $SCREEN_NAME -X screen -t $1
+ if [[ "$USE_SCREEN" = "True" ]]; then
+ screen -S $SCREEN_NAME -X screen -t $1
- if [[ -n ${SCREEN_LOGDIR} ]]; then
- screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
- screen -S $SCREEN_NAME -p $1 -X log on
- ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
- fi
+ if [[ -n ${SCREEN_LOGDIR} ]]; then
+ screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+ screen -S $SCREEN_NAME -p $1 -X log on
+ ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+ fi
- if [[ "$SCREEN_DEV" = "True" ]]; then
# sleep to allow bash to be ready to be send the command - we are
# creating a new window in screen and then sends characters, so if
# bash isn't running by the time we send the command, nothing happens
@@ -886,7 +929,8 @@
NL=`echo -ne '\015'`
screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
else
- screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\""
+ # Spawn directly without screen
+ run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
fi
fi
}
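
Note: run_process() keeps the screen_it() calling convention — a service name plus a command line, with the child's PID on stdout. A minimal sketch of the non-screen path (hypothetical service and command; SERVICE_DIR, SCREEN_NAME, and the extra log descriptors 3/6 are assumed to be set up by stack.sh as usual):

    # As screen_it() does it: the PID goes straight to a status file
    run_process n-api "cd $NOVA_DIR && ./bin/nova-api" >$SERVICE_DIR/$SCREEN_NAME/n-api.pid

    # Stopping the service later is just a signal to the recorded PID
    kill $(cat $SERVICE_DIR/$SCREEN_NAME/n-api.pid)
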
diff --git a/lib/cinder b/lib/cinder
index b3e1904..7688ad9 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -53,6 +53,11 @@
# Support for multi lvm backend configuration (default is no support)
CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
+# Should cinder perform secure deletion of volumes?
+# Defaults to True, can be set to False to avoid this bug when testing:
+# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
+CINDER_SECURE_DELETE=$(trueorfalse True $CINDER_SECURE_DELETE)
+
# Name of the lvm volume groups to use/create for iscsi volumes
# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
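
Note: because CINDER_SECURE_DELETE is normalized with trueorfalse, it can be overridden like any other boolean knob. A sketch of the opt-out described in the comment above (assuming the usual localrc mechanism):

    # localrc: skip secure deletion of volumes to avoid the kernel bug above
    CINDER_SECURE_DELETE=False
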
diff --git a/lib/keystone b/lib/keystone
index 17e0866..805cb6f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -59,6 +59,9 @@
KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+# Set the tenant for service accounts in Keystone
+SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
+
# Entry Points
# ------------
diff --git a/lib/nova b/lib/nova
index 23346b7..9809e62 100644
--- a/lib/nova
+++ b/lib/nova
@@ -65,6 +65,62 @@
QEMU_CONF=/etc/libvirt/qemu.conf
+NOVNC_DIR=$DEST/noVNC
+SPICE_DIR=$DEST/spice-html5
+
+
+# Nova Network Configuration
+# --------------------------
+
+# Set defaults according to the virt driver
+if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ PUBLIC_INTERFACE_DEFAULT=eth3
+ GUEST_INTERFACE_DEFAULT=eth1
+ # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
+ FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
+ NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
+ PUBLIC_INTERFACE_DEFAULT=eth0
+ FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+ STUB_NETWORK=${STUB_NETWORK:-False}
+else
+ PUBLIC_INTERFACE_DEFAULT=br100
+ GUEST_INTERFACE_DEFAULT=eth0
+ FLAT_NETWORK_BRIDGE_DEFAULT=br100
+fi
+
+NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
+FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
+
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
+#
+# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
+# hiccup while the network is moved from the flat interface to the flat network
+# bridge. This will happen when you launch your first instance. Upon launch
+# you will lose all connectivity to the node, and the VM launch will probably
+# fail.
+#
+# If you are running on a single node and don't need to access the VMs from
+# devices other than that node, you can set ``FLAT_INTERFACE=``.
+# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
+FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
+
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
+# allows network operations and routing for a VM to occur on the server that is
+# running the VM - removing a SPOF and bandwidth bottleneck.
+MULTI_HOST=$(trueorfalse False $MULTI_HOST)
+
+# The test floating pool and range are defined here until the admin APIs
+# can replace nova-manage
+TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
+TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
+
# Entry Points
# ------------
@@ -439,6 +495,49 @@
# Replace the first '=' with ' ' for iniset syntax
iniset $NOVA_CONF DEFAULT ${I/=/ }
done
+
+ # All nova-compute workers need to know the vnc configuration options
+ # These settings don't hurt anything if n-xvnc and n-novnc are disabled
+ if is_service_enabled n-cpu; then
+ NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+ iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
+ XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+ iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
+ SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+ iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
+ fi
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
+ else
+ VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+ fi
+
+ if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
+ # Address on which instance vncservers will listen on compute hosts.
+ # For multi-host, this should be the management ip of the compute host.
+ VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
+ iniset $NOVA_CONF DEFAULT vnc_enabled true
+ iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
+ iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
+ else
+ iniset $NOVA_CONF DEFAULT vnc_enabled false
+ fi
+
+ if is_service_enabled n-spice; then
+ # Address on which instance spiceservers will listen on compute hosts.
+ # For multi-host, this should be the management ip of the compute host.
+ SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
+ SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
+ iniset $NOVA_CONF spice enabled true
+ iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
+ iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
+ else
+ iniset $NOVA_CONF spice enabled false
+ fi
+
+ iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+ iniset_rpc_backend nova $NOVA_CONF DEFAULT
+ iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
}
# create_nova_cache_dir() - Part of the init_nova() process
@@ -450,7 +549,7 @@
}
function create_nova_conf_nova_network() {
- iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN"
+ iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
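
Note: the defaults moved into lib/nova lean on three different bash expansions, and the distinction is load-bearing: ${FLAT_INTERFACE-default} (no colon) preserves an explicit empty value as the comment above promises, while ${VNCSERVER_PROXYCLIENT_ADDRESS=default} assigns as a side effect. A standalone sketch of the three forms:

    unset VAR;  echo ${VAR:-dflt}    # -> dflt  (':-' defaults when unset or empty)
    VAR="";     echo ${VAR:-dflt}    # -> dflt
    VAR="";     echo ${VAR-dflt}     # -> (empty; '-' without ':' keeps an empty value)
    unset VAR;  : ${VAR=dflt}        # '=' also assigns the default when unset...
    echo $VAR                        # -> dflt  (the form used for VNCSERVER_* above)
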
diff --git a/lib/quantum b/lib/quantum
index 862ba84..efdd43d 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -53,7 +53,7 @@
# that must be set in localrc for connectivity across hosts with
# Quantum.
#
-# With Quantum networking the NET_MAN variable is ignored.
+# With Quantum networking the NETWORK_MANAGER variable is ignored.
# Save trace setting
@@ -181,6 +181,13 @@
# Hardcoding for 1 service plugin for now
source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+# Enable Quantum security groups if the plugin supports them
+if has_quantum_plugin_security_group; then
+ Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
+else
+ Q_USE_SECGROUP=False
+fi
+
# Entry Points
# ------------
@@ -222,6 +229,11 @@
iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME"
iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT"
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+ iniset $NOVA_CONF DEFAULT security_group_api quantum
+ fi
+
# set NOVA_VIF_DRIVER and optionally set options in nova_conf
quantum_plugin_create_nova_conf
@@ -365,13 +377,13 @@
# Start running processes, including screen
function start_quantum_agents() {
# Start up the quantum agents if enabled
- screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
- screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
- screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
- screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+ screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+ screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+ screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+ screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
if is_service_enabled q-lbaas; then
- screen_it q-lbaas "python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+ screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
}
@@ -415,6 +427,7 @@
cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME`
+ iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
_quantum_setup_rootwrap
}
@@ -534,7 +547,6 @@
iniset $QUANTUM_CONF DEFAULT verbose True
iniset $QUANTUM_CONF DEFAULT debug True
- iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE
iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
@@ -569,7 +581,12 @@
sudo chown -R root:root $Q_CONF_ROOTWRAP_D
sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
# Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d
- sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+ # The rootwrap.conf location moved in newer versions; prefer the new one
+ if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then
+ sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE
+ else
+ sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+ fi
sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
sudo chown root:root $Q_RR_CONF_FILE
sudo chmod 0644 $Q_RR_CONF_FILE
@@ -641,9 +658,9 @@
function setup_quantum_debug() {
if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id
private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME`
- quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id
+ quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id
fi
}
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
index 5411de0..05bfb85 100644
--- a/lib/quantum_plugins/README.md
+++ b/lib/quantum_plugins/README.md
@@ -32,3 +32,5 @@
* ``quantum_plugin_configure_plugin_agent``
* ``quantum_plugin_configure_service``
* ``quantum_plugin_setup_interface_driver``
+* ``has_quantum_plugin_security_group``:
+ returns 0 if the plugin supports quantum security groups, otherwise returns 1
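
Note: the new entry point follows the usual shell convention that exit status 0 is truthy, so callers branch on the function's status rather than its output. A sketch of both sides of the contract (mirroring the lib/quantum hunk above):

    # In a plugin file: advertise security group support
    function has_quantum_plugin_security_group() {
        return 0    # 0 = success = True in shell convention
    }

    # In the caller: branch directly on the exit status
    if has_quantum_plugin_security_group; then
        Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
    else
        Q_USE_SECGROUP=False
    fi
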
diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight
index 7d3fd96..4857f49 100644
--- a/lib/quantum_plugins/bigswitch_floodlight
+++ b/lib/quantum_plugins/bigswitch_floodlight
@@ -51,5 +51,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 1 means False here
+ return 1
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade
index ac91143..6e26ad7 100644
--- a/lib/quantum_plugins/brocade
+++ b/lib/quantum_plugins/brocade
@@ -45,5 +45,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$BRCD_XTRACE
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 11bc585..324e255 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -48,6 +48,11 @@
if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
fi
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
}
@@ -76,5 +81,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index 8c150b1..6eefb02 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -141,5 +141,10 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index dda1239..ab16483 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -8,7 +8,7 @@
source $TOP_DIR/lib/quantum_plugins/ovs_base
function quantum_plugin_create_nova_conf() {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ _quantum_ovs_base_configure_nova_vif_driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
@@ -43,6 +43,7 @@
# Setup integration bridge
OVS_BRIDGE=${OVS_BRIDGE:-br-int}
_quantum_ovs_base_setup_bridge $OVS_BRIDGE
+ _quantum_ovs_base_configure_firewall_driver
# Setup agent for tunneling
if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
@@ -139,5 +140,9 @@
iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index ab988d9..2ada0db 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -39,6 +39,14 @@
iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
}
+function _quantum_ovs_base_configure_firewall_driver() {
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
+}
+
function _quantum_ovs_base_configure_l3_agent() {
iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
@@ -48,5 +56,15 @@
sudo ip addr flush dev $PUBLIC_BRIDGE
}
+function _quantum_ovs_base_configure_nova_vif_driver() {
+ # The hybrid VIF driver must be used when Quantum security groups are
+ # enabled (until vif_security attributes are supported in the VIF extension)
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ else
+ NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+ fi
+}
+
# Restore xtrace
$MY_XTRACE
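
Note: since both branches assign through ${NOVA_VIF_DRIVER:-...}, a driver set earlier (for example in localrc) always takes precedence over the security-group-derived default. A sketch of that precedence (the driver name is hypothetical):

    NOVA_VIF_DRIVER="nova.virt.libvirt.vif.MyCustomVIFDriver"
    Q_USE_SECGROUP=True
    _quantum_ovs_base_configure_nova_vif_driver
    echo $NOVA_VIF_DRIVER    # -> nova.virt.libvirt.vif.MyCustomVIFDriver
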
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index d1d7382..1139232 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -9,7 +9,7 @@
source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value
function quantum_plugin_create_nova_conf() {
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+ _quantum_ovs_base_configure_nova_vif_driver
iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE"
}
@@ -52,6 +52,8 @@
fi
iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE
AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_configure_service() {
@@ -64,5 +66,10 @@
iniset $conf_file DEFAULT ovs_use_veth True
}
+function has_quantum_plugin_security_group() {
+ # 0 means True here
+ return 0
+}
+
# Restore xtrace
$MY_XTRACE
diff --git a/lib/swift b/lib/swift
index 2c87d21..d50b554 100644
--- a/lib/swift
+++ b/lib/swift
@@ -28,6 +28,7 @@
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
+SWIFT3_DIR=$DEST/swift3
# TODO: add logging to different location.
@@ -40,6 +41,12 @@
# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}
+if is_service_enabled s-proxy && is_service_enabled swift3; then
+ # If we are using swift3, default the S3 port to Swift instead
+ # of nova-objectstore
+ S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+fi
+
# DevStack will create a loop-back disk formatted as XFS to store the
# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
# kilobytes.
diff --git a/stack.sh b/stack.sh
index 14bb161..cfce6be 100755
--- a/stack.sh
+++ b/stack.sh
@@ -269,19 +269,12 @@
# Set the destination directories for OpenStack projects
HORIZON_DIR=$DEST/horizon
OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-NOVNC_DIR=$DEST/noVNC
-SPICE_DIR=$DEST/spice-html5
-SWIFT3_DIR=$DEST/swift3
-# Should cinder perform secure deletion of volumes?
-# Defaults to true, can be set to False to avoid this bug when testing:
-# https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
-CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
-# Name of the LVM volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
-VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
-INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+# Interactive Configuration
+# -------------------------
+
+# Do all interactive config up front before the logging spew begins
# Generic helper to configure passwords
function read_password {
@@ -326,66 +319,7 @@
}
-# Nova Network Configuration
-# --------------------------
-
-# FIXME: more documentation about why these are important options. Also
-# we should make sure we use the same variable names as the option names.
-
-if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- PUBLIC_INTERFACE_DEFAULT=eth3
- # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
- FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
- GUEST_INTERFACE_DEFAULT=eth1
-elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
- PUBLIC_INTERFACE_DEFAULT=eth0
- FLAT_NETWORK_BRIDGE_DEFAULT=br100
- FLAT_INTERFACE=${FLAT_INTERFACE:-eth0}
- FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False}
- NET_MAN=${NET_MAN:-FlatManager}
- STUB_NETWORK=${STUB_NETWORK:-False}
-else
- PUBLIC_INTERFACE_DEFAULT=br100
- FLAT_NETWORK_BRIDGE_DEFAULT=br100
- GUEST_INTERFACE_DEFAULT=eth0
-fi
-
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True}
-
-# Test floating pool and range are used for testing. They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node. This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=`trueorfalse False $MULTI_HOST`
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**: If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge. This will happen when you launch your first instance. Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT}
-
-## FIXME(ja): should/can we check that FLAT_INTERFACE is sane?
-
# Database Configuration
-# ----------------------
# To select between database backends, add the following to ``localrc``:
#
@@ -398,8 +332,7 @@
initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
-# RabbitMQ or Qpid
-# --------------------------
+# Queue Configuration
# Rabbit connection info
if is_service_enabled rabbit; then
@@ -407,53 +340,45 @@
read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
fi
-if is_service_enabled s-proxy; then
- # If we are using swift3, we can default the s3 port to swift instead
- # of nova-objectstore
- if is_service_enabled swift3;then
- S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+
+# Keystone
+
+if is_service_enabled key; then
+ # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
+ # just a string and is not a 'real' Keystone token.
+ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+ # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
+ read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
+ # Horizon currently truncates usernames and passwords at 20 characters
+ read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
+
+ # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
+ # service in ``localrc`` (e.g. ``enable_service ldap``).
+ # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
+ # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the
+ # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
+ # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
+ # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
+
+ # only request ldap password if the service is enabled
+ if is_service_enabled ldap; then
+ read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
fi
+fi
+
+
+# Swift
+
+if is_service_enabled s-proxy; then
# We only ask for Swift Hash if we have enabled swift service.
# ``SWIFT_HASH`` is a random unique string for a swift cluster that
# can never change.
read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
fi
-# Set default port for nova-objectstore
-S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
-
-# Keystone
-# --------
-
-# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
-# just a string and is not a 'real' Keystone token.
-read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
-# Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
-read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
-# Horizon currently truncates usernames and passwords at 20 characters
-read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
-# Keystone can now optionally install OpenLDAP by adding ldap to the list
-# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap).
-# If OpenLDAP has already been installed but you need to clear out
-# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes
-# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the
-# Keystone Identity Driver (keystone.identity.backends.ldap.Identity)
-# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap)
-# in the localrc file.
-
-
-# only request ldap password if the service is enabled
-if is_service_enabled ldap; then
- read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
-fi
-
-# Set the tenant for service accounts in Keystone
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-
-# Log files
-# ---------
+# Configure logging
+# -----------------
# Draw a spinner so the user knows something is happening
function spinner() {
@@ -701,14 +626,15 @@
echo_summary "Configuring OpenStack projects"
-# Set up our checkouts so they are installed into python path
-# allowing ``import nova`` or ``import glance.client``
+# Set up our checkouts so they are installed in the python path
configure_keystoneclient
configure_novaclient
setup_develop $OPENSTACKCLIENT_DIR
+
if is_service_enabled key g-api n-api s-proxy; then
configure_keystone
fi
+
if is_service_enabled s-proxy; then
configure_swift
configure_swiftclient
@@ -716,6 +642,7 @@
setup_develop $SWIFT3_DIR
fi
fi
+
if is_service_enabled g-api n-api; then
configure_glance
fi
@@ -729,17 +656,21 @@
cleanup_nova
configure_nova
fi
+
if is_service_enabled horizon; then
configure_horizon
fi
+
if is_service_enabled quantum; then
setup_quantumclient
setup_quantum
fi
+
if is_service_enabled heat; then
configure_heat
configure_heatclient
fi
+
if is_service_enabled cinder; then
configure_cinder
fi
@@ -761,6 +692,7 @@
# don't be naive and add to existing line!
fi
+
# Syslog
# ------
@@ -800,8 +732,17 @@
# Configure screen
# ----------------
-if [ -z "$SCREEN_HARDSTATUS" ]; then
- SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+if [[ "$USE_SCREEN" == "True" ]]; then
+ # Create a new named screen to run processes in
+ screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+ sleep 1
+
+ # Set a reasonable status bar
+ if [ -z "$SCREEN_HARDSTATUS" ]; then
+ SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+ fi
+ screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
fi
# Clear screen rc file
@@ -810,12 +751,6 @@
echo -n > $SCREENRC
fi
-# Create a new named screen to run processes in
-screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
-sleep 1
-
-# Set a reasonable status bar
-screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
# Initialize the directory for service status check
init_service_check
@@ -980,48 +915,6 @@
elif is_service_enabled n-net; then
create_nova_conf_nova_network
fi
- # All nova-compute workers need to know the vnc configuration options
- # These settings don't hurt anything if n-xvnc and n-novnc are disabled
- if is_service_enabled n-cpu; then
- NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
- iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL"
- XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
- iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL"
- SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
- iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
- fi
- if [ "$VIRT_DRIVER" = 'xenserver' ]; then
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
- else
- VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
- fi
-
- if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
- # Address on which instance vncservers will listen on compute hosts.
- # For multi-host, this should be the management ip of the compute host.
- VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
- iniset $NOVA_CONF DEFAULT vnc_enabled true
- iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN"
- iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
- else
- iniset $NOVA_CONF DEFAULT vnc_enabled false
- fi
-
- if is_service_enabled n-spice; then
- # Address on which instance spiceservers will listen on compute hosts.
- # For multi-host, this should be the management ip of the compute host.
- SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
- SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1}
- iniset $NOVA_CONF spice enabled true
- iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
- iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
- else
- iniset $NOVA_CONF spice enabled false
- fi
-
- iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
- iniset_rpc_backend nova $NOVA_CONF DEFAULT
- iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
# XenServer
@@ -1094,6 +987,7 @@
fi
fi
+
# Launch Services
# ===============
@@ -1183,6 +1077,7 @@
start_heat
fi
+
# Create account rc files
# =======================
@@ -1293,6 +1188,7 @@
# Check the status of running services
service_check
+
# Fin
# ===
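
Note: with USE_SCREEN=True the services run as windows of the named session created above and can be inspected interactively; with USE_SCREEN=False they become plain background processes whose PIDs land under $SERVICE_DIR. For the screen case, standard screen commands apply (the session name defaults to stack):

    screen -ls         # list sessions and find the stack session
    screen -r stack    # attach; Ctrl-a " picks a service window
    # Ctrl-a d detaches again without stopping any service
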
diff --git a/stackrc b/stackrc
index d418a0e..34ccfa2 100644
--- a/stackrc
+++ b/stackrc
@@ -30,8 +30,8 @@
# stuffing text into the screen windows so that a developer can use
# ctrl-c, up-arrow, enter to restart the service. Starting services
# this way is slightly unreliable, and a bit slower, so this can
-# be disabled for automated testing by setting this value to false.
-SCREEN_DEV=True
+# be disabled for automated testing by setting this value to False.
+USE_SCREEN=True
# Repositories
# ------------
@@ -196,5 +196,17 @@
# 5Gb default volume backing file size
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
+
+# Set default port for nova-objectstore
+S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333}
+
+# Common network names
PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"nova"}
+
+# Compatibility: honor the legacy SCREEN_DEV setting until it is eradicated from CI
+USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
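
Note: the compatibility line works because ${SCREEN_DEV:-$USE_SCREEN} falls back to the new variable only when the legacy one is unset, so an old CI job that still exports SCREEN_DEV keeps its behavior:

    USE_SCREEN=True
    SCREEN_DEV=False                       # legacy knob from an old CI job
    USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
    echo $USE_SCREEN                       # -> False (legacy setting wins)
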
diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh
index f1242ee..52b9b4e 100755
--- a/tools/build_bm_multi.sh
+++ b/tools/build_bm_multi.sh
@@ -6,7 +6,7 @@
SHELL_AFTER_RUN=no
# Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NET_MAN=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
+COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
# Helper to launch containers
function run_bm {