Merge "Install nodejs for RHEL/Fedora"
diff --git a/functions b/functions
index 669fa69..dfde7dc 100644
--- a/functions
+++ b/functions
@@ -1413,6 +1413,10 @@
else
which pip
fi
+
+ if [ $? -ne 0 ]; then
+ die $LINENO "Unable to find pip; cannot continue"
+ fi
}
# Path permissions sanity check
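
The new guard relies on $? still holding the exit status of whichever lookup ran in the if/else just above it. A minimal standalone sketch of the same check, with the lookup folded directly into the test (the bare "pip" name is illustrative; DevStack's real lookup goes through the branch above):

    if ! which pip >/dev/null 2>&1; then
        echo "Unable to find pip; cannot continue" >&2
        exit 1
    fi
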
diff --git a/lib/ceilometer b/lib/ceilometer
index 90a1884..50060a7 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -91,6 +91,8 @@
iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
+ iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
+
configure_mongodb
cleanup_ceilometer
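
The new iniset call points Ceilometer at a MongoDB instance on localhost. A hedged way to confirm that the database named in that URL is actually reachable before the services come up (assumes the legacy mongo shell is installed, which configure_mongodb is expected to arrange):

    # Ping the database named in the connection URL set above
    mongo --quiet --eval 'printjson(db.runCommand({ping: 1}))' localhost:27017/ceilometer
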
diff --git a/lib/nova b/lib/nova
index c38f50c..809f56c 100644
--- a/lib/nova
+++ b/lib/nova
@@ -80,7 +80,10 @@
PUBLIC_INTERFACE_DEFAULT=eth3
GUEST_INTERFACE_DEFAULT=eth1
# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
- FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+ FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+ if is_service_enabled quantum; then
+ XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+ fi
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
PUBLIC_INTERFACE_DEFAULT=eth0
@@ -436,6 +439,7 @@
iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
+ iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True"
if is_service_enabled n-api; then
iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
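
The switch from the grep/cut/sort pipeline to a single sed keeps the extraction in one substitution; note that when the parameter is absent from /proc/cmdline, sed passes the whole line through unchanged rather than producing an empty default. A worked example against an illustrative kernel command line:

    # Prints "xapi1" (cmdline content is made up for the example)
    echo 'ro root=/dev/xvda1 flat_network_bridge=xapi1 xen_integration_bridge=xapi2 quiet' |
        sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g'
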
diff --git a/lib/quantum b/lib/quantum
index c36a743..d85c648 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -386,6 +386,11 @@
screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ # For XenServer, start an agent for the domU openvswitch
+ screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+ fi
+
if is_service_enabled q-lbaas; then
screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
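
With illustrative values substituted, the new q-domua window runs something like the command below; it is the same agent binary as the regular agent window, only pointed at the .domU copy of the plugin config created in lib/quantum_plugins/openvswitch (all paths here are examples, not literal defaults):

    cd /opt/stack/quantum && python /opt/stack/quantum/bin/quantum-openvswitch-agent \
        --config-file /etc/quantum/quantum.conf \
        --config-file /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini.domU
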
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index b4b52e9..980df5f 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -74,6 +74,11 @@
if [[ "$LB_VLAN_RANGES" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
fi
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
}
function quantum_plugin_setup_interface_driver() {
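
Depending on Q_USE_SECGROUP, exactly one firewall_driver value ends up in the SECURITYGROUP section of the plugin ini. Assuming DevStack's iniget helper from the functions file, the chosen driver can be read back like this:

    # From the devstack checkout; prints the driver picked by the branch above
    source functions
    iniget /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver
    # IptablesFirewallDriver when Q_USE_SECGROUP is "True", NoopFirewallDriver otherwise
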
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
index 411f5a4..608e267 100644
--- a/lib/quantum_plugins/nec
+++ b/lib/quantum_plugins/nec
@@ -82,6 +82,8 @@
iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER
iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max $OFC_RETRY_MAX
iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval $OFC_RETRY_INTERVAL
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
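
_quantum_ovs_base_configure_firewall_driver lives in the shared ovs_base library, which is not part of this diff; judging from the linuxbridge branch earlier in the change, it presumably toggles firewall_driver for the OVS-based plugins in the same way. A sketch of such a helper under that assumption (the hybrid OVS driver class name is also an assumption):

    function _quantum_ovs_base_configure_firewall_driver() {
        if [[ "$Q_USE_SECGROUP" == "True" ]]; then
            iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver \
                quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
        else
            iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver \
                quantum.agent.firewall.NoopFirewallDriver
        fi
    }
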
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 3741720..d5d4f10 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -11,7 +11,9 @@
_quantum_ovs_base_configure_nova_vif_driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
- iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
+ iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE
+ # Disable nova's firewall so that it does not conflict with quantum
+ iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
fi
}
@@ -71,6 +73,10 @@
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ # Make a copy of our config for domU
+ sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
+
+ # Deal with Dom0's L2 Agent:
Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
# For now, duplicate the xen configuration already found in nova.conf
@@ -83,29 +89,25 @@
# that executes commands on dom0 via a XenAPI plugin.
iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND"
- # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To
- # ensure the bridge lacks direct connectivity, set
- # VM_VLAN=-1;VM_DEV=invalid in localrc
- iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE
+ # Set "physical" mapping
+ iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
- # The ovs agent needs to ensure that the ports associated with
- # a given network share the same local vlan tag. On
- # single-node XS/XCP, this requires monitoring both the dom0
- # bridge, where VM's are attached, and the domU bridge, where
- # dhcp servers are attached.
- if is_service_enabled q-dhcp; then
- iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE
- # DomU will use the regular rootwrap
- iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND"
- # Plug the vm interface into the domU integration bridge.
- sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT
- sudo ip link set $OVS_BRIDGE up
- # Assign the VM IP only if it has been set explicitly
- if [[ "$VM_IP" != "" ]]; then
- sudo ip addr add $VM_IP dev $OVS_BRIDGE
- fi
- sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT
- fi
+ # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
+ iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE
+
+ # Set up domU's L2 agent:
+
+ # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
+ sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT"
+ # Add $GUEST_INTERFACE_DEFAULT to that bridge
+ sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+
+ # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+ # Set integration bridge to domU's
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE
+ # Set root wrap
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND"
fi
}
@@ -135,6 +137,8 @@
if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
fi
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
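
The net effect of the XenServer branch is two agent configs cut from the same file: the original drives the dom0 bridge through the dom0 rootwrap, while the .domU copy drives the local (domU) openvswitch through the regular rootwrap and is consumed by the new q-domua window. Spelled out symbolically, the relevant keys end up roughly as follows (an illustration of the iniset calls above, not literal file contents):

    # /$Q_PLUGIN_CONF_FILE  (dom0 agent)
    [OVS]
    bridge_mappings = physnet1:$FLAT_NETWORK_BRIDGE
    integration_bridge = $XEN_INTEGRATION_BRIDGE
    [AGENT]
    root_helper = $Q_RR_DOM0_COMMAND

    # /$Q_PLUGIN_CONF_FILE.domU  (domU agent, q-domua)
    [OVS]
    bridge_mappings = physnet1:br-$GUEST_INTERFACE_DEFAULT
    integration_bridge = $OVS_BRIDGE
    [AGENT]
    root_helper = $Q_RR_COMMAND
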
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index c482747..dcdccb7 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -57,6 +57,8 @@
function quantum_plugin_configure_service() {
iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
diff --git a/openrc b/openrc
index 2d5d48a..f1026a5 100644
--- a/openrc
+++ b/openrc
@@ -20,6 +20,9 @@
# Find the other rc files
RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+# Import common functions
+source $RC_DIR/functions
+
# Load local configuration
source $RC_DIR/stackrc
diff --git a/rejoin-stack.sh b/rejoin-stack.sh
index a82c73c..c452694 100755
--- a/rejoin-stack.sh
+++ b/rejoin-stack.sh
@@ -5,13 +5,15 @@
TOP_DIR=`dirname $0`
+source $TOP_DIR/stackrc
+
# if screenrc exists, run screen
if [[ -e $TOP_DIR/stack-screenrc ]]; then
if screen -ls | egrep -q "[0-9].stack"; then
echo "Attaching to already started screen session.."
exec screen -r stack
fi
- exec screen -c $TOP_DIR/stack-screenrc
+ exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME
fi
echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?"
diff --git a/stack.sh b/stack.sh
index 9a7f2ab..92d17b8 100755
--- a/stack.sh
+++ b/stack.sh
@@ -51,13 +51,24 @@
# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
+# RHEL6 requires EPEL for many OpenStack dependencies
+RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+
if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+
if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
echo "RDO repo not detected; installing"
- yum_install $RHEL6_RDO_REPO_RPM
+ yum_install $RHEL6_RDO_REPO_RPM || \
+ die $LINENO "Error installing RDO repo, cannot continue"
fi
-fi
+ if ! yum repolist enabled epel | grep -q 'epel'; then
+ echo "EPEL not detected; installing"
+ yum_install ${RHEL6_EPEL_RPM} || \
+ die $LINENO "Error installing EPEL repo, cannot continue"
+ fi
+
+fi
# Global Settings
# ===============
@@ -142,7 +153,6 @@
# and the specified rpc backend is available on your platform.
check_rpc_backend
-SCREEN_NAME=${SCREEN_NAME:-stack}
# Check to see if we are already running DevStack
# Note that this may fail if USE_SCREEN=False
if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
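
Both new repository checks follow the same pattern: ask yum repolist enabled for the repo id, install the release RPM only when it is missing, and die if that install fails. The same guard written once as a generic sketch, outside DevStack's helpers (repo id and RPM URL are placeholders):

    repo_id="some-repo"
    repo_rpm="http://example.com/some-repo-release.noarch.rpm"
    if ! yum repolist enabled "$repo_id" | grep -q "$repo_id"; then
        echo "$repo_id not detected; installing"
        sudo yum install -y "$repo_rpm" || { echo "Error installing $repo_id repo" >&2; exit 1; }
    fi
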
diff --git a/stackrc b/stackrc
index edf5a82..2ac564c 100644
--- a/stackrc
+++ b/stackrc
@@ -245,6 +245,9 @@
# Compatibility until it's eradicated from CI
USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
+# Set default screen name
+SCREEN_NAME=${SCREEN_NAME:-stack}
+
# Local variables:
# mode: shell-script
# End:
diff --git a/tools/xen/functions b/tools/xen/functions
index c6e484d..3458263 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -123,6 +123,32 @@
! [ -z $(xe network-list bridge="$bridge" --minimal) ]
}
+function _network_uuid() {
+ local bridge_or_net_name
+ bridge_or_net_name=$1
+
+ if _bridge_exists "$bridge_or_net_name"; then
+ xe network-list bridge="$bridge_or_net_name" --minimal
+ else
+ xe network-list name-label="$bridge_or_net_name" --minimal
+ fi
+}
+
+function add_interface() {
+ local vm_name
+ local bridge_or_network_name
+ local device_number
+
+ vm_name="$1"
+ bridge_or_network_name="$2"
+ device_number="$3"
+
+ local vm
+ local net
+
+ vm=$(xe vm-list name-label="$vm_name" --minimal)
+ net=$(_network_uuid "$bridge_or_network_name")
+ xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
+}
function setup_network() {
local bridge_or_net_name
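
add_interface resolves both of its arguments to UUIDs and then creates the VIF at a fixed device number; _network_uuid lets the second argument be either a bridge name (e.g. xenbr0) or a network name-label. A usage sketch matching the call made later in install_os_domU.sh (the VM label is illustrative; the network name is the xenrc default added below):

    # Give the DevStack domU an extra NIC on the integration network, as device 4
    add_interface "DevStackOSDomU" "OpenStack VM Integration Network" "4"
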
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 161d7e7..8b2a687 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -71,6 +71,12 @@
setup_network "$MGT_BRIDGE_OR_NET_NAME"
setup_network "$PUB_BRIDGE_OR_NET_NAME"
+# With quantum, one more network is required; it is internal to the
+# hypervisor and used by the VMs
+if is_service_enabled quantum; then
+ setup_network "$XEN_INT_BRIDGE_OR_NET_NAME"
+fi
+
if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
cat >&2 << EOF
ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file
@@ -195,6 +201,12 @@
# create a new VM with the given template
# creating the correct VIFs and metadata
FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
+
+ KERNEL_PARAMS_FOR_QUANTUM=""
+ if is_service_enabled quantum; then
+ XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME")
+ KERNEL_PARAMS_FOR_QUANTUM="xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
+ fi
$THIS_DIR/scripts/install-os-vpx.sh \
-t "$UBUNTU_INST_TEMPLATE_NAME" \
-v "$VM_BRIDGE_OR_NET_NAME" \
@@ -202,7 +214,7 @@
-p "$PUB_BRIDGE_OR_NET_NAME" \
-l "$GUEST_NAME" \
-r "$OSDOMU_MEM_MB" \
- -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+ -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE} ${KERNEL_PARAMS_FOR_QUANTUM}"
# wait for install to finish
wait_for_VM_to_halt
@@ -240,11 +252,16 @@
#
$THIS_DIR/build_xva.sh "$GUEST_NAME"
+# Attach a network interface for the integration network (so that the bridge
+# is created by XenServer). This is required for Quantum.
+if is_service_enabled quantum; then
+ add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4"
+fi
+
# create a snapshot before the first boot
# to allow a quick re-run with the same settings
xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
-
#
# Run DevStack VM
#
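
With quantum enabled, the installer passes both bridge names on the domU kernel command line, and the lib/nova hunk earlier in this change reads them back out of /proc/cmdline inside the guest. A short sketch of how the -k string is composed (bridge values are illustrative):

    FLAT_NETWORK_BRIDGE=xapi1                 # illustrative
    XEN_INTEGRATION_BRIDGE=xapi2              # illustrative
    KERNEL_PARAMS_FOR_QUANTUM="xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
    echo "flat_network_bridge=${FLAT_NETWORK_BRIDGE} ${KERNEL_PARAMS_FOR_QUANTUM}"
    # -> flat_network_bridge=xapi1 xen_integration_bridge=xapi2
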
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 7aaafd2..0ed3a6a 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -25,6 +25,7 @@
MGT_BRIDGE_OR_NET_NAME="xenbr0"
VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
+XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"
# VM Password
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}