Merge "Add missing .debs required by Debian"
diff --git a/.mailmap b/.mailmap
index a49875d..29be995 100644
--- a/.mailmap
+++ b/.mailmap
@@ -2,4 +2,5 @@
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
-Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
\ No newline at end of file
+Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
+Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
diff --git a/clean.sh b/clean.sh
index cf24f27..ffc462c 100755
--- a/clean.sh
+++ b/clean.sh
@@ -19,7 +19,9 @@
source $TOP_DIR/stackrc
# Get the variables that are set in stack.sh
-source $TOP_DIR/.stackenv
+if [[ -r $TOP_DIR/.stackenv ]]; then
+ source $TOP_DIR/.stackenv
+fi
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -88,4 +90,4 @@
# FIXED_IP_ADDR in br100
# Clean up files
-#rm -f .stackenv
+rm -f $TOP_DIR/.stackenv
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 151e7e2..cf16cdb 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -3,6 +3,7 @@
gcc
httpd # NOPRIME
mod_wsgi # NOPRIME
+nodejs # NOPRIME
pylint
python-anyjson
python-BeautifulSoup
diff --git a/functions b/functions
index 669fa69..dfde7dc 100644
--- a/functions
+++ b/functions
@@ -1413,6 +1413,10 @@
else
which pip
fi
+
+ if [ $? -ne 0 ]; then
+ die $LINENO "Unable to find pip; cannot continue"
+ fi
}
# Path permissions sanity check
diff --git a/lib/ceilometer b/lib/ceilometer
index 90a1884..bd4ab0f 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -91,6 +91,8 @@
iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
+ iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
+
configure_mongodb
cleanup_ceilometer
@@ -125,10 +127,10 @@
# start_ceilometer() - Start running processes, including screen
function start_ceilometer() {
- screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg $LIBVIRT_GROUP \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
- screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
- screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
- screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-acompute "sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
+ screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
+ screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
}
# stop_ceilometer() - Stop running processes
diff --git a/lib/horizon b/lib/horizon
index 1ee530e..ab11399 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -73,31 +73,6 @@
fi
}
-# Basic install of upstream nodejs for platforms that want it
-function install_nodejs() {
- if [[ $(which node) ]]; then
- echo "You already appear to have nodejs, skipping install"
- return
- fi
-
- # There are several node deployment scripts; one may be more
- # appropriate at some future point, but for now direct download is
- # the simplest way. The version barely matters for lesscss which
- # doesn't use anything fancy.
- local ver=0.10.1
- local nodejs=node-v${ver}-linux-x64
- local tar=$nodejs.tar.gz
- local nodejs_url=http://nodejs.org/dist/v${ver}/${tar}
-
- curl -Ss ${nodejs_url} | tar -C ${DEST} -xz
- if [ $? -ne 0 ]; then
- echo "*** Download of nodejs failed"
- return 1
- fi
-
- # /usr/bin so it gets found in the PATH available to horizon
- sudo ln -s $DEST/$nodejs/bin/node /usr/bin/node
-}
# Entry Points
# ------------
@@ -105,15 +80,7 @@
# cleanup_horizon() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_horizon() {
-
- if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
- # if the /usr/bin/node link looks like it's pointing into $DEST,
- # then we installed it via install_nodejs
- if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then
- sudo rm /usr/bin/node
- fi
- fi
-
+ :
}
# configure_horizon() - Set config files, create data dirs, etc
@@ -199,21 +166,12 @@
exit_distro_not_supported "apache installation"
fi
- if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
- # RHEL6 currently has no native way to get nodejs, so we do a
- # basic install here (see cleanup_horizon too).
- # TODO: does nova have a better way that we can limit
- # requirement of site-wide nodejs install?
- install_nodejs
- fi
-
# NOTE(sdague) quantal changed the name of the node binary
if is_ubuntu; then
if [[ ! -e "/usr/bin/node" ]]; then
install_package nodejs-legacy
fi
- elif is_fedora && [[ "$os_RELEASE" -ge "18" ]]; then
- # fedora 18 and higher gets nodejs
+ elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then
install_package nodejs
fi
diff --git a/lib/nova b/lib/nova
index c38f50c..be526cf 100644
--- a/lib/nova
+++ b/lib/nova
@@ -80,7 +80,10 @@
PUBLIC_INTERFACE_DEFAULT=eth3
GUEST_INTERFACE_DEFAULT=eth1
# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
- FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+ FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+ if is_service_enabled quantum; then
+ XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+ fi
elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
PUBLIC_INTERFACE_DEFAULT=eth0
@@ -436,6 +439,13 @@
iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
+ iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True"
+
+ if is_fedora; then
+ # nova defaults to /usr/local/bin, but fedora pip likes to
+ # install things in /usr/bin
+ iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
+ fi
if is_service_enabled n-api; then
iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
diff --git a/lib/quantum b/lib/quantum
index c36a743..d85c648 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -386,6 +386,11 @@
screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
+ if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ # For XenServer, start an agent for the domU openvswitch
+ screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+ fi
+
if is_service_enabled q-lbaas; then
screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
fi
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index b4b52e9..980df5f 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -74,6 +74,11 @@
if [[ "$LB_VLAN_RANGES" != "" ]]; then
iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
fi
+ if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+ else
+ iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+ fi
}
function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
index 411f5a4..608e267 100644
--- a/lib/quantum_plugins/nec
+++ b/lib/quantum_plugins/nec
@@ -82,6 +82,8 @@
iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER
iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX
iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index fc06b55..7795eed 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -14,9 +14,6 @@
# Get the first controller
controllers=(${NVP_CONTROLLERS//,/ })
OVS_MGR_IP=${controllers[0]}
- elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then
- conn=(${NVP_CONTROLLER_CONNECTION//\:/ })
- OVS_MGR_IP=${conn[0]}
else
die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
fi
@@ -83,55 +80,43 @@
iniset /$Q_PLUGIN_CONF_FILE NVP concurrent_connections $CONCURRENT_CONNECTIONS
fi
- if [[ "$DEFAULT_CLUSTER" != "" ]]; then
- # Make name shorter for sake of readability
- DC=$DEFAULT_CLUSTER
- if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID
- else
- die $LINENO "The nicira plugin won't work without a default transport zone."
- fi
- if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
- Q_L3_ENABLED=True
- Q_L3_ROUTER_PER_TENANT=True
- iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True
- else
- echo "WARNING - No l3 gw service enabled. You will not be able to use the L3 API extension"
- fi
- if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
- fi
- # NVP_CONTROLLERS must be a comma separated string
- if [[ "$NVP_CONTROLLERS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controllers $NVP_CONTROLLERS
- elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then
- # Only 1 controller can be specified in this case
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION
- else
- die $LINENO "The nicira plugin needs at least an NVP controller."
- fi
- if [[ "$NVP_USER" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER
- fi
- if [[ "$NVP_PASSWORD" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_password $NVP_PASSWORD
- fi
- if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" req_timeout $NVP_REQ_TIMEOUT
- fi
- if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" http_timeout $NVP_HTTP_TIMEOUT
- fi
- if [[ "$NVP_RETRIES" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" retries $NVP_RETRIES
- fi
- if [[ "$NVP_REDIRECTS" != "" ]]; then
- iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" redirects $NVP_REDIRECTS
- fi
+ if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
else
- echo "ERROR - Default cluster not configured. Quantum will not start"
- exit 1
+ die $LINENO "The nicira plugin won't work without a default transport zone."
+ fi
+ if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
+ Q_L3_ENABLED=True
+ Q_L3_ROUTER_PER_TENANT=True
+ iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True
+ fi
+ if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
+ fi
+ # NVP_CONTROLLERS must be a comma separated string
+ if [[ "$NVP_CONTROLLERS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS
+ else
+ die $LINENO "The nicira plugin needs at least an NVP controller."
+ fi
+ if [[ "$NVP_USER" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER
+ fi
+ if [[ "$NVP_PASSWORD" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD
+ fi
+ if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT
+ fi
+ if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT
+ fi
+ if [[ "$NVP_RETRIES" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES
+ fi
+ if [[ "$NVP_REDIRECTS" != "" ]]; then
+ iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS
fi
}
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 3741720..d5d4f10 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -11,7 +11,9 @@
_quantum_ovs_base_configure_nova_vif_driver
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
- iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
+ iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE
+ # Disable nova's firewall so that it does not conflict with quantum
+ iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
fi
}
@@ -71,6 +73,10 @@
AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+ # Make a copy of our config for domU
+        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
+
+ # Deal with Dom0's L2 Agent:
Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
# For now, duplicate the xen configuration already found in nova.conf
@@ -83,29 +89,25 @@
# that executes commands on dom0 via a XenAPI plugin.
iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND"
- # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To
- # ensure the bridge lacks direct connectivity, set
- # VM_VLAN=-1;VM_DEV=invalid in localrc
- iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE
+ # Set "physical" mapping
+ iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
- # The ovs agent needs to ensure that the ports associated with
- # a given network share the same local vlan tag. On
- # single-node XS/XCP, this requires monitoring both the dom0
- # bridge, where VM's are attached, and the domU bridge, where
- # dhcp servers are attached.
- if is_service_enabled q-dhcp; then
- iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE
- # DomU will use the regular rootwrap
- iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND"
- # Plug the vm interface into the domU integration bridge.
- sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT
- sudo ip link set $OVS_BRIDGE up
- # Assign the VM IP only if it has been set explicitly
- if [[ "$VM_IP" != "" ]]; then
- sudo ip addr add $VM_IP dev $OVS_BRIDGE
- fi
- sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT
- fi
+ # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
+ iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE
+
+ # Set up domU's L2 agent:
+
+ # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
+ sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT"
+ # Add $GUEST_INTERFACE_DEFAULT to that bridge
+ sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+
+ # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+ # Set integration bridge to domU's
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE
+ # Set root wrap
+ iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND"
fi
}
@@ -135,6 +137,8 @@
if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
fi
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index c482747..dcdccb7 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -57,6 +57,8 @@
function quantum_plugin_configure_service() {
iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+
+ _quantum_ovs_base_configure_firewall_driver
}
function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_thirdparty/nicira b/lib/quantum_thirdparty/nicira
new file mode 100644
index 0000000..5a20934
--- /dev/null
+++ b/lib/quantum_thirdparty/nicira
@@ -0,0 +1,52 @@
+# Nicira NVP
+# ----------
+
+# This third-party addition can be used to configure connectivity between a DevStack instance
+# and an NVP Gateway in dev/test environments. In order to use this correctly, the following
+# env variables need to be set (e.g. in your localrc file):
+#
+# * enable_service nicira --> to execute this third-party addition
+# * PUBLIC_BRIDGE --> bridge used for external connectivity, typically br-ex
+# * NVP_GATEWAY_NETWORK_INTERFACE --> interface used to communicate with the NVP Gateway
+# * NVP_GATEWAY_NETWORK_CIDR --> CIDR to configure br-ex, e.g. 172.24.4.211/24
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# This is the interface that connects the Devstack instance
+# to an network that allows it to talk to the gateway for
+# testing purposes
+NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2}
+
+function configure_nicira() {
+ :
+}
+
+function init_nicira() {
+ die_if_not_set $LINENO NVP_GATEWAY_NETWORK_CIDR "Please, specify CIDR for the gateway network interface."
+ # Make sure the interface is up, but not configured
+ sudo ifconfig $NVP_GATEWAY_NETWORK_INTERFACE up
+    sudo ip addr flush dev $NVP_GATEWAY_NETWORK_INTERFACE
+ # Use the PUBLIC Bridge to route traffic to the NVP gateway
+ # NOTE(armando-migliaccio): if running in a nested environment this will work
+ # only with mac learning enabled, portsecurity and security profiles disabled
+ sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE
+ nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
+ sudo ifconfig $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR hw ether $nvp_gw_net_if_mac
+}
+
+function install_nicira() {
+ :
+}
+
+function start_nicira() {
+ :
+}
+
+function stop_nicira() {
+ :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/tempest b/lib/tempest
index e59737b..a259ee9 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -255,6 +255,11 @@
iniset $TEMPEST_CONF boto http_socket_timeout 30
iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+ # orchestration
+ if is_service_enabled heat; then
+ iniset $TEMPEST_CONF orchestration heat_available "True"
+ fi
+
echo "Created tempest configuration file:"
cat $TEMPEST_CONF
diff --git a/openrc b/openrc
index 2d5d48a..f1026a5 100644
--- a/openrc
+++ b/openrc
@@ -20,6 +20,9 @@
# Find the other rc files
RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+# Import common functions
+source $RC_DIR/functions
+
# Load local configuration
source $RC_DIR/stackrc
diff --git a/rejoin-stack.sh b/rejoin-stack.sh
index a82c73c..c452694 100755
--- a/rejoin-stack.sh
+++ b/rejoin-stack.sh
@@ -5,13 +5,15 @@
TOP_DIR=`dirname $0`
+source $TOP_DIR/stackrc
+
# if screenrc exists, run screen
if [[ -e $TOP_DIR/stack-screenrc ]]; then
if screen -ls | egrep -q "[0-9].stack"; then
echo "Attaching to already started screen session.."
exec screen -r stack
fi
- exec screen -c $TOP_DIR/stack-screenrc
+ exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME
fi
echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?"
diff --git a/stack.sh b/stack.sh
index 9a7f2ab..1e61a3f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -51,13 +51,24 @@
# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
+# RHEL6 requires EPEL for many OpenStack dependencies
+RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+
if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
+
if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
echo "RDO repo not detected; installing"
- yum_install $RHEL6_RDO_REPO_RPM
+ yum_install $RHEL6_RDO_REPO_RPM || \
+ die $LINENO "Error installing RDO repo, cannot continue"
fi
-fi
+ if ! yum repolist enabled epel | grep -q 'epel'; then
+ echo "EPEL not detected; installing"
+ yum_install ${RHEL6_EPEL_RPM} || \
+ die $LINENO "Error installing EPEL repo, cannot continue"
+ fi
+
+fi
# Global Settings
# ===============
@@ -142,7 +153,6 @@
# and the specified rpc backend is available on your platform.
check_rpc_backend
-SCREEN_NAME=${SCREEN_NAME:-stack}
# Check to see if we are already running DevStack
# Note that this may fail if USE_SCREEN=False
if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
@@ -780,6 +790,22 @@
EOF
sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
fi
+
+ RSYSLOGCONF="/etc/rsyslog.conf"
+ if [ -f $RSYSLOGCONF ]; then
+ sudo cp -b $RSYSLOGCONF $RSYSLOGCONF.bak
+ if [[ $(grep '$SystemLogRateLimitBurst' $RSYSLOGCONF) ]]; then
+ sudo sed -i 's/$SystemLogRateLimitBurst\ .*/$SystemLogRateLimitBurst\ 0/' $RSYSLOGCONF
+ else
+ sudo sed -i '$ i $SystemLogRateLimitBurst\ 0' $RSYSLOGCONF
+ fi
+ if [[ $(grep '$SystemLogRateLimitInterval' $RSYSLOGCONF) ]]; then
+ sudo sed -i 's/$SystemLogRateLimitInterval\ .*/$SystemLogRateLimitInterval\ 0/' $RSYSLOGCONF
+ else
+ sudo sed -i '$ i $SystemLogRateLimitInterval\ 0' $RSYSLOGCONF
+ fi
+ fi
+
echo_summary "Starting rsyslog"
restart_service rsyslog
fi
@@ -1055,6 +1081,27 @@
iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD"
iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER"
+ # fake
+ # -----
+
+ elif [ "$VIRT_DRIVER" = 'fake' ]; then
+ echo_summary "Using fake Virt driver"
+ iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver"
+ # Disable arbitrary limits
+ iniset $NOVA_CONF DEFAULT quota_instances -1
+ iniset $NOVA_CONF DEFAULT quota_cores -1
+ iniset $NOVA_CONF DEFAULT quota_ram -1
+ iniset $NOVA_CONF DEFAULT quota_floating_ips -1
+ iniset $NOVA_CONF DEFAULT quota_fixed_ips -1
+ iniset $NOVA_CONF DEFAULT quota_metadata_items -1
+ iniset $NOVA_CONF DEFAULT quota_injected_files -1
+ iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1
+ iniset $NOVA_CONF DEFAULT quota_security_groups -1
+ iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
+ iniset $NOVA_CONF DEFAULT quota_key_pairs -1
+ iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter"
+
+
# Default
# -------
diff --git a/stackrc b/stackrc
index edf5a82..2ac564c 100644
--- a/stackrc
+++ b/stackrc
@@ -245,6 +245,9 @@
# Compatibility until it's eradicated from CI
USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
+# Set default screen name
+SCREEN_NAME=${SCREEN_NAME:-stack}
+
# Local variables:
# mode: shell-script
# End:
diff --git a/tools/xen/functions b/tools/xen/functions
index c6e484d..ebfd483 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -94,6 +94,14 @@
done
}
+function _vm_uuid() {
+ local vm_name_label
+
+ vm_name_label="$1"
+
+ xe vm-list name-label="$vm_name_label" --minimal
+}
+
function _create_new_network() {
local name_label
name_label=$1
@@ -123,6 +131,32 @@
! [ -z $(xe network-list bridge="$bridge" --minimal) ]
}
+function _network_uuid() {
+ local bridge_or_net_name
+ bridge_or_net_name=$1
+
+ if _bridge_exists "$bridge_or_net_name"; then
+ xe network-list bridge="$bridge_or_net_name" --minimal
+ else
+ xe network-list name-label="$bridge_or_net_name" --minimal
+ fi
+}
+
+function add_interface() {
+ local vm_name_label
+ local bridge_or_network_name
+
+ vm_name_label="$1"
+ bridge_or_network_name="$2"
+ device_number="$3"
+
+ local vm
+ local net
+
+ vm=$(_vm_uuid "$vm_name_label")
+ net=$(_network_uuid "$bridge_or_network_name")
+ xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
+}
function setup_network() {
local bridge_or_net_name
@@ -174,3 +208,19 @@
compgen -v | grep "$parameter_name"
}
+
+function append_kernel_cmdline()
+{
+ local vm_name_label
+ local kernel_args
+
+ vm_name_label="$1"
+ kernel_args="$2"
+
+ local vm
+ local pv_args
+
+ vm=$(_vm_uuid "$vm_name_label")
+ pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm)
+ xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 161d7e7..a744869 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -71,6 +71,12 @@
setup_network "$MGT_BRIDGE_OR_NET_NAME"
setup_network "$PUB_BRIDGE_OR_NET_NAME"
+# With quantum, one more network is required, which is internal to the
+# hypervisor, and used by the VMs
+if is_service_enabled quantum; then
+ setup_network "$XEN_INT_BRIDGE_OR_NET_NAME"
+fi
+
if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
cat >&2 << EOF
ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file
@@ -194,15 +200,13 @@
# create a new VM with the given template
# creating the correct VIFs and metadata
- FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
$THIS_DIR/scripts/install-os-vpx.sh \
-t "$UBUNTU_INST_TEMPLATE_NAME" \
-v "$VM_BRIDGE_OR_NET_NAME" \
-m "$MGT_BRIDGE_OR_NET_NAME" \
-p "$PUB_BRIDGE_OR_NET_NAME" \
-l "$GUEST_NAME" \
- -r "$OSDOMU_MEM_MB" \
- -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+ -r "$OSDOMU_MEM_MB"
# wait for install to finish
wait_for_VM_to_halt
@@ -240,11 +244,25 @@
#
$THIS_DIR/build_xva.sh "$GUEST_NAME"
+# Attach a network interface for the integration network (so that the bridge
+# is created by XenServer). This is required for Quantum. Also pass that as a
+# kernel parameter for DomU
+if is_service_enabled quantum; then
+ add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4"
+
+ XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME")
+ append_kernel_cmdline \
+ "$GUEST_NAME" \
+ "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
+fi
+
+FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
+append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+
# create a snapshot before the first boot
# to allow a quick re-run with the same settings
xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
-
#
# Run DevStack VM
#
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 6105a1e..c82f870 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -25,7 +25,6 @@
DATA_VDI_SIZE="500MiB"
BRIDGE_M=
BRIDGE_P=
-KERNEL_PARAMS=
VPX_FILE=os-vpx.xva
AS_TEMPLATE=
FROM_TEMPLATE=
@@ -38,7 +37,7 @@
cat << EOF
Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME]
- [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
+ [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
Installs XenServer OpenStack VPX.
@@ -57,7 +56,6 @@
Defaults to xenbr0.
-v bridge Specifies the bridge for the vm network
-p bridge Specifies the bridge for the externally facing network.
- -k params Specifies kernel parameters.
-r MiB Specifies RAM used by the VPX, in MiB.
By default it will take the value from the XVA.
-l name Specifies the name label for the VM.
@@ -81,15 +79,12 @@
using the default for management traffic:
install-os-vpx.sh -m xapi4
- Create a VPX that automatically becomes the master:
- install-os-vpx.sh -k geppetto_master=true
-
EOF
}
get_params()
{
- while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION;
+ while getopts "hicwbf:d:v:m:p:r:l:t:" OPTION;
do
case $OPTION in
h) usage
@@ -119,9 +114,6 @@
p)
BRIDGE_P=$OPTARG
;;
- k)
- KERNEL_PARAMS=$OPTARG
- ;;
r)
RAM=$OPTARG
;;
@@ -328,20 +320,6 @@
}
-set_kernel_params()
-{
- local v="$1"
- local args=$KERNEL_PARAMS
- if [ "$args" != "" ]
- then
- echo "Passing Geppetto args to VPX: $args."
- pvargs=$(xe vm-param-get param-name=PV-args uuid="$v")
- args="$pvargs $args"
- xe vm-param-set PV-args="$args" uuid="$v"
- fi
-}
-
-
set_memory()
{
local v="$1"
@@ -367,7 +345,6 @@
set_all()
{
local v="$1"
- set_kernel_params "$v"
set_memory "$v"
set_auto_start "$v"
label_system_disk "$v"
@@ -430,7 +407,6 @@
create_vm_vif "$vm_uuid"
create_management_vif "$vm_uuid"
create_public_vif "$vm_uuid"
- set_kernel_params "$vm_uuid"
xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
set_memory "$vm_uuid"
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 7aaafd2..0ed3a6a 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -25,6 +25,7 @@
MGT_BRIDGE_OR_NET_NAME="xenbr0"
VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
+XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"
# VM Password
GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}