Merge "Changes to NVP plugin configuration file"
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 151e7e2..cf16cdb 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -3,6 +3,7 @@
 gcc
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
+nodejs # NOPRIME
 pylint
 python-anyjson
 python-BeautifulSoup
diff --git a/files/rpms/quantum b/files/rpms/quantum
index 32c6f62..8827d5a 100644
--- a/files/rpms/quantum
+++ b/files/rpms/quantum
@@ -4,6 +4,7 @@
 iptables
 iputils
 mysql-server # NOPRIME
+openvswitch # NOPRIME
 python-boto
 python-eventlet
 python-greenlet
diff --git a/functions b/functions
index 669fa69..dfde7dc 100644
--- a/functions
+++ b/functions
@@ -1413,6 +1413,10 @@
     else
         which pip
     fi
+
+    if [ $? -ne 0 ]; then
+        die $LINENO "Unable to find pip; cannot continue"
+    fi
 }
 
 # Path permissions sanity check
diff --git a/lib/ceilometer b/lib/ceilometer
index 90a1884..50060a7 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -91,6 +91,8 @@
     iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
 
+    iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
+
     configure_mongodb
 
     cleanup_ceilometer
diff --git a/lib/horizon b/lib/horizon
index 1ee530e..ab11399 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -73,31 +73,6 @@
     fi
 }
 
-# Basic install of upstream nodejs for platforms that want it
-function install_nodejs() {
-    if [[ $(which node) ]]; then
-        echo "You already appear to have nodejs, skipping install"
-        return
-    fi
-
-    # There are several node deployment scripts; one may be more
-    # appropriate at some future point, but for now direct download is
-    # the simplest way.  The version barely matters for lesscss which
-    # doesn't use anything fancy.
-    local ver=0.10.1
-    local nodejs=node-v${ver}-linux-x64
-    local tar=$nodejs.tar.gz
-    local nodejs_url=http://nodejs.org/dist/v${ver}/${tar}
-
-    curl -Ss ${nodejs_url} | tar -C ${DEST} -xz
-    if [ $? -ne 0 ]; then
-        echo "*** Download of nodejs failed"
-        return 1
-    fi
-
-    # /usr/bin so it gets found in the PATH available to horizon
-    sudo ln -s $DEST/$nodejs/bin/node /usr/bin/node
-}
 
 # Entry Points
 # ------------
@@ -105,15 +80,7 @@
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon() {
-
-    if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-    # if the /usr/bin/node link looks like it's pointing into $DEST,
-    # then we installed it via install_nodejs
-        if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then
-            sudo rm /usr/bin/node
-        fi
-    fi
-
+    :
 }
 
 # configure_horizon() - Set config files, create data dirs, etc
@@ -199,21 +166,12 @@
         exit_distro_not_supported "apache installation"
     fi
 
-    if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-        # RHEL6 currently has no native way to get nodejs, so we do a
-        # basic install here (see cleanup_horizon too).
-        # TODO: does nova have a better way that we can limit
-        # requirement of site-wide nodejs install?
-        install_nodejs
-    fi
-
     # NOTE(sdague) quantal changed the name of the node binary
     if is_ubuntu; then
         if [[ ! -e "/usr/bin/node" ]]; then
             install_package nodejs-legacy
         fi
-    elif is_fedora && [[ "$os_RELEASE" -ge "18" ]]; then
-        # fedora 18 and higher gets nodejs
+    elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then
         install_package nodejs
     fi
 
diff --git a/lib/nova b/lib/nova
index c38f50c..809f56c 100644
--- a/lib/nova
+++ b/lib/nova
@@ -80,7 +80,10 @@
     PUBLIC_INTERFACE_DEFAULT=eth3
     GUEST_INTERFACE_DEFAULT=eth1
     # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+    FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -n -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/p' /proc/cmdline)
+    if is_service_enabled quantum; then
+        XEN_INTEGRATION_BRIDGE=$(sed -n -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/p' /proc/cmdline)
+    fi
 elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
     NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
     PUBLIC_INTERFACE_DEFAULT=eth0
@@ -436,6 +439,7 @@
     iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
+    iniset $NOVA_CONF DEFAULT osapi_v3_enabled "True"
 
     if is_service_enabled n-api; then
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
diff --git a/lib/quantum b/lib/quantum
index c36a743..d85c648 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -386,6 +386,11 @@
     screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
     screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
 
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        # For XenServer, start an agent for the domU openvswitch
+        screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+    fi
+
     if is_service_enabled q-lbaas; then
         screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
     fi
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index b4b52e9..980df5f 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -74,6 +74,11 @@
     if [[ "$LB_VLAN_RANGES" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
     fi
+    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+    else
+        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+    fi
 }
 
 function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
index 411f5a4..608e267 100644
--- a/lib/quantum_plugins/nec
+++ b/lib/quantum_plugins/nec
@@ -82,6 +82,8 @@
     iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER
     iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX
     iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL
+
+    _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 3741720..d5d4f10 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -11,7 +11,9 @@
     _quantum_ovs_base_configure_nova_vif_driver
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
-        iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
+        iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE
+        # Disable nova's firewall so that it does not conflict with quantum
+        iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
     fi
 }
 
@@ -71,6 +73,10 @@
     AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        # Make a copy of our config for domU
+        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
+
+        # Deal with Dom0's L2 Agent:
         Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
 
         # For now, duplicate the xen configuration already found in nova.conf
@@ -83,29 +89,25 @@
         # that executes commands on dom0 via a XenAPI plugin.
         iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND"
 
-        # FLAT_NETWORK_BRIDGE is the dom0 integration bridge.  To
-        # ensure the bridge lacks direct connectivity, set
-        # VM_VLAN=-1;VM_DEV=invalid in localrc
-        iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE
+        # Set "physical" mapping
+        iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
 
-        # The ovs agent needs to ensure that the ports associated with
-        # a given network share the same local vlan tag.  On
-        # single-node XS/XCP, this requires monitoring both the dom0
-        # bridge, where VM's are attached, and the domU bridge, where
-        # dhcp servers are attached.
-        if is_service_enabled q-dhcp; then
-            iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE
-            # DomU will use the regular rootwrap
-            iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND"
-            # Plug the vm interface into the domU integration bridge.
-            sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT
-            sudo ip link set $OVS_BRIDGE up
-            # Assign the VM IP only if it has been set explicitly
-            if [[ "$VM_IP" != "" ]]; then
-                sudo ip addr add $VM_IP dev $OVS_BRIDGE
-            fi
-            sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT
-        fi
+        # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
+        iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $XEN_INTEGRATION_BRIDGE
+
+        # Set up domU's L2 agent:
+
+        # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
+        sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT"
+        # Add $GUEST_INTERFACE_DEFAULT to that bridge
+        sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+
+        # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+        # Set integration bridge to domU's
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" OVS integration_bridge $OVS_BRIDGE
+        # Set root wrap
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" AGENT root_helper "$Q_RR_COMMAND"
     fi
 }
 
@@ -135,6 +137,8 @@
     if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
     fi
+
+    _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_setup_interface_driver() {
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index c482747..dcdccb7 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -57,6 +57,8 @@
 
 function quantum_plugin_configure_service() {
     iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+
+    _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_setup_interface_driver() {
diff --git a/openrc b/openrc
index 2d5d48a..f1026a5 100644
--- a/openrc
+++ b/openrc
@@ -20,6 +20,9 @@
 # Find the other rc files
 RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
 
+# Import common functions
+source $RC_DIR/functions
+
 # Load local configuration
 source $RC_DIR/stackrc
 
diff --git a/rejoin-stack.sh b/rejoin-stack.sh
index a82c73c..c452694 100755
--- a/rejoin-stack.sh
+++ b/rejoin-stack.sh
@@ -5,13 +5,15 @@
 
 TOP_DIR=`dirname $0`
 
+source $TOP_DIR/stackrc
+
 # if screenrc exists, run screen
 if [[ -e $TOP_DIR/stack-screenrc ]]; then
     if screen -ls | egrep -q "[0-9].stack"; then
         echo "Attaching to already started screen session.."
         exec screen -r stack
     fi
-    exec screen -c $TOP_DIR/stack-screenrc
+    exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME
 fi
 
 echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?"
diff --git a/stack.sh b/stack.sh
index 16533df..92d17b8 100755
--- a/stack.sh
+++ b/stack.sh
@@ -32,6 +32,12 @@
 # and ``DISTRO``
 GetDistro
 
+
+# Configure non-default repos
+# ===========================
+
+# Repo configuration needs to occur before package installation.
+
 # Some dependencies are not available in Debian Wheezy official
 # repositories. However, it's possible to run OpenStack from gplhost
 # repository.
@@ -42,6 +48,28 @@
     apt_get install --force-yes gplhost-archive-keyring
 fi
 
+# Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
+RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
+RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
+# RHEL6 requires EPEL for many Open Stack dependencies
+RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+
+if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+
+    if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+        echo "RDO repo not detected; installing"
+        yum_install $RHEL6_RDO_REPO_RPM || \
+            die $LINENO "Error installing RDO repo, cannot continue"
+    fi
+
+    if ! yum repolist enabled epel | grep -q 'epel'; then
+        echo "EPEL not detected; installing"
+        yum_install ${RHEL6_EPEL_RPM} || \
+            die $LINENO "Error installing EPEL repo, cannot continue"
+    fi
+
+fi
+
 # Global Settings
 # ===============
 
@@ -125,7 +153,6 @@
 # and the specified rpc backend is available on your platform.
 check_rpc_backend
 
-SCREEN_NAME=${SCREEN_NAME:-stack}
 # Check to see if we are already running DevStack
 # Note that this may fail if USE_SCREEN=False
 if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
diff --git a/stackrc b/stackrc
index edf5a82..2ac564c 100644
--- a/stackrc
+++ b/stackrc
@@ -245,6 +245,9 @@
 # Compatibility until it's eradicated from CI
 USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
 
+# Set default screen name
+SCREEN_NAME=${SCREEN_NAME:-stack}
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/tools/xen/functions b/tools/xen/functions
index c6e484d..ebfd483 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -94,6 +94,14 @@
     done
 }
 
+function _vm_uuid() {
+    local vm_name_label
+
+    vm_name_label="$1"
+
+    xe vm-list name-label="$vm_name_label" --minimal
+}
+
 function _create_new_network() {
     local name_label
     name_label=$1
@@ -123,6 +131,32 @@
     ! [ -z $(xe network-list bridge="$bridge" --minimal) ]
 }
 
+function _network_uuid() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    if _bridge_exists "$bridge_or_net_name"; then
+        xe network-list bridge="$bridge_or_net_name" --minimal
+    else
+        xe network-list name-label="$bridge_or_net_name" --minimal
+    fi
+}
+
+function add_interface() {
+    local vm_name_label
+    local bridge_or_network_name
+
+    vm_name_label="$1"
+    bridge_or_network_name="$2"
+    local device_number="$3"
+
+    local vm
+    local net
+
+    vm=$(_vm_uuid "$vm_name_label")
+    net=$(_network_uuid "$bridge_or_network_name")
+    xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
+}
 
 function setup_network() {
     local bridge_or_net_name
@@ -174,3 +208,19 @@
 
     compgen -v | grep "$parameter_name"
 }
+
+function append_kernel_cmdline()
+{
+    local vm_name_label
+    local kernel_args
+
+    vm_name_label="$1"
+    kernel_args="$2"
+
+    local vm
+    local pv_args
+
+    vm=$(_vm_uuid "$vm_name_label")
+    pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm)
+    xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 161d7e7..a744869 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -71,6 +71,12 @@
 setup_network "$MGT_BRIDGE_OR_NET_NAME"
 setup_network "$PUB_BRIDGE_OR_NET_NAME"
 
+# With quantum, one more network is required, which is internal to the
+# hypervisor, and used by the VMs
+if is_service_enabled quantum; then
+    setup_network "$XEN_INT_BRIDGE_OR_NET_NAME"
+fi
+
 if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
     cat >&2 << EOF
 ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file
@@ -194,15 +200,13 @@
 
     # create a new VM with the given template
     # creating the correct VIFs and metadata
-    FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
     $THIS_DIR/scripts/install-os-vpx.sh \
         -t "$UBUNTU_INST_TEMPLATE_NAME" \
         -v "$VM_BRIDGE_OR_NET_NAME" \
         -m "$MGT_BRIDGE_OR_NET_NAME" \
         -p "$PUB_BRIDGE_OR_NET_NAME" \
         -l "$GUEST_NAME" \
-        -r "$OSDOMU_MEM_MB" \
-        -k "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+        -r "$OSDOMU_MEM_MB"
 
     # wait for install to finish
     wait_for_VM_to_halt
@@ -240,11 +244,25 @@
 #
 $THIS_DIR/build_xva.sh "$GUEST_NAME"
 
+# Attach a network interface for the integration network (so that the bridge
+# is created by XenServer). This is required for Quantum. Also pass that as a
+# kernel parameter for DomU
+if is_service_enabled quantum; then
+    add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" "4"
+
+    XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME")
+    append_kernel_cmdline \
+        "$GUEST_NAME" \
+        "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
+fi
+
+FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
+append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+
 # create a snapshot before the first boot
 # to allow a quick re-run with the same settings
 xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
 
-
 #
 # Run DevStack VM
 #
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 6105a1e..c82f870 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -25,7 +25,6 @@
 DATA_VDI_SIZE="500MiB"
 BRIDGE_M=
 BRIDGE_P=
-KERNEL_PARAMS=
 VPX_FILE=os-vpx.xva
 AS_TEMPLATE=
 FROM_TEMPLATE=
@@ -38,7 +37,7 @@
 cat << EOF
 
   Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME]
-            [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
+            [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
 
   Installs XenServer OpenStack VPX.
 
@@ -57,7 +56,6 @@
                   Defaults to xenbr0.
      -v bridge    Specifies the bridge for the vm network
      -p bridge    Specifies the bridge for the externally facing network.
-     -k params    Specifies kernel parameters.
      -r MiB       Specifies RAM used by the VPX, in MiB.
                   By default it will take the value from the XVA.
      -l name      Specifies the name label for the VM.
@@ -81,15 +79,12 @@
      using the default for management traffic:
             install-os-vpx.sh -m xapi4
 
-     Create a VPX that automatically becomes the master:
-            install-os-vpx.sh -k geppetto_master=true
-
 EOF
 }
 
 get_params()
 {
-  while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION;
+  while getopts "hicwbf:d:v:m:p:r:l:t:" OPTION;
   do
     case $OPTION in
       h) usage
@@ -119,9 +114,6 @@
       p)
          BRIDGE_P=$OPTARG
          ;;
-      k)
-         KERNEL_PARAMS=$OPTARG
-         ;;
       r)
          RAM=$OPTARG
          ;;
@@ -328,20 +320,6 @@
 }
 
 
-set_kernel_params()
-{
-  local v="$1"
-  local args=$KERNEL_PARAMS
-  if [ "$args" != "" ]
-  then
-    echo "Passing Geppetto args to VPX: $args."
-    pvargs=$(xe vm-param-get param-name=PV-args uuid="$v")
-    args="$pvargs $args"
-    xe vm-param-set PV-args="$args" uuid="$v"
-  fi
-}
-
-
 set_memory()
 {
   local v="$1"
@@ -367,7 +345,6 @@
 set_all()
 {
   local v="$1"
-  set_kernel_params "$v"
   set_memory "$v"
   set_auto_start "$v"
   label_system_disk "$v"
@@ -430,7 +407,6 @@
   create_vm_vif "$vm_uuid"
   create_management_vif "$vm_uuid"
   create_public_vif "$vm_uuid"
-  set_kernel_params "$vm_uuid"
   xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
   xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
   set_memory "$vm_uuid"
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 7aaafd2..0ed3a6a 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -25,6 +25,7 @@
 MGT_BRIDGE_OR_NET_NAME="xenbr0"
 VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
 PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
+XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"
 
 # VM Password
 GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}