Add support for IPv6 tunnel endpoints

Currently, neutron tunnel endpoints must be IPv4 addresses,
i.e. $HOST_IP, although IPv6 endpoints are supported by most
drivers.

Create a TUNNEL_IP_VERSION variable to choose which host IP
to use, either HOST_IP or HOST_IPV6, and configure it in the
OVS and Linuxbridge agent driver files. The default is still
IPv4, but it can be overridden by specifying TUNNEL_ENDPOINT_IP
accordingly.
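
For example, to select IPv6 tunnel endpoints in local.conf (the
address below is purely illustrative):

    TUNNEL_IP_VERSION=6
    HOST_IPV6=fd12:3456:789a::1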

This behaves similarly to the SERVICE_IP_VERSION option: it can
be set to either 4 or 6, but not 4+6, since the tunnel overhead
must be consistent on all systems in order to avoid MTU issues.
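
For reference, the per-packet overhead differs by exactly the
size of the outer IP header (VXLAN shown, outer Ethernet
excluded):

    IPv4: 20 (IP) + 8 (UDP) + 8 (VXLAN) = 36 bytes
    IPv6: 40 (IP) + 8 (UDP) + 8 (VXLAN) = 56 bytes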

The ML2 overlay_ip_version config option must be set to match,
otherwise the agent tunnel sync RPC will not work.
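
With TUNNEL_IP_VERSION=6, the resulting ml2_conf.ini entry would
look like:

    [ml2]
    overlay_ip_version = 6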

The OVN external_ids:ovn-encap-ip config option must likewise be
set to the correct address.
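
On the OVS side this amounts to something like the following,
with a purely hypothetical IPv6 endpoint shown:

    sudo ovs-vsctl set open_vswitch . \
        external-ids:ovn-encap-ip="fd12:3456:789a::1"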

Updated the 'devstack-ipv6-only' job definition and verification
role so that all services and tunnels are set to use IPv6
addresses.

Closes-bug: #1619476

Change-Id: I6034278dfc17b55d7863bc4db541bbdaa983a686
diff --git a/lib/neutron b/lib/neutron
index f24ccfb..1b78493 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -230,6 +230,7 @@
             mech_drivers+=",linuxbridge"
         fi
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 overlay_ip_version $TUNNEL_IP_VERSION
 
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
@@ -251,10 +252,10 @@
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
             iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
-            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $TUNNEL_ENDPOINT_IP
         elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
             iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
-            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $TUNNEL_ENDPOINT_IP
 
             if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
                 iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index e9b55b6..5e6af0f 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -246,13 +246,7 @@
     LB_PHYSICAL_INTERFACE=$default_route_dev
 fi
 
-# When Neutron tunnels are enabled it is needed to specify the
-# IP address of the end point in the local server. This IP is set
-# by default to the same IP address that the HOST IP.
-# This variable can be used to specify a different end point IP address
-# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1``
-TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP}
-
 # With the openvswitch plugin, set to True in ``localrc`` to enable
 # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
 #
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index f00feac..7343606 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -125,6 +125,7 @@
     fi
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION
 
     if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index dfd55de..24bdf92 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -99,8 +99,14 @@
 OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
 
 export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+# Tunnel endpoints follow TUNNEL_IP_VERSION, which may differ from
+# SERVICE_IP_VERSION
+TUNNEL_IP=$TUNNEL_ENDPOINT_IP
 if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
     OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
 fi
+if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then
+    TUNNEL_IP=[$TUNNEL_IP]
+fi
 
 OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
@@ -639,7 +645,7 @@
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
-        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
         # Select this chassis to host gateway routers
         if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
@@ -654,7 +660,7 @@
         if is_service_enabled ovn-controller-vtep ; then
             ovn_base_setup_bridge br-v
             vtep-ctl add-ps br-v
-            vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+            vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP
 
             enable_service ovs-vtep
             local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"