Merge "Always start iscsid for nova-compute"
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index df3c7ce..b1d88cb 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -1,39 +1,54 @@
-Configure Load-Balancer Version 2
-=================================
+Devstack with Octavia Load Balancing
+====================================
-Starting in the OpenStack Liberty release, the
-`neutron LBaaS v2 API <https://developer.openstack.org/api-ref/network/v2/index.html>`_
-is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference
-driver is based on Octavia.
+Starting with the OpenStack Pike release, Octavia is a standalone service
+providing load balancing for OpenStack.
+This guide shows you how to create a devstack with the `Octavia API`_ enabled.
+
+.. _Octavia API: https://developer.openstack.org/api-ref/load-balancer/v2/index.html
Phase 1: Create DevStack + 2 nova instances
--------------------------------------------
First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space, and
-make sure it is updated. Install git and any other developer tools you find useful.
+make sure it is up to date. Install git and any other developer tools you find
+useful.
Install devstack
::
git clone https://git.openstack.org/openstack-dev/devstack
- cd devstack
+ cd devstack/tools
+ sudo ./create-stack-user.sh
+ cd ../..
+ sudo mv devstack /opt/stack
+ sudo chown -R stack:stack /opt/stack/devstack
+This will clone the current devstack code locally, then set up the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location, /opt/stack/devstack.
-Edit your ``local.conf`` to look like
+Edit your ``/opt/stack/devstack/local.conf`` to look like
::
[[local|localrc]]
- # Load the external LBaaS plugin.
- enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
enable_plugin octavia https://git.openstack.org/openstack/octavia
+ # If you are enabling horizon, include the octavia dashboard
+ # enable_plugin octavia-dashboard https://git.openstack.org/openstack/octavia-dashboard.git
+ # If you are enabling barbican for TLS offload in Octavia, include it here.
+ # enable_plugin barbican https://github.com/openstack/barbican.git
+
+ # If you have python3 available:
+ # USE_PYTHON3=True
# ===== BEGIN localrc =====
DATABASE_PASSWORD=password
ADMIN_PASSWORD=password
SERVICE_PASSWORD=password
+ SERVICE_TOKEN=password
RABBIT_PASSWORD=password
# Enable Logging
LOGFILE=$DEST/logs/stack.sh.log
@@ -41,27 +56,30 @@
LOG_COLOR=True
# Pre-requisite
ENABLED_SERVICES=rabbit,mysql,key
- # Horizon
- ENABLED_SERVICES+=,horizon
+ # Horizon - enable for the OpenStack web GUI
+ # ENABLED_SERVICES+=,horizon
# Nova
- ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch
+ ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
+ ENABLED_SERVICES+=,placement-api,placement-client
# Glance
ENABLED_SERVICES+=,g-api,g-reg
# Neutron
- ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
- # Enable LBaaS v2
- ENABLED_SERVICES+=,q-lbaasv2
+ ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
# Cinder
ENABLED_SERVICES+=,c-api,c-vol,c-sch
# Tempest
ENABLED_SERVICES+=,tempest
+ # Barbican - Optionally used for TLS offload in Octavia
+ # ENABLED_SERVICES+=,barbican
# ===== END localrc =====
Run stack.sh and do some sanity checks
::
+ sudo su - stack
+ cd /opt/stack/devstack
./stack.sh
. ./openrc
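+ # A couple of quick sanity checks (exact output varies by environment):
+ openstack catalog list    # should include a "load-balancer" service entry
+ openstack network list    # the "private" network should be present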
@@ -72,38 +90,59 @@
::
#create nova instances on private network
- nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
- nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
- nova list # should show the nova instances just created
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+ openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+ openstack server list # should show the nova instances just created
#add secgroup rules to allow ssh etc..
openstack security group rule create default --protocol icmp
openstack security group rule create default --protocol tcp --dst-port 22:22
openstack security group rule create default --protocol tcp --dst-port 80:80
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
+Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' for older cirros images, or 'gocubsgo' for newer ones) and run
::
MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
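+
+To check each backend before load balancing, you can curl the node addresses
+(from ``openstack server list``) directly from the devstack host:
+
+::
+
+ curl http://<node1 address>   # expect "Welcome to <node1 address>"
+ curl http://<node2 address>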
-Phase 2: Create your load balancers
-------------------------------------
+Phase 2: Create your load balancer
+----------------------------------
+
+Make sure you have the 'openstack loadbalancer' commands:
::
- neutron lbaas-loadbalancer-create --name lb1 private-subnet
- neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE.
- neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
- sleep 10 # Sleep since LBaaS actions can take a few seconds depending on the environment.
- neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
- sleep 10
- neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1
- sleep 10
- neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1
+ pip install python-octaviaclient
-Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes
-(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be
-reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is
-"curl that-lb-ip", which should alternate between showing the IPs of the two nodes.
+Create your load balancer:
+
+::
+
+ openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+ openstack loadbalancer show lb1 # Wait for the provisioning_status to be ACTIVE.
+ openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: the <web server # address> fields are the IP addresses of the
+nova servers created in Phase 1.
+Also note that, using the API directly, you can do all of the above in a
+single API call.
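+
+For example, a rough sketch of that single call (the endpoint, token, and
+subnet ID are placeholders that will vary with your deployment):
+
+::
+
+ # Create a "fully populated" load balancer in one Octavia v2 API request
+ curl -X POST http://<octavia endpoint>/v2.0/lbaas/loadbalancers \
+   -H "X-Auth-Token: <token>" -H "Content-Type: application/json" \
+   -d '{"loadbalancer": {"name": "lb1", "vip_subnet_id": "<subnet id>",
+       "listeners": [{"name": "listener1", "protocol": "HTTP", "protocol_port": 80,
+         "default_pool": {"name": "pool1", "lb_algorithm": "ROUND_ROBIN", "protocol": "HTTP",
+           "members": [{"address": "<web server 1 address>", "protocol_port": 80},
+                       {"address": "<web server 2 address>", "protocol_port": 80}]}}]}}'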
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+ openstack loadbalancer show lb1 # Note the vip_address
+ curl http://<vip_address>
+ curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
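+
+Repeated requests should alternate between the two member addresses. A quick
+way to watch the round robin in action:
+
+::
+
+ for i in 1 2 3 4; do curl -s http://<vip_address>; done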
diff --git a/lib/etcd3 b/lib/etcd3
index c65a522..0748ea0 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -27,6 +27,10 @@
ETCD_DATA_DIR="$DATA_DIR/etcd"
ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
ETCD_BIN_DIR="$DEST/bin"
+# The option below will mount ETCD_DATA_DIR as a ramdisk, which is useful for
+# running etcd-heavy services in the gate VMs, e.g. Kubernetes.
+ETCD_USE_RAMDISK=$(trueorfalse False ETCD_USE_RAMDISK)
+ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512}
if is_ubuntu ; then
UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1`
@@ -89,6 +93,9 @@
$SYSTEMCTL daemon-reload
+ if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+ sudo umount $ETCD_DATA_DIR
+ fi
sudo rm -rf $ETCD_DATA_DIR
}
@@ -98,6 +105,9 @@
# Create the necessary directories
sudo mkdir -p $ETCD_BIN_DIR
sudo mkdir -p $ETCD_DATA_DIR
+ if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+ sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR
+ fi
# Download and cache the etcd tgz for subsequent use
local etcd_file
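
To opt in from local.conf (a usage sketch; 1024 is just an example size)::

    [[local|localrc]]
    ETCD_USE_RAMDISK=True
    ETCD_RAMDISK_MB=1024   # optional; defaults to 512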
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 523024e..fbe4c92 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -69,7 +69,7 @@
restart_service openvswitch
sudo systemctl enable openvswitch
elif is_suse; then
- if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+ if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
restart_service openvswitch-switch
else
# workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
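
Why vercmp: bash's ``-lt`` does integer arithmetic, so a dotted release such
as "12.2" makes ``[[ $os_RELEASE -lt 12.2 ]]`` error out rather than compare.
A minimal sketch of the corrected comparison, using devstack's ``vercmp``
helper from functions-common::

    vercmp "12.1" "<" "12.2" && echo "pre-12.2"         # true: 12.1 < 12.2
    vercmp "12.10" "<" "12.2" || echo "12.2 or later"   # 12.10 sorts after 12.2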
diff --git a/lib/nova b/lib/nova
index 0b08c0a..6c9b944 100644
--- a/lib/nova
+++ b/lib/nova
@@ -595,6 +595,21 @@
fi
}
+# Configure access to placement from a nova service, usually
+# compute, but sometimes conductor.
+function configure_placement_nova_compute {
+ # Use the provided config file path or default to $NOVA_CONF.
+ local conf=${1:-$NOVA_CONF}
+ iniset $conf placement auth_type "password"
+ iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
+ iniset $conf placement username placement
+ iniset $conf placement password "$SERVICE_PASSWORD"
+ iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement project_name "$SERVICE_TENANT_NAME"
+ iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
+ iniset $conf placement region_name "$REGION_NAME"
+}
+
function configure_console_compute {
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
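
For reference, a sketch of the ``[placement]`` section that the iniset calls
in ``configure_placement_nova_compute`` write into nova.conf (values assume
stock devstack defaults)::

    [placement]
    auth_type = password
    auth_url = http://<keystone host>/identity
    username = placement
    password = <SERVICE_PASSWORD>
    user_domain_name = Default
    project_name = service
    project_domain_name = Default
    region_name = RegionOne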
diff --git a/lib/placement b/lib/placement
index da69e39..a89cd26 100644
--- a/lib/placement
+++ b/lib/placement
@@ -93,19 +93,6 @@
" -i $placement_api_apache_conf
}
-function configure_placement_nova_compute {
- # Use the provided config file path or default to $NOVA_CONF.
- local conf=${1:-$NOVA_CONF}
- iniset $conf placement auth_type "password"
- iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
- iniset $conf placement username placement
- iniset $conf placement password "$SERVICE_PASSWORD"
- iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $conf placement project_name "$SERVICE_TENANT_NAME"
- iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
- iniset $conf placement auth_strategy $PLACEMENT_AUTH_STRATEGY
-}
-
# create_placement_conf() - Write config
function create_placement_conf {
rm -f $PLACEMENT_CONF
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
index 5a198b2..276c4e0 100644
--- a/roles/fetch-devstack-log-dir/tasks/main.yaml
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -1,5 +1,10 @@
+# As the user in the guest may not exist on the executor,
+# we do not preserve the group or owner of the copied logs.
+
- name: Collect devstack logs
synchronize:
dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
mode: pull
src: "{{ devstack_base_dir }}/logs"
+ group: no
+ owner: no