Initialize config repository
diff --git a/containers/README b/containers/README
new file mode 100644
index 0000000..ce25a16
--- /dev/null
+++ b/containers/README
@@ -0,0 +1,2 @@
+# Each directory shall contain a Dockerfile
+# The config-update job builds these on the k1s hosts using the localhost/k1s tag prefix
diff --git a/containers/centos-7/Dockerfile b/containers/centos-7/Dockerfile
new file mode 100644
index 0000000..ebec03c
--- /dev/null
+++ b/containers/centos-7/Dockerfile
@@ -0,0 +1,35 @@
+# Copyright (C) 2019 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+FROM registry.centos.org/centos:7
+
+# Zuul minimal package
+RUN yum install -y sudo rsync git traceroute iproute python3-setuptools python3-pip rpm-build python3-rpm-macros
+
+# Zuul expects /bin/pip to be available
+RUN ln -sf /bin/pip3 /bin/pip && /bin/pip3 install --user "tox>=3.8.0"
+
+# Install base Python linter for the demo project
+RUN /bin/pip install flake8 yamllint
+
+# Zuul uses revoke-sudo. We can simulate that by moving the default sudoers to zuul
+# And this will prevent root from using sudo when the file is removed by revoke-sudo
+RUN mv /etc/sudoers /etc/sudoers.d/zuul && grep includedir /etc/sudoers.d/zuul > /etc/sudoers && sed -e 's/.*includedir.*//' -i /etc/sudoers.d/zuul && chmod 440 /etc/sudoers
+
+# Create fake zuul users
+RUN echo "zuul:x:0:0:root:/root:/bin/bash" >> /etc/passwd
+
+# Enable root local bin
+ENV PATH=/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+WORKDIR /root
diff --git a/gerrit/commentlinks.yaml b/gerrit/commentlinks.yaml
new file mode 100644
index 0000000..d72d52c
--- /dev/null
+++ b/gerrit/commentlinks.yaml
@@ -0,0 +1,4 @@
+---
+# Note: '\' character needs to be escaped twice ('\\')
+# Double quote needs to be escaped too
+commentlinks: []
diff --git a/gerrit/replication.config b/gerrit/replication.config
new file mode 100644
index 0000000..1a1afc7
--- /dev/null
+++ b/gerrit/replication.config
@@ -0,0 +1,4 @@
+[gerrit]
+ defaultForceUpdate = true
+ replicateOnStartup = true
+ autoReload = true
diff --git a/nodepool/_local_hypervisor_k1s.yaml b/nodepool/_local_hypervisor_k1s.yaml
new file mode 100644
index 0000000..666c99a
--- /dev/null
+++ b/nodepool/_local_hypervisor_k1s.yaml
@@ -0,0 +1 @@
+# This file is managed by sfconfig, do not edit manually
diff --git a/nodepool/_local_hypervisor_openshift.yaml b/nodepool/_local_hypervisor_openshift.yaml
new file mode 100644
index 0000000..404b62b
--- /dev/null
+++ b/nodepool/_local_hypervisor_openshift.yaml
@@ -0,0 +1,2 @@
+# This file is managed by sfconfig, do not edit manually
+---
diff --git a/nodepool/_pods.yaml b/nodepool/_pods.yaml
new file mode 100644
index 0000000..491a749
--- /dev/null
+++ b/nodepool/_pods.yaml
@@ -0,0 +1,5 @@
+# This file is managed by sfconfig, do not edit manually
+---
+labels:
+ - name: pod-centos-7
+
diff --git a/nodepool/elements/README b/nodepool/elements/README
new file mode 100644
index 0000000..257e142
--- /dev/null
+++ b/nodepool/elements/README
@@ -0,0 +1,5 @@
+Custom diskimage builder elements to be used by nodepool-builder.
+
+Nodepool is configured to use the following by default (with low precedence):
+* https://softwarefactory-project.io/r/software-factory/sf-elements
+* git://git.openstack.org/openstack-infra/project-config/nodepool/elements
diff --git a/nodepool/nodepool.yaml b/nodepool/nodepool.yaml
new file mode 100644
index 0000000..d9cb250
--- /dev/null
+++ b/nodepool/nodepool.yaml
@@ -0,0 +1,41 @@
+# Uncomment to enable openstack provider
+#---
+#diskimages:
+# - name: dib-centos-7
+# elements:
+# - centos-minimal
+# - nodepool-minimal
+# - zuul-worker-user
+# - name: cloud-fedora-rawhide
+# python-path: /usr/bin/python3
+# dib-cmd: /usr/bin/dib-virt-customize /etc/nodepool/virt_images/cloud-fedora-rawhide.yaml
+#
+#
+#labels:
+# - name: dib-centos-7
+# min-ready: 1
+# - name: cloud-fedora-rawhide
+# min-ready: 1
+#
+#providers:
+# - name: default
+# cloud: default
+# clean-floating-ips: true
+# image-name-format: '{image_name}-{timestamp}'
+# boot-timeout: 120
+# rate: 10.0
+# diskimages:
+# - name: dib-centos-7
+# - name: cloud-fedora-rawhide
+# pools:
+# - name: main
+# max-servers: 5
+# networks:
+# - worker-net-name
+# labels:
+# - name: dib-centos-7
+# min-ram: 1024
+# diskimage: dib-centos-7
+# - name: cloud-fedora-rawhide
+# min-ram: 1024
+# diskimage: cloud-fedora-rawhide
diff --git a/nodepool/openshift.yaml b/nodepool/openshift.yaml
new file mode 100644
index 0000000..64bb998
--- /dev/null
+++ b/nodepool/openshift.yaml
@@ -0,0 +1,45 @@
+# Uncomment to enable openshift provider
+#---
+# After the provider is registered in sfconfig.yaml, grab the context name using:
+# sudo -u nodepool oc config get-contexts
+#
+#
+# To use the openshift driver, a self provisioner service account is needed:
+# Request the cluster operator to create:
+# oc create sa nodepool
+# oc adm policy add-cluster-role-to-user self-provisioner --serviceaccount=nodepool
+# oc policy add-role-to-user admin --serviceaccount=nodepool
+# oc sa get-token nodepool
+# Then register the token in sfconfig.yaml
+#
+#providers:
+# - name: openshift01
+# driver: openshift
+# context: self-provisioner-service-account-context-name
+# pools:
+# - name: zuul-ci
+# labels:
+# - name: openshift-project
+# type: project
+# - name: openshift-pod-fedora
+# type: pod
+# image: docker.io/fedora:28
+#
+#
+###############################################################################
+# Or use the openshiftpods driver with a regular service account:
+# oc new-project nodepool
+# oc create sa nodepool
+# oc policy add-role-to-user admin --serviceaccount=nodepool
+# oc sa get-token nodepool
+# Then register the token in sfconfig.yaml
+#
+#providers:
+# - name: openshift01
+# driver: openshiftpods
+# context: "nodepool/openshift-example-com:8443/system:serviceaccount:nodepool:nodepool"
+# pools:
+# - name: nodepool
+# labels:
+# - name: openshift-pod
+# image: docker.io/fedora:28
diff --git a/nodepool/static_config/README.md b/nodepool/static_config/README.md
new file mode 100644
index 0000000..55b446b
--- /dev/null
+++ b/nodepool/static_config/README.md
@@ -0,0 +1,5 @@
+# Nodepool static configuration
+
+To create a static configuration for a nodepool service, create a file named
+'hostname.yaml' with the nodepool configuration in this directory. The file will
+be installed on the host instead of the generated configuration.
diff --git a/nodepool/virt_images/README.md b/nodepool/virt_images/README.md
new file mode 100644
index 0000000..3fed60e
--- /dev/null
+++ b/nodepool/virt_images/README.md
@@ -0,0 +1,12 @@
+# Virt-customize based nodepool image
+
+This directory contains nodepool images built using virt-customize-dib elements.
+
+To use a playbook, add this to a nodepool yaml file:
+
+```yaml
+diskimages:
+ - name: cloud-fedora-rawhide
+ python-path: /usr/bin/python3
+ dib-cmd: /usr/bin/dib-virt-customize /etc/nodepool/virt_images/cloud-fedora-rawhide.yaml
+```
diff --git a/nodepool/virt_images/cloud-fedora-rawhide.yaml b/nodepool/virt_images/cloud-fedora-rawhide.yaml
new file mode 100644
index 0000000..07d2ecc
--- /dev/null
+++ b/nodepool/virt_images/cloud-fedora-rawhide.yaml
@@ -0,0 +1,38 @@
+---
+- name: Build a fedora cloud image suitable for Zuul
+ hosts: localhost
+ vars:
+ image: Fedora-Cloud-Base-Rawhide.x86_64.qcow2
+ extra_packages:
+ # Extra system tools
+ - pigz
+ - bridge-utils
+ - wget
+ - unzip
+ # Basic CI tools
+ - make
+ - gcc
+ - patch
+ tasks:
+ - block:
+ - import_role:
+ name: discover-rawhide
+ - import_role:
+ name: base-appliance
+ - import_role:
+ name: base
+ - import_role:
+ name: sshd-config
+ - import_role:
+ name: network-config
+ - import_role:
+ name: zuul-user
+ - import_role:
+ name: base-install-packages
+ - import_role:
+ name: base-customize
+ - import_role:
+ name: base-finalize
+ always:
+ - import_role:
+ name: base-cleanup
diff --git a/nodepool/virt_images/roles/base-appliance/tasks/main.yaml b/nodepool/virt_images/roles/base-appliance/tasks/main.yaml
new file mode 100644
index 0000000..e5e83fd
--- /dev/null
+++ b/nodepool/virt_images/roles/base-appliance/tasks/main.yaml
@@ -0,0 +1,12 @@
+- name: Download appliance
+ unarchive:
+ src: http://download.libguestfs.org/binaries/appliance/appliance-1.46.0.tar.xz
+ remote_src: yes
+ dest: /tmp
+ args:
+ creates: /tmp/appliance
+
+- set_fact:
+ virt_customize_env:
+ LIBGUESTFS_PATH: '/tmp/appliance'
+ LIBGUESTFS_BACKEND: 'direct'
diff --git a/nodepool/virt_images/roles/base-cleanup/defaults/main.yaml b/nodepool/virt_images/roles/base-cleanup/defaults/main.yaml
new file mode 100644
index 0000000..dd9240b
--- /dev/null
+++ b/nodepool/virt_images/roles/base-cleanup/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
diff --git a/nodepool/virt_images/roles/base-cleanup/tasks/main.yaml b/nodepool/virt_images/roles/base-cleanup/tasks/main.yaml
new file mode 100644
index 0000000..29c01ea
--- /dev/null
+++ b/nodepool/virt_images/roles/base-cleanup/tasks/main.yaml
@@ -0,0 +1,5 @@
+---
+- name: Remove tmp directory
+ file:
+ path: "{{ image_tmp_dir }}"
+ state: absent
diff --git a/nodepool/virt_images/roles/base-customize/defaults/main.yaml b/nodepool/virt_images/roles/base-customize/defaults/main.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/nodepool/virt_images/roles/base-customize/defaults/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/nodepool/virt_images/roles/base-customize/tasks/main.yaml b/nodepool/virt_images/roles/base-customize/tasks/main.yaml
new file mode 100644
index 0000000..c1d397d
--- /dev/null
+++ b/nodepool/virt_images/roles/base-customize/tasks/main.yaml
@@ -0,0 +1,7 @@
+---
+- debug:
+ msg: "Running: {{ ' '.join(virt_customize_cmd) }}"
+
+- name: Run virt-customize
+ command: "{{ ' '.join(virt_customize_cmd) }}"
+ environment: "{{ virt_customize_env|default({}) }}"
diff --git a/nodepool/virt_images/roles/base-finalize/defaults/main.yaml b/nodepool/virt_images/roles/base-finalize/defaults/main.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/nodepool/virt_images/roles/base-finalize/defaults/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/nodepool/virt_images/roles/base-finalize/tasks/main.yaml b/nodepool/virt_images/roles/base-finalize/tasks/main.yaml
new file mode 100644
index 0000000..9fb8968
--- /dev/null
+++ b/nodepool/virt_images/roles/base-finalize/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Create raw file
+ command: "qemu-img convert -O raw {{ image_file }} {{ image_output }}.raw"
+ when: raw_type | default(False) | bool
+
+- name: Create qcow file
+ command: "mv {{ image_file }} {{ image_output }}.qcow2"
+ when: qcow2_type | default(False) | bool
diff --git a/nodepool/virt_images/roles/base-install-packages/defaults/main.yaml b/nodepool/virt_images/roles/base-install-packages/defaults/main.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/nodepool/virt_images/roles/base-install-packages/defaults/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/nodepool/virt_images/roles/base-install-packages/tasks/main.yaml b/nodepool/virt_images/roles/base-install-packages/tasks/main.yaml
new file mode 100644
index 0000000..588de54
--- /dev/null
+++ b/nodepool/virt_images/roles/base-install-packages/tasks/main.yaml
@@ -0,0 +1,7 @@
+---
+- set_fact:
+ cmd:
+ - "--install '{{ extra_packages | join(',') }}'"
+
+- set_fact:
+ virt_customize_cmd: "{{ virt_customize_cmd + cmd }}"
diff --git a/nodepool/virt_images/roles/base/defaults/main.yaml b/nodepool/virt_images/roles/base/defaults/main.yaml
new file mode 100644
index 0000000..a1cdfac
--- /dev/null
+++ b/nodepool/virt_images/roles/base/defaults/main.yaml
@@ -0,0 +1,10 @@
+---
+image_cache_dir: "/var/cache/nodepool"
+image_wipe_cache: False
+memsize: 1024
+base_packages:
+ - traceroute
+ - iproute
+ - git
+ - rsync
+extra_packages: []
diff --git a/nodepool/virt_images/roles/base/tasks/main.yaml b/nodepool/virt_images/roles/base/tasks/main.yaml
new file mode 100644
index 0000000..88f8799
--- /dev/null
+++ b/nodepool/virt_images/roles/base/tasks/main.yaml
@@ -0,0 +1,70 @@
+---
+- assert:
+ that:
+ - image_url is defined
+ - image_checksum is defined
+ - image is defined
+ - image_url != ''
+ - image_checksum != ''
+ - image != ''
+
+- name: Set some runtime facts
+ set_fact:
+ image_cache_file: "{{ image_cache_dir }}/{{ image }}"
+ image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
+
+- name: Make sure cache directory exist
+ file:
+ path: "{{ image_cache_dir }}"
+ state: directory
+
+- name: Delete previous image cache
+ file:
+ path: "{{ image_cache_file }}"
+ state: absent
+ when: image_wipe_cache
+
+- name: Check if image is already downloaded
+ stat:
+ path: "{{ image_cache_file }}"
+ register: _image_cache_file_stat
+
+- name: Download if checksum doesn't match
+ get_url:
+ url: "{{ image_url }}"
+ dest: "{{ image_cache_file }}"
+ checksum: "{{ image_checksum }}"
+ when: not _image_cache_file_stat.stat.exists
+
+- name: Extract the image if necessary
+ command: "xz -k -d {{ image_cache_file }}.xz"
+ args:
+ chdir: "{{ image_cache_dir }}"
+ creates: "{{ image_cache_file }}"
+
+- name: Update the cache
+ command: "virt-customize -m {{ memsize }} -a {{ image_cache_file }} --update"
+ environment: "{{ virt_customize_env|default({}) }}"
+
+- name: Create tmp directory
+ file:
+ path: "{{ image_tmp_dir }}"
+ state: directory
+ mode: '0755'
+
+- name: Set filename copy fact
+ set_fact:
+ image_file: "{{ image_tmp_dir }}/{{ image_cache_file | basename }}"
+
+- name: Copy the image
+ copy:
+ src: "{{ image_cache_file }}"
+ dest: "{{ image_file }}"
+ remote_src: true
+ mode: '0644'
+
+- set_fact:
+ virt_customize_cmd:
+ - "virt-customize -m {{ memsize }} -a {{ image_file }}"
+ - "--selinux-relabel"
+ - "--install '{{ base_packages | join(',') }}'"
diff --git a/nodepool/virt_images/roles/discover-rawhide/defaults/main.yaml b/nodepool/virt_images/roles/discover-rawhide/defaults/main.yaml
new file mode 100644
index 0000000..e87cb9c
--- /dev/null
+++ b/nodepool/virt_images/roles/discover-rawhide/defaults/main.yaml
@@ -0,0 +1 @@
+base_url: https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/x86_64/images/
diff --git a/nodepool/virt_images/roles/discover-rawhide/tasks/main.yaml b/nodepool/virt_images/roles/discover-rawhide/tasks/main.yaml
new file mode 100644
index 0000000..51e47c8
--- /dev/null
+++ b/nodepool/virt_images/roles/discover-rawhide/tasks/main.yaml
@@ -0,0 +1,45 @@
+- tempfile:
+ state: file
+ register: tempfile
+
+- file:
+ path: "{{ tempfile.path }}"
+ state: absent
+
+- name: Fetch publication page
+ get_url:
+ url: "{{ base_url }}"
+ dest: "{{ tempfile.path }}"
+
+- name: Find rawhide qcow2 url
+ command: sed -n "/qcow2/ s/.*\(Fedora-Cloud-Base-Rawhide-.*\)<\/a>.*/\1/p" {{ tempfile.path }}
+ register: get_qcow_image_name
+
+- name: Find checksum file url
+ command: sed -n "/CHECKSUM/ s/.*\(Fedora-Cloud-Rawhide-.*\)<\/a>.*/\1/p" {{ tempfile.path }}
+ register: get_checksum_name
+
+- set_fact:
+ checksums_url: "{{ base_url }}{{ get_checksum_name.stdout }}"
+
+- file:
+ path: "{{ tempfile.path }}"
+ state: absent
+
+- name: Fetch checksum file
+ get_url:
+ url: "{{ checksums_url }}"
+ dest: "{{ tempfile.path }}"
+
+- name: Find checksum
+ command: sed -n "/SHA256 ({{ get_qcow_image_name.stdout }}) = / s/.* = \(.*\)/\1/p" {{ tempfile.path }}
+ register: get_checksum
+
+- set_fact:
+ image_url: "{{ base_url }}{{ get_qcow_image_name.stdout }}"
+ image_checksum: "sha256:{{ get_checksum.stdout }}"
+
+- debug:
+ msg: |
+ Discovered image_url: {{ image_url }}
+ Discovered image_checksum: {{ image_checksum }}
diff --git a/nodepool/virt_images/roles/network-config/defaults/main.yaml b/nodepool/virt_images/roles/network-config/defaults/main.yaml
new file mode 100644
index 0000000..dd9240b
--- /dev/null
+++ b/nodepool/virt_images/roles/network-config/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
diff --git a/nodepool/virt_images/roles/network-config/tasks/main.yaml b/nodepool/virt_images/roles/network-config/tasks/main.yaml
new file mode 100644
index 0000000..5d49314
--- /dev/null
+++ b/nodepool/virt_images/roles/network-config/tasks/main.yaml
@@ -0,0 +1,12 @@
+---
+- set_fact:
+ cmd:
+ - "--append-line '/etc/sysctl.conf:net.ipv6.conf.all.disable_ipv6 = 1'"
+ - "--append-line '/etc/sysctl.conf:net.ipv6.conf.default.disable_ipv6 = 1'"
+ - "--append-line '/etc/sysconfig/network:IPV6INIT=no'"
+ - "--append-line '/etc/sysconfig/network:IPV6_AUTOCONF=no'"
+ - "--append-line '/etc/sysconfig/network:IPV6_DEFROUTE=no'"
+ - "--append-line '/etc/yum.conf:ip_resolve=4'"
+
+- set_fact:
+ virt_customize_cmd: "{{ virt_customize_cmd + cmd }}"
diff --git a/nodepool/virt_images/roles/sshd-config/defaults/main.yaml b/nodepool/virt_images/roles/sshd-config/defaults/main.yaml
new file mode 100644
index 0000000..dd9240b
--- /dev/null
+++ b/nodepool/virt_images/roles/sshd-config/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
diff --git a/nodepool/virt_images/roles/sshd-config/files/sshd_config b/nodepool/virt_images/roles/sshd-config/files/sshd_config
new file mode 100644
index 0000000..df17bfa
--- /dev/null
+++ b/nodepool/virt_images/roles/sshd-config/files/sshd_config
@@ -0,0 +1,20 @@
+HostKey /etc/ssh/ssh_host_rsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
+KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
+Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
+MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com
+SyslogFacility AUTHPRIV
+AuthorizedKeysFile .ssh/authorized_keys
+PasswordAuthentication no
+ChallengeResponseAuthentication no
+GSSAPIAuthentication no
+GSSAPICleanupCredentials no
+UsePAM yes
+X11Forwarding no
+UseDNS no
+AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+AcceptEnv XMODIFIERS
+Subsystem sftp /usr/libexec/openssh/sftp-server
diff --git a/nodepool/virt_images/roles/sshd-config/tasks/main.yaml b/nodepool/virt_images/roles/sshd-config/tasks/main.yaml
new file mode 100644
index 0000000..ff62665
--- /dev/null
+++ b/nodepool/virt_images/roles/sshd-config/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: Prepare sshd_config file
+ copy:
+ src: files/sshd_config
+ dest: "{{ image_tmp_dir }}/sshd_config"
+
+- set_fact:
+ cmd:
+ - "--copy-in '{{ image_tmp_dir }}/sshd_config:/etc/ssh/'"
+ - "--chmod '0600:/etc/ssh/sshd_config'"
+
+- set_fact:
+ virt_customize_cmd: "{{ virt_customize_cmd + cmd }}"
diff --git a/nodepool/virt_images/roles/zuul-user/defaults/main.yaml b/nodepool/virt_images/roles/zuul-user/defaults/main.yaml
new file mode 100644
index 0000000..dd9240b
--- /dev/null
+++ b/nodepool/virt_images/roles/zuul-user/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
diff --git a/nodepool/virt_images/roles/zuul-user/tasks/main.yaml b/nodepool/virt_images/roles/zuul-user/tasks/main.yaml
new file mode 100644
index 0000000..2382ba4
--- /dev/null
+++ b/nodepool/virt_images/roles/zuul-user/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: Prepare the sudoers file
+ copy:
+ content: |
+ Defaults !requiretty
+ zuul-worker ALL=(ALL) NOPASSWD:ALL
+ dest: "{{ image_tmp_dir }}/zuul-worker"
+
+- name: Prepare the authorized_keys file
+ copy:
+ src: /var/lib/nodepool/.ssh/zuul_rsa.pub
+ dest: "{{ image_tmp_dir }}/authorized_keys"
+ remote_src: true
+
+- set_fact:
+ cmd:
+ - "--run-command 'adduser -m zuul-worker'"
+ - "--mkdir '/home/zuul-worker/.ssh'"
+ - "--chmod '0700:/home/zuul-worker/.ssh'"
+ - "--copy-in '{{ image_tmp_dir }}/authorized_keys:/home/zuul-worker/.ssh/'"
+ - "--chmod '0600:/home/zuul-worker/.ssh/authorized_keys'"
+ - "--run-command 'chown -R zuul-worker:zuul-worker /home/zuul-worker/.ssh/'"
+ - "--copy-in '{{ image_tmp_dir }}/zuul-worker:/etc/sudoers.d/'"
+ - "--chmod '0440:/etc/sudoers.d/zuul-worker'"
+
+- set_fact:
+ virt_customize_cmd: "{{ virt_customize_cmd + cmd }}"
diff --git a/playbooks/base/README b/playbooks/base/README
new file mode 100644
index 0000000..4450121
--- /dev/null
+++ b/playbooks/base/README
@@ -0,0 +1 @@
+Base job playbooks
diff --git a/playbooks/base/post.yaml b/playbooks/base/post.yaml
new file mode 100644
index 0000000..6bbf3d9
--- /dev/null
+++ b/playbooks/base/post.yaml
@@ -0,0 +1,40 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: all
+ tasks:
+ - block:
+ - include_role: name=fetch-output
+ when:
+ - "ansible_connection != 'kubectl'"
+ - ansible_user_dir is defined
+ - block:
+ - include_role: name=fetch-output-openshift
+ when:
+ - "ansible_connection == 'kubectl'"
+ - ansible_user_dir is defined
+ - import_role: name=merge-output-to-logs
+ when: ansible_user_dir is defined
+
+- hosts: localhost
+ roles:
+ - role: add-fileserver
+ fileserver: "{{ site_sflogs }}"
+ - role: generate-zuul-manifest
+ - role: ara-report
+ # This depends-on https://review.openstack.org/577675
+ ara_report_run: True
+ ara_report_type: database
+ ara_report_path: "{{ zuul.executor.log_root }}/ara-report"
+
+- hosts: "spfactory.storpool.com"
+ gather_facts: false
+ tasks:
+ # Use a block because play vars doesn't take precedence on roles vars
+ - block:
+ - import_role: name=upload-logs
+ - import_role: name=buildset-artifacts-location
+ vars:
+ zuul_log_compress: true
+ zuul_log_url: "https://spfactory.storpool.com/logs"
+ zuul_logserver_root: "{{ site_sflogs.path }}"
+
diff --git a/playbooks/base/pre.yaml b/playbooks/base/pre.yaml
new file mode 100644
index 0000000..91d29c8
--- /dev/null
+++ b/playbooks/base/pre.yaml
@@ -0,0 +1,26 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+ tasks:
+ - block:
+ - import_role: name=emit-job-header
+ # This depends-on https://review.openstack.org/578234
+ - import_role: name=log-inventory
+ vars:
+ zuul_log_url: "https://spfactory.storpool.com/logs"
+
+- hosts: all
+ tasks:
+ - include_role: name=start-zuul-console
+ - block:
+ - include_role: name=validate-host
+ - include_role: name=prepare-workspace
+ - include_role: name=add-build-sshkey
+ when: "ansible_connection != 'kubectl'"
+ - block:
+ - include_role: name=prepare-workspace-openshift
+ - include_role: name=remove-zuul-sshkey
+ run_once: true
+ when: "ansible_connection == 'kubectl'"
+ - import_role: name=ensure-output-dirs
+ when: ansible_user_dir is defined
diff --git a/playbooks/config/check-fetch-artifacts.yaml b/playbooks/config/check-fetch-artifacts.yaml
new file mode 100644
index 0000000..288f4e1
--- /dev/null
+++ b/playbooks/config/check-fetch-artifacts.yaml
@@ -0,0 +1,14 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+ tasks:
+ - name: Set speculative config path
+ set_fact:
+ config_root: "{{ zuul.executor.src_root }}/{{ zuul.project.canonical_name }}"
+
+ - name: Fetch artifacts
+ synchronize:
+ src: "{{ config_root }}/build"
+ dest: "{{ zuul.executor.log_root }}/logs"
+ mode: pull
+ no_log: True
diff --git a/playbooks/config/check.yaml b/playbooks/config/check.yaml
new file mode 100644
index 0000000..53e983d
--- /dev/null
+++ b/playbooks/config/check.yaml
@@ -0,0 +1,192 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+ tasks:
+ - name: Set speculative config path
+ set_fact:
+ config_root: "{{ zuul.executor.src_root }}/{{ zuul.project.canonical_name }}"
+
+ - name: Fetch default config
+ get_url:
+ url: "{{ gateway_url }}/_defconf.tgz"
+ dest: "{{ config_root }}/"
+ retries: 30
+ delay: 1
+
+ - name: Create defconf directory
+ file:
+ path: "{{ config_root }}/defconf"
+ state: directory
+
+ - name: Extract default config
+ unarchive:
+ src: "{{ config_root }}/_defconf.tgz"
+ dest: "{{ config_root }}/defconf/"
+
+ - name: include arch.yaml
+ include_vars:
+ file: "{{ config_root }}/defconf/arch.yaml"
+ name: arch
+
+ - name: Create build directory to merge configuration
+ file:
+ path: "{{ config_root }}/build"
+ state: directory
+
+ - name: Tenant env config-check preparation
+ block:
+ - name: Create defconf-master directory
+ file:
+ path: "{{ config_root }}/defconf-master"
+ state: directory
+
+ - name: Fetch master SF default config
+ get_url:
+ url: "{{ master_sf_url }}/_defconf.tgz"
+ dest: "{{ config_root }}/_defconf-master.tgz"
+ retries: 30
+ delay: 1
+
+ - name: Extract master SF default config
+ unarchive:
+ src: "{{ config_root }}/_defconf-master.tgz"
+ dest: "{{ config_root }}/defconf-master/"
+
+ - name: Overwrite with master SF fake zuul.conf
+ copy:
+ remote_src: true
+ src: "{{ config_root }}/defconf-master/defconf-zuul.conf"
+ dest: "{{ config_root }}/defconf/defconf-zuul.conf"
+
+ - set_fact:
+ tenant_options: "--tenant --master-sf-url {{ master_sf_url }}"
+ when: tenant_config is defined and tenant_config
+
+ - name: Copy service_user password in workspace
+ copy:
+ content: "{{ service_user.password }}"
+ dest: "{{ config_root }}/.service_user_password"
+ no_log: true
+
+ - name: Check resources changes
+ shell: managesf-resources remote-validate --remote-gateway {{ gateway_url }}
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Check gerrit replication
+ command: git config -f gerrit/replication.config -l
+ args:
+ chdir: "{{ config_root }}"
+ when: '"gerrit" in arch.roles'
+
+ - name: Check gerrit commentlinks
+ command: python3 -c "import yaml; 'commentlinks' in yaml.safe_load(open('gerrit/commentlinks.yaml'))"
+ args:
+ chdir: "{{ config_root }}"
+ when: '"gerrit" in arch.roles'
+
+ - name: Check policy file
+ command: python3 -c "import yaml; yaml.safe_load(open('policies/policy.yaml'))"
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Check nodepool dhall configuration
+ shell: |
+ for dhall_conf in $(ls nodepool/static_config/*.dhall 2> /dev/null); do
+ echo ${dhall_conf}
+ dhall-to-yaml --file ${dhall_conf} --output build/$(basename ${dhall_conf} .dhall).yaml || exit 1
+ done
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Validate nodepool configuration
+ block:
+ - name: Install defconf nodepool.yaml
+ copy:
+ remote_src: true
+ src: "{{ config_root }}/defconf/defconf-nodepool.yaml"
+ dest: "{{ config_root }}/nodepool/_nodepool.yaml"
+
+ - name: Check all launcher-hosts exists (task fail with invalid hostname on stdout)
+ shell: >
+ find {{ config_root }}/nodepool/ -name "*.yaml" | xargs grep '^ *launcher-host: ' | awk '{ print $3 }' |
+ grep -v '^\({{ arch.launcher_hosts | join('\|') }}\)$'
+ register: _unknown_launcher_hosts
+ failed_when: _unknown_launcher_hosts.stdout
+ changed_when: false
+
+ - name: Generate per launcher-hosts configuration
+ block:
+ - name: Generate configuration
+ command: >
+ managesf-configuration nodepool
+ --cache-dir {{ config_root }}/../.cache
+ {% if item != arch.launcher_hosts[0] %}--extra-launcher {% endif %}
+ --hostname {{ item }}
+ --config-dir {{ config_root }} --output build/nodepool-{{ item }}.yaml
+ args:
+ chdir: "{{ config_root }}"
+ loop: "{{ arch.launcher_hosts }}"
+
+ - name: Run nodepool config-validate for nodepool-launchers
+ command: >
+ nodepool -c build/nodepool-{{ item }}.yaml config-validate
+ args:
+ chdir: "{{ config_root }}"
+ loop: "{{ arch.launcher_hosts }}"
+ when: arch.launcher_hosts
+
+ - name: Merge nodepool-builder config repo files
+ command: >
+ managesf-configuration nodepool
+ --cache-dir {{ config_root }}/../.cache --builder
+ --config-dir {{ config_root }} --output build/nodepool-builder.yaml
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Run nodepool config-validate for nodepool-builder
+ command: >
+ nodepool -c build/nodepool-builder.yaml config-validate
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Run nodepool config-validate for static configuration
+ command: >
+ find nodepool/static_config/ -name '*.yaml' -exec nodepool -c {} config-validate \;
+ args:
+ chdir: "{{ config_root }}"
+ when:
+ - '"nodepool-launcher" in arch.roles'
+
+ - name: Validate zuul configuration
+ block:
+ - name: Install fake zuul.conf
+ copy:
+ remote_src: true
+ src: "{{ config_root }}/defconf/defconf-zuul.conf"
+ dest: "{{ config_root }}/build/zuul.conf"
+
+ - name: Merge zuul tenant config
+ command: >
+ managesf-configuration zuul
+ --cache-dir {{ config_root }}/../.cache
+ --config-dir {{ config_root }}
+ --gateway-url {{ gateway_url }} {{ tenant_options | default('') }}
+ --output build/main.yaml
+ args:
+ chdir: "{{ config_root }}"
+
+ - name: Validate zuul config syntax
+ command: >
+ env - /usr/local/bin/zuul -c zuul.conf tenant-conf-check
+ args:
+ chdir: "{{ config_root }}/build"
+
+ - name: Validate metrics dashboards
+ block:
+ - name: Check syntax errors in metrics dashboards
+ shell: |
+ find . -regextype posix-egrep -regex '.*.(yaml|yml)$' | xargs -I yaml grafana-dashboard validate yaml
+ args:
+ chdir: "{{ config_root }}/metrics"
+ when: '"grafana" in arch.roles'
diff --git a/playbooks/config/config-update.yaml b/playbooks/config/config-update.yaml
new file mode 100644
index 0000000..86b4e19
--- /dev/null
+++ b/playbooks/config/config-update.yaml
@@ -0,0 +1,5 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+ tasks:
+ - include_tasks: update_local.yaml
diff --git a/playbooks/config/update_local.yaml b/playbooks/config/update_local.yaml
new file mode 100644
index 0000000..a334e85
--- /dev/null
+++ b/playbooks/config/update_local.yaml
@@ -0,0 +1,26 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: Create SSH private key tempfile
+ tempfile:
+ state: file
+ register: ssh_private_key_tmp
+
+- name: Create SSH private key from secret
+ copy:
+ content: "{{ site_install_server.ssh_private_key }}"
+ dest: "{{ ssh_private_key_tmp.path }}"
+ mode: '0600'
+
+- name: Add zuul ssh key
+ command: "ssh-add {{ ssh_private_key_tmp.path }}"
+
+- name: Remove SSH private key from disk
+ command: "shred {{ ssh_private_key_tmp.path }}"
+
+- name: Add site_install_server server to known hosts
+ known_hosts:
+ name: "{{ site_install_server.fqdn }}"
+ key: "{{ site_install_server.ssh_known_hosts }}"
+
+- name: run config update
+ command: "ssh root@{{ site_install_server.fqdn }} config_update {{ (zuul | zuul_legacy_vars)['ZUUL_NEWREV'] }}"
diff --git a/playbooks/config/update_tenant.yaml b/playbooks/config/update_tenant.yaml
new file mode 100644
index 0000000..9d8c51f
--- /dev/null
+++ b/playbooks/config/update_tenant.yaml
@@ -0,0 +1,40 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: Discover path of config repository
+ command: git rev-parse --show-toplevel
+ register: config_path
+
+- name: Get last change sha
+ command: "git --git-dir={{ config_path.stdout }}/.git log -n1 --pretty=format:'%h' --no-merges"
+ register: git_log
+
+- name: Get last change on resources sha
+ command: "git --git-dir={{ config_path.stdout }}/.git log -n1 --pretty=format:'%h' --no-merges -- resources zuul"
+ register: git_log_resources
+
+- block:
+ - name: Create SSH private key tempfile
+ tempfile:
+ state: file
+ register: ssh_private_key_tmp
+
+ - name: Create SSH private key from secret
+ copy:
+ content: "{{ site_tenant_update.ssh_private_key }}"
+ dest: "{{ ssh_private_key_tmp.path }}"
+ mode: '0600'
+
+ - name: Add zuul ssh key
+ command: "ssh-add {{ ssh_private_key_tmp.path }}"
+
+ - name: Remove SSH private key from disk
+ command: "shred {{ ssh_private_key_tmp.path }}"
+
+ - name: Add site_tenant_update server to known hosts
+ known_hosts:
+ name: "{{ site_tenant_update.fqdn }}"
+ key: "{{ site_tenant_update.ssh_known_hosts }}"
+
+ - name: Run tenant_update
+ command: "ssh root@{{ site_tenant_update.fqdn }} tenant_update"
+ when: git_log_resources.stdout == git_log.stdout
diff --git a/playbooks/openshift/build-project.yaml b/playbooks/openshift/build-project.yaml
new file mode 100644
index 0000000..957c957
--- /dev/null
+++ b/playbooks/openshift/build-project.yaml
@@ -0,0 +1,84 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: prepare dumb bare clone of future state
+ git:
+ repo: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}"
+ dest: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git"
+ bare: yes
+
+- name: update server info for dumb http transport
+ command: git update-server-info
+ args:
+ chdir: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git"
+
+- name: create project dir on http server
+ command: >
+ {{ oc_command }} exec {{ zm_name }} -- mkdir -p {{ zuul.project.src_dir }}.git
+
+- name: copy project to http server
+ command: >
+ {{ oc_command }} rsync -q --progress=false
+ {{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git/
+ {{ zm_name }}:/opt/app-root/src/{{ zuul.project.src_dir }}.git/
+ no_log: true
+
+- name: create project ImageStream spec
+ openshift_raw:
+ state: present
+ namespace: "{{ zuul.resources['project'].namespace }}"
+ context: "{{ zuul.resources['project'].context }}"
+ definition:
+ apiVersion: v1
+ kind: ImageStream
+ metadata:
+ generation: 1
+ labels:
+ app: "{{ zuul.project.short_name }}"
+ name: "{{ zuul.project.short_name }}"
+ spec:
+ lookupPolicy:
+ local: false
+ register: _image_stream
+
+- name: create project BuildConfig spec
+ openshift_raw:
+ state: present
+ namespace: "{{ zuul.resources['project'].namespace }}"
+ context: "{{ zuul.resources['project'].context }}"
+ definition:
+ apiVersion: v1
+ kind: BuildConfig
+ metadata:
+ labels:
+ app: "{{ zuul.project.short_name }}"
+ name: "{{ zuul.project.short_name }}"
+ spec:
+ output:
+ to:
+ kind: ImageStreamTag
+ name: '{{ zuul.project.short_name }}:latest'
+ runPolicy: Serial
+ source:
+ git:
+ ref: master
+ uri: 'http://staging-http-server:8080/{{ zuul.project.src_dir }}.git'
+ type: Git
+ strategy:
+ sourceStrategy:
+ from:
+ kind: ImageStreamTag
+ name: '{{ base_image }}'
+ namespace: openshift
+ type: Source
+ triggers:
+ - type: ImageChange
+ - type: ConfigChange
+
+- name: wait for project image built
+ command: >
+ {{ oc_command }} get builds
+ -o "jsonpath={.items[?(@.metadata.labels.buildconfig!='staging-http-server')].status.phase}"
+ register: _project_build
+ retries: 600
+ delay: 1
+ until: "'Complete' in _project_build.stdout"
diff --git a/playbooks/openshift/deploy-project.yaml b/playbooks/openshift/deploy-project.yaml
new file mode 100644
index 0000000..bc1b63a
--- /dev/null
+++ b/playbooks/openshift/deploy-project.yaml
@@ -0,0 +1,66 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: start the project
+ openshift_raw:
+ state: present
+ namespace: "{{ zuul.resources['project'].namespace }}"
+ context: "{{ zuul.resources['project'].context }}"
+ definition:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ generation: 2
+ labels:
+ app: "{{ zuul.project.short_name }}"
+ name: "{{ zuul.project.short_name }}"
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: "{{ zuul.project.short_name }}"
+ strategy:
+ resources: {}
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ app: "{{ zuul.project.short_name }}"
+ deploymentconfig: "{{ zuul.project.short_name }}"
+ spec:
+ containers:
+ - image: "{{ _image_stream.result.status.dockerImageRepository }}"
+ name: "{{ zuul.project.short_name }}"
+ command: [ "/bin/bash", "-c", "--" ]
+ args: [ "while true; do sleep 30; done;" ]
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8443
+ protocol: TCP
+ resources: {}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ test: false
+
+- name: get project pod name
+ command: >
+ {{ oc_command }} get pods --field-selector=status.phase=Running
+ -o "jsonpath={.items[?(@.metadata.labels.app=='{{ zuul.project.short_name }}')].metadata.name}"
+ register: _pod_name
+ retries: 600
+ delay: 1
+ until: "zuul.project.short_name in _pod_name.stdout"
+
+- name: create pods list
+ set_fact:
+ pods_data:
+ pods:
+ - name: "{{ zuul.project.short_name }}"
+ pod: "{{ _pod_name.stdout }}"
+
+- name: store pods list in work_root
+ copy:
+ content: "{{ pods_data | to_yaml }}"
+ dest: "{{ zuul.executor.work_root }}/pods.yaml"
diff --git a/playbooks/openshift/pre.yaml b/playbooks/openshift/pre.yaml
new file mode 100644
index 0000000..44fff25
--- /dev/null
+++ b/playbooks/openshift/pre.yaml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+ tasks:
+ - block:
+ - import_role: name=emit-job-header
+ # We need those tasks to use log-inventory, see: https://review.openstack.org/577674
+ - name: Define zuul_info_dir fact
+ set_fact:
+ zuul_info_dir: "{{ zuul.executor.log_root }}/zuul-info"
+
+ - name: Ensure Zuul Ansible directory exists
+ delegate_to: localhost
+ run_once: true
+ file:
+ path: "{{ zuul_info_dir }}"
+ state: directory
+
+ - name: Define inventory_file fact
+ set_fact:
+ inventory_file: "/tmp/{{ zuul.build }}/ansible/inventory.yaml"
+
+ - import_role: name=log-inventory
+ vars:
+ zuul_log_url: "https://spfactory.storpool.com/logs"
+
+ - name: Set oc_command fact
+ set_fact:
+ oc_command: >
+ oc --context "{{ zuul.resources['project'].context }}"
+ --namespace "{{ zuul.resources['project'].namespace }}"
+
+ - include_tasks: prepare-namespace.yaml
+ - include_tasks: build-project.yaml
+ - include_tasks: deploy-project.yaml
diff --git a/playbooks/openshift/prepare-namespace.yaml b/playbooks/openshift/prepare-namespace.yaml
new file mode 100644
index 0000000..d583469
--- /dev/null
+++ b/playbooks/openshift/prepare-namespace.yaml
@@ -0,0 +1,80 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: create staging-http DeploymentConfig
+ openshift_raw:
+ state: present
+ namespace: "{{ zuul.resources['project'].namespace }}"
+ context: "{{ zuul.resources['project'].context }}"
+ definition:
+ apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ generation: 2
+ labels:
+ app: staging-http-server
+ name: staging-http-server
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: staging-http-server
+ strategy:
+ resources: {}
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ app: staging-http-server
+ deploymentconfig: staging-http-server
+ spec:
+ containers:
+ - image: "docker.io/softwarefactoryproject/staging-http-server"
+ # imagePullPolicy: Always
+ name: staging-http-server
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8443
+ protocol: TCP
+ resources: {}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ terminationGracePeriodSeconds: 30
+
+- name: create staging-http Service spec
+ openshift_raw:
+ state: present
+ namespace: "{{ zuul.resources['project'].namespace }}"
+ context: "{{ zuul.resources['project'].context }}"
+ definition:
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ app: staging-http-server
+ name: staging-http-server
+ spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ deploymentconfig: staging-http-server
+ sessionAffinity: None
+ type: ClusterIP
+ status:
+ loadBalancer: {}
+
+- name: get staging-http-server pod name
+ command: >
+ {{ oc_command }} get pods --field-selector=status.phase=Running
+ -o "jsonpath={.items[?(@.metadata.labels.app=='staging-http-server')].metadata.name}"
+ register: _zm_name
+ retries: 600
+ delay: 1
+ until: "'staging-http' in _zm_name.stdout"
+
+- name: register staging-http-server pod name
+ set_fact:
+ zm_name: "{{ _zm_name.stdout }}"
diff --git a/playbooks/openshift/unprivileged-machine.yaml b/playbooks/openshift/unprivileged-machine.yaml
new file mode 100644
index 0000000..431b844
--- /dev/null
+++ b/playbooks/openshift/unprivileged-machine.yaml
@@ -0,0 +1,39 @@
+---
+- hosts: localhost
+ tasks:
+ - block:
+ - import_role: name=emit-job-header
+ # We need those tasks to use log-inventory, see: https://review.openstack.org/577674
+ - name: Define zuul_info_dir fact
+ set_fact:
+ zuul_info_dir: "{{ zuul.executor.log_root }}/zuul-info"
+
+ - name: Ensure Zuul Ansible directory exists
+ delegate_to: localhost
+ run_once: true
+ file:
+ path: "{{ zuul_info_dir }}"
+ state: directory
+
+ - name: Define inventory_file fact
+ set_fact:
+ inventory_file: "/tmp/{{ zuul.build }}/ansible/inventory.yaml"
+
+ - import_role: name=log-inventory
+ vars:
+ zuul_log_url: "https://spfactory.storpool.com/logs"
+
+ - name: Create src directory
+ command: >
+ oc --context "{{ zuul.resources['pod'].context }}"
+ --namespace "{{ zuul.resources['pod'].namespace }}"
+ exec {{ zuul.resources['pod'].pod }} mkdir src
+
+ - name: Copy src repos to the pod
+ command: >
+ oc --context "{{ zuul.resources['pod'].context }}"
+ --namespace "{{ zuul.resources['pod'].namespace }}"
+ rsync -q --progress=false
+ {{ zuul.executor.src_root }}/
+ {{ zuul.resources['pod'].pod }}:src/
+ no_log: true
diff --git a/playbooks/pages/build.yaml b/playbooks/pages/build.yaml
new file mode 100644
index 0000000..b44b7e1
--- /dev/null
+++ b/playbooks/pages/build.yaml
@@ -0,0 +1,5 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: all
+ roles:
+ - role: build-pages
diff --git a/playbooks/pages/publish.yaml b/playbooks/pages/publish.yaml
new file mode 100644
index 0000000..602a609
--- /dev/null
+++ b/playbooks/pages/publish.yaml
@@ -0,0 +1,25 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: all
+ tasks:
+ - block:
+ - include_role: name=fetch-output
+ when:
+ - "ansible_connection != 'kubectl'"
+ - ansible_user_dir is defined
+ - block:
+ - include_role: name=fetch-output-openshift
+ when:
+ - "ansible_connection == 'kubectl'"
+ - ansible_user_dir is defined
+ - import_role: name=merge-output-to-logs
+ when: ansible_user_dir is defined
+
+- hosts: localhost
+ roles:
+ - role: add-fileserver
+ fileserver: "{{ site_pages }}"
+
+- hosts: "{{ site_pages.fqdn }}"
+ roles:
+ - role: upload-pages
diff --git a/playbooks/wait-for-changes-ahead.yaml b/playbooks/wait-for-changes-ahead.yaml
new file mode 100644
index 0000000..d78a109
--- /dev/null
+++ b/playbooks/wait-for-changes-ahead.yaml
@@ -0,0 +1,4 @@
+---
+- hosts: localhost
+ roles:
+ - wait-for-changes-ahead
diff --git a/policies/policy.yaml b/policies/policy.yaml
new file mode 100644
index 0000000..509f814
--- /dev/null
+++ b/policies/policy.yaml
@@ -0,0 +1,69 @@
+---
+# Default rules that should not be changed, but can be used as building blocks for more complex rules
+'admin_or_service': 'rule:is_admin or rule:is_service'
+'admin_api': 'rule:is_admin'
+# is_owner applies to API calls where a user is the target. is_owner will be True if the requestor is the target of the action
+'is_owner': 'username:%(username)s'
+'owner_api': 'rule:is_owner'
+'admin_or_owner': 'rule:is_admin or rule:is_owner'
+# group checking depending on the target project
+'is_ptl': 'group:%(project)s-ptl'
+'is_core': 'group:%(project)s-core'
+'is_dev': 'group:%(project)s-dev'
+'ptl_api': 'rule:is_ptl'
+'core_api': 'rule:is_core'
+'dev_api': 'rule:is_dev'
+'contributor_api': 'rule:ptl_api or rule:core_api or rule:dev_api'
+
+'authenticated_api': 'is_authenticated:True'
+'any': '@'
+'none': '!'
+# Backup API
+'managesf.backup:get': 'rule:admin_api'
+'managesf.backup:create': 'rule:admin_api'
+# Pages API CRUD
+'managesf.pages:get': 'rule:admin_api or rule:ptl_api'
+'managesf.pages:create': 'rule:admin_api or rule:ptl_api'
+'managesf.pages:delete': 'rule:admin_api or rule:ptl_api'
+# local user backend (for local authentication) API CRUD
+'managesf.localuser:get': 'rule:authenticated_api'
+'managesf.localuser:create_update': 'rule:admin_api or username:%(username)s'
+'managesf.localuser:delete': 'rule:admin_api or username:%(username)s'
+# This rule should be left alone, or local users will not be able to authenticate
+'managesf.localuser:bind': 'rule:any'
+# user API CRUD
+'managesf.user:get': 'rule:authenticated_api'
+'managesf.user:create': 'rule:admin_api or username:%(username)s'
+'managesf.user:delete': 'rule:admin_api'
+'managesf.user:update': 'rule:admin_api or username:%(username)s'
+# gerrit hooks API
+'managesf.hooks:trigger': 'rule:admin_or_service'
+# template tests for projects API
+'managesf.tests:add': 'rule:admin_api or rule:ptl_api'
+# config (permissions) API
+'managesf.config:get': 'rule:authenticated_api'
+# resources API
+'managesf.resources:get': 'rule:any'
+'managesf.resources:validate': 'rule:admin_or_service'
+'managesf.resources:apply': 'rule:admin_or_service'
+# jobs API
+'managesf.job:get': 'rule:any'
+'managesf.job:stop': 'rule:admin_or_service'
+'managesf.job:run': 'rule:admin_or_service'
+# nodes API
+'managesf.node:get': 'rule:any'
+'managesf.node:hold': 'rule:admin_or_service'
+'managesf.node:delete': 'rule:admin_or_service'
+'managesf.node:image-get': 'rule:any'
+'managesf.node:add_authorized_key': 'rule:admin_or_service'
+'managesf.node:image-start-update': 'rule:admin_or_service'
+'managesf.node:image-update-status': 'rule:admin_or_service'
+# zuul API
+'zuul.tenants:get': 'rule:any'
+'zuul.tenant.status:get': 'rule:any'
+'zuul.tenant.jobs:get': 'rule:any'
+'zuul.tenant.builds:get': 'rule:any'
+'zuul.tenant.console-stream:get': 'rule:any'
+'zuul.status:get': 'rule:any'
+'zuul.status.change:get': 'rule:any'
+'zuul.project.public_keys:get': 'rule:any'
diff --git a/resources/README b/resources/README
new file mode 100644
index 0000000..77afcdd
--- /dev/null
+++ b/resources/README
@@ -0,0 +1 @@
+Software Factory managed resources
diff --git a/resources/_internal.yaml b/resources/_internal.yaml
new file mode 100644
index 0000000..0cc6850
--- /dev/null
+++ b/resources/_internal.yaml
@@ -0,0 +1,75 @@
+# This file is managed by ansible, do not edit directly
+---
+resources:
+ tenants:
+ local:
+ description: "The local tenant."
+ url: "https://spfactory.storpool.com/manage"
+ default-connection: gerrit
+ tenant-options:
+ zuul/report-build-page: True
+ zuul/max-job-timeout: 10800
+
+ connections:
+ gerrit:
+ base-url: "https://spfactory.storpool.com/r"
+ type: gerrit
+ opendev.org:
+ base-url: "https://review.opendev.org/r"
+ type: gerrit
+
+ projects:
+ internal:
+ tenant: local
+ description: Internal configuration project
+ source-repositories:
+ - config:
+ zuul/config-project: True
+ - sf-jobs
+ - zuul/zuul-jobs:
+ connection: opendev.org
+ zuul/include: [job]
+ zuul/shadow: sf-jobs
+
+ repos:
+ config:
+ description: Config repository
+ acl: config-acl
+ sf-jobs:
+ description: Local job repository
+ acl: config-acl
+
+ acls:
+ config-acl:
+ file: |
+ [access "refs/*"]
+ read = group config-core
+ owner = group config-ptl
+ [access "refs/heads/*"]
+ label-Code-Review = -2..+2 group config-core
+ label-Code-Review = -2..+2 group config-ptl
+ label-Verified = -2..+2 group config-ptl
+ label-Workflow = -1..+1 group config-core
+ label-Workflow = -1..+1 group config-ptl
+ label-Workflow = -1..+0 group Registered Users
+ rebase = group config-core
+ abandon = group config-core
+ submit = group config-ptl
+ read = group config-core
+ read = group Registered Users
+ [access "refs/meta/config"]
+ read = group config-core
+ read = group Registered Users
+ [receive]
+ requireChangeId = true
+ [submit]
+ mergeContent = false
+ action = fast forward only
+ [plugin "reviewers-by-blame"]
+ maxReviewers = 5
+ ignoreDrafts = true
+ ignoreSubjectRegEx = (WIP|DNM)(.*)
+ groups:
+ - config-ptl
+ - config-core
+
diff --git a/resources/resources.yaml b/resources/resources.yaml
new file mode 100644
index 0000000..8138344
--- /dev/null
+++ b/resources/resources.yaml
@@ -0,0 +1,15 @@
+---
+# This file contains the default users group authorized to
+# manage Software Factory services configurations.
+#
+# Add trusted operator emails to the config-core or config-ptl members list.
+#
+resources:
+ groups:
+ config-ptl:
+ description: Team lead for the config repo
+ members:
+ - admin@spfactory.storpool.com
+ config-core:
+ description: Team core for the config repo
+ members: []
diff --git a/roles/fetch-output-openshift/defaults/main.yaml b/roles/fetch-output-openshift/defaults/main.yaml
new file mode 100644
index 0000000..b040970
--- /dev/null
+++ b/roles/fetch-output-openshift/defaults/main.yaml
@@ -0,0 +1,2 @@
+openshift_pods: "{{ zuul.resources }}"
+zuul_output_dir: "{{ ansible_user_dir }}/zuul-output"
diff --git a/roles/fetch-output-openshift/tasks/main.yaml b/roles/fetch-output-openshift/tasks/main.yaml
new file mode 100644
index 0000000..28ba3b3
--- /dev/null
+++ b/roles/fetch-output-openshift/tasks/main.yaml
@@ -0,0 +1,29 @@
+- name: Set log path for multiple nodes
+ set_fact:
+ log_path: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+ when: groups['all'] | length > 1
+
+- name: Set log path for single node
+ set_fact:
+ log_path: "{{ zuul.executor.log_root }}"
+ when: log_path is not defined
+
+- name: Ensure local output dirs
+ delegate_to: localhost
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ log_path }}"
+ - "{{ log_path }}/npm"
+ - "{{ zuul.executor.work_root }}/artifacts"
+ - "{{ zuul.executor.work_root }}/docs"
+
+- include_tasks: rsync.yaml
+ when: item.1.pod is defined
+ loop: "{{ openshift_pods.items()|list }}"
+ run_once: true
+
+- name: Remove empty directory
+ command: find "{{ zuul.executor.work_root }}" -empty -type d -delete
+ delegate_to: localhost
diff --git a/roles/fetch-output-openshift/tasks/rsync.yaml b/roles/fetch-output-openshift/tasks/rsync.yaml
new file mode 100644
index 0000000..98643bc
--- /dev/null
+++ b/roles/fetch-output-openshift/tasks/rsync.yaml
@@ -0,0 +1,22 @@
+---
+- name: Copy zuul-output from the pod to the executor
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ rsync -q --progress=false
+ {{ item.1.pod }}:{{ output.src }}/
+ {{ output.dst }}/
+ no_log: true
+ delegate_to: localhost
+ loop:
+ - src: "{{ zuul_output_dir }}/logs"
+ dst: "{{ log_path }}"
+# This needs: https://review.opendev.org/#/c/681748/10/roles/ensure-output-dirs/tasks/main.yaml
+# - src: "{{ zuul_output_dir }}/npm"
+# dst: "{{ log_path }}/npm"
+ - src: "{{ zuul_output_dir }}/artifacts"
+ dst: "{{ zuul.executor.work_root }}/artifacts"
+ - src: "{{ zuul_output_dir }}/docs"
+ dst: "{{ zuul.executor.work_root }}/docs"
+ loop_control:
+ loop_var: output
diff --git a/roles/prepare-workspace-openshift/README.rst b/roles/prepare-workspace-openshift/README.rst
new file mode 100644
index 0000000..caa5163
--- /dev/null
+++ b/roles/prepare-workspace-openshift/README.rst
@@ -0,0 +1,15 @@
+Prepare remote workspaces
+
+This role can be used instead of the `prepare-workspace` role when the
+synchronize module doesn't work with the kubectl connection. It copies the
+prepared source repos to the pods' cwd using the `oc rsync` command.
+
+This role is intended to run once before any other role in a Zuul job.
+This role requires the origin-clients to be installed.
+
+**Role Variables**
+
+.. zuul:rolevar:: openshift_pods
+ :default: {{ zuul.resources }}
+
+ The dictionary of pod name, pod information to copy the sources to.
diff --git a/roles/prepare-workspace-openshift/defaults/main.yaml b/roles/prepare-workspace-openshift/defaults/main.yaml
new file mode 100644
index 0000000..fa94895
--- /dev/null
+++ b/roles/prepare-workspace-openshift/defaults/main.yaml
@@ -0,0 +1 @@
+openshift_pods: "{{ zuul.resources }}"
diff --git a/roles/prepare-workspace-openshift/tasks/main.yaml b/roles/prepare-workspace-openshift/tasks/main.yaml
new file mode 100644
index 0000000..0d6d50b
--- /dev/null
+++ b/roles/prepare-workspace-openshift/tasks/main.yaml
@@ -0,0 +1,4 @@
+---
+- include_tasks: rsync.yaml
+ when: item.1.pod is defined
+ loop: "{{ openshift_pods.items()|list }}"
diff --git a/roles/prepare-workspace-openshift/tasks/rsync.yaml b/roles/prepare-workspace-openshift/tasks/rsync.yaml
new file mode 100644
index 0000000..c90c4ed
--- /dev/null
+++ b/roles/prepare-workspace-openshift/tasks/rsync.yaml
@@ -0,0 +1,17 @@
+---
+- name: Create src directory
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ exec {{ item.1.pod }} mkdir src
+ delegate_to: localhost
+
+- name: Copy src repos to the pod
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ rsync -q --progress=false
+ {{ zuul.executor.src_root }}/
+ {{ item.1.pod }}:src/
+ no_log: true
+ delegate_to: localhost
diff --git a/roles/remove-zuul-sshkey/README.rst b/roles/remove-zuul-sshkey/README.rst
new file mode 100644
index 0000000..2c2d3d2
--- /dev/null
+++ b/roles/remove-zuul-sshkey/README.rst
@@ -0,0 +1,4 @@
+Remove the zuul ssh key
+
+This role is intended to be run on the Zuul Executor at the start of
+every job to prevent access to public Zuul ssh connection.
diff --git a/roles/remove-zuul-sshkey/library/sshagent_remove_keys.py b/roles/remove-zuul-sshkey/library/sshagent_remove_keys.py
new file mode 100644
index 0000000..b4f6ea6
--- /dev/null
+++ b/roles/remove-zuul-sshkey/library/sshagent_remove_keys.py
@@ -0,0 +1,126 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import socket
+import struct
+import sys
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+SSH_AGENT_FAILURE = 5
+SSH_AGENT_SUCCESS = 6
+SSH_AGENT_IDENTITIES_ANSWER = 12
+
+SSH_AGENTC_REQUEST_IDENTITIES = 11
+SSH_AGENTC_REMOVE_IDENTITY = 18
+
+
+def unpack_string(data):
+ (l,) = struct.unpack('!i', data[:4])
+ d = data[4:4 + l]
+ return (d, data[4 + l:])
+
+
+def pack_string(data):
+ ret = struct.pack('!i', len(data))
+ return ret + data
+
+
+class Agent(object):
+ def __init__(self):
+ path = os.environ['SSH_AUTH_SOCK']
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(path)
+
+ def send(self, message_type, contents):
+ payload = struct.pack('!ib', len(contents) + 1, message_type)
+ payload += bytearray(contents)
+ self.sock.send(payload)
+
+ def recv(self):
+ buf = b''
+ while len(buf) < 5:
+ buf += self.sock.recv(1)
+ message_len, message_type = struct.unpack('!ib', buf[:5])
+ buf = buf[5:]
+ while len(buf) < message_len - 1:
+ buf += self.sock.recv(1)
+ return message_type, buf
+
+ def list(self):
+ self.send(SSH_AGENTC_REQUEST_IDENTITIES, b'')
+ mtype, data = self.recv()
+ if mtype != SSH_AGENT_IDENTITIES_ANSWER:
+ raise Exception("Invalid response to list")
+ (nkeys,) = struct.unpack('!i', data[:4])
+ data = data[4:]
+ keys = []
+ for i in range(nkeys):
+ blob, data = unpack_string(data)
+ comment, data = unpack_string(data)
+ keys.append((blob, comment))
+ return keys
+
+ def remove(self, blob):
+ self.send(SSH_AGENTC_REMOVE_IDENTITY, pack_string(blob))
+ mtype, data = self.recv()
+ if mtype != SSH_AGENT_SUCCESS:
+ raise Exception("Key was not removed")
+
+
+def run(remove):
+ a = Agent()
+ keys = a.list()
+ removed = []
+ to_remove = re.compile(remove)
+ for blob, comment in keys:
+ if not to_remove.match(comment.decode('utf8')):
+ continue
+ a.remove(blob)
+ removed.append(comment)
+ return removed
+
+
+def ansible_main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ remove=dict(required=True, type='str')))
+
+ removed = run(module.params.get('remove'))
+
+ module.exit_json(changed=(removed != []),
+ removed=removed)
+
+
+def cli_main():
+ parser = argparse.ArgumentParser(
+ description="Remove ssh keys from agent"
+ )
+ parser.add_argument('remove', nargs='+',
+ help='regex matching comments of keys to remove')
+ args = parser.parse_args()
+
+ removed = run(args.remove)
+ print(removed)
+
+
+if __name__ == '__main__':
+ if sys.stdin.isatty():
+ cli_main()
+ else:
+ ansible_main()
diff --git a/roles/remove-zuul-sshkey/tasks/main.yaml b/roles/remove-zuul-sshkey/tasks/main.yaml
new file mode 100644
index 0000000..e417f58
--- /dev/null
+++ b/roles/remove-zuul-sshkey/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Remove master key from local agent
+ # The master key has a filename, all others (e.g., per-project keys)
+ # have "(stdin)" as a comment.
+ sshagent_remove_keys:
+ remove: '^(?!\(stdin\)).*'
+ delegate_to: localhost
+ run_once: true
diff --git a/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py b/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py
new file mode 100755
index 0000000..33944d6
--- /dev/null
+++ b/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py
@@ -0,0 +1,107 @@
+#!/bin/env python3
+
+# Copyright (c) 2018 Red Hat
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+import traceback
+import json
+import time
+from six.moves import urllib
+from ansible.module_utils.basic import AnsibleModule
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: wait_for_changes_ahead
+short_description: Wait for zuul queue
+author: Tristan de Cacqueray (@tristanC)
+description:
+ - Wait for the changes ahead in the Zuul queue to succeed
+requirements:
+ - "python >= 3.5"
+options:
+ zuul_status_url:
+ description:
+ - The zuul web url to query change status
+ required: true
+ type: str
+ zuul_change:
+ description:
+ - The change number and patchset number, formatted as "<change>,<patchset>"
+ required: true
+ type: str
+ wait_timeout:
+ description:
+ - The maximum waiting time in minutes
+ default: 120
+ type: int
+'''
+
+log = list()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ zuul_status_url=dict(required=True, type='str'),
+ zuul_change=dict(required=True, type='str'),
+ wait_timeout=dict(type='int'),
+ )
+ )
+ zuul_status_url = module.params['zuul_status_url']
+ zuul_change = module.params['zuul_change']
+ wait_timeout = module.params.get('wait_timeout', 120)
+ if not wait_timeout:
+ wait_timeout = 120
+ wait_timeout = int(wait_timeout) * 60
+
+ if False:
+ module.exit_json(changed=False, msg="noop")
+ try:
+ start_time = time.monotonic()
+ while True:
+ req = urllib.request.urlopen(
+ zuul_status_url + "/change/%s" % zuul_change)
+ changes = json.loads(req.read().decode('utf-8'))
+
+ if not changes:
+ module.fail_json(msg="Unknown change", log="\n".join(log))
+
+ found = None
+ for change in changes:
+ if change["live"] is True:
+ found = change
+ break
+
+ if found and not change["item_ahead"]:
+ break
+
+ if time.monotonic() - start_time > wait_timeout:
+ module.fail_json(msg="Timeout", log="\n".join(log))
+
+ time.sleep(30)
+ except Exception as e:
+ tb = traceback.format_exc()
+ log.append(str(e))
+ log.append(tb)
+ module.fail_json(msg=str(e), log="\n".join(log))
+ finally:
+ log_text = "\n".join(log)
+ module.exit_json(changed=False, msg=log_text)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/wait-for-changes-ahead/tasks/main.yaml b/roles/wait-for-changes-ahead/tasks/main.yaml
new file mode 100644
index 0000000..f0c1aae
--- /dev/null
+++ b/roles/wait-for-changes-ahead/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Wait for changes ahead
+ wait_for_changes_ahead:
+ zuul_status_url: "{{ zuul_web_url }}/api/tenant/{{ zuul.tenant }}/status"
+ zuul_change: "{{ zuul.change }},{{ zuul.patchset }}"
+ wait_timeout: "{{ wait_timeout|default(120) }}"
diff --git a/zuul.d/README b/zuul.d/README
new file mode 100644
index 0000000..78444aa
--- /dev/null
+++ b/zuul.d/README
@@ -0,0 +1 @@
+Zuul config repo content
diff --git a/zuul.d/_jobs-base.yaml b/zuul.d/_jobs-base.yaml
new file mode 100644
index 0000000..33cafe4
--- /dev/null
+++ b/zuul.d/_jobs-base.yaml
@@ -0,0 +1,109 @@
+# This file is managed by ansible, do not edit directly
+---
+- job:
+ name: base
+ parent: null
+ description: The base job.
+ pre-run: playbooks/base/pre.yaml
+ post-run:
+ - playbooks/base/post.yaml
+ roles:
+ - zuul: sf-jobs
+ - zuul: zuul/zuul-jobs
+ extra-vars:
+ zuul_use_fetch_output: true
+ timeout: 1800
+ attempts: 3
+ secrets:
+ - site_sflogs
+ nodeset:
+ nodes:
+ - name: container
+ label: runc-centos
+
+- semaphore:
+ name: semaphore-config-update
+ max: 1
+
+- job:
+ name: config-check
+ parent: base
+ final: true
+ allowed-projects:
+ - config
+ description: Validate the config repo.
+ run: playbooks/config/check.yaml
+ post-run: playbooks/config/check-fetch-artifacts.yaml
+ secrets:
+ - service_user
+ vars:
+ zuul_log_url: "https://spfactory.storpool.com/logs"
+ gateway_url: "https://spfactory.storpool.com"
+ tenant_config: False
+ nodeset:
+ nodes: []
+
+- job:
+ name: config-update
+ parent: base
+ final: true
+ allowed-projects:
+ - config
+ description: Deploy config repo update.
+ run: playbooks/config/config-update.yaml
+ secrets:
+ - site_install_server
+ semaphore: semaphore-config-update
+ nodeset:
+ nodes: []
+
+- project:
+ name: config
+ check:
+ jobs:
+ - config-check
+ gate:
+ jobs:
+ - config-check
+ post:
+ jobs:
+ - config-update
+
+
+- project:
+ name: sf-jobs
+ check:
+ jobs:
+ - linters:
+ vars:
+ linters: [flake8, yamllint]
+ gate:
+ jobs:
+ - linters:
+ vars:
+ linters: [flake8, yamllint]
+
+
+- job:
+ name: wait-for-changes-ahead
+ parent: null
+ timeout: 7200
+ nodeset:
+ nodes: []
+ vars:
+ zuul_web_url: "https://spfactory.storpool.com/zuul"
+ description: |
+ This job waits for the queue ahead to be empty.
+
+ Responds to these variables:
+
+ .. zuul:jobvar:: zuul_web_url
+
+ The zuul web api url.
+
+ .. zuul:jobvar:: wait_timeout
+ :default: 120
+
+ Wait timeout in minutes.
+
+ run: playbooks/wait-for-changes-ahead.yaml
diff --git a/zuul.d/_jobs-openshift.yaml b/zuul.d/_jobs-openshift.yaml
new file mode 100644
index 0000000..a381bbb
--- /dev/null
+++ b/zuul.d/_jobs-openshift.yaml
@@ -0,0 +1,48 @@
+# This file is managed by sfconfig, do not edit manually
+# The Openshift driver is a Tech Preview, use at your own risk...
+---
+- job:
+ name: base-openshift-native
+ parent: null
+ description: |
+ A base job that builds and deploys a container image using the
+ project's future state.
+ pre-run: playbooks/openshift/pre.yaml
+ post-run:
+ - playbooks/base/post.yaml
+ roles:
+ - zuul: sf-jobs
+ - zuul: zuul/zuul-jobs
+ timeout: 1800
+ # Set attempts to 1 until it's working well
+ attempts: 1
+ secrets:
+ - site_sflogs
+ nodeset:
+ nodes:
+ - name: project
+ label: openshift-project
+ vars:
+ base_image: "python:3.6"
+
+- job:
+ name: base-openshift-pod
+ parent: null
+ description: |
+ A base job to spawn a vanilla container and copy the project's
+ future state
+ pre-run: playbooks/openshift/unprivileged-machine.yaml
+ post-run:
+ - playbooks/base/post.yaml
+ roles:
+ - zuul: sf-jobs
+ - zuul: zuul/zuul-jobs
+ # Set attempts to 1 until it's working well
+ attempts: 1
+ secrets:
+ - site_sflogs
+ timeout: 1800
+ nodeset:
+ nodes:
+ - name: pod
+ label: openshift-pod-fedora
diff --git a/zuul.d/_jobs-pages.yaml b/zuul.d/_jobs-pages.yaml
new file mode 100644
index 0000000..a705bf6
--- /dev/null
+++ b/zuul.d/_jobs-pages.yaml
@@ -0,0 +1,24 @@
+# This file is managed by ansible, do not edit directly
+---
+- job:
+ name: build-and-publish-pages
+ parent: base
+ description: Base build and publish pages job
+ run: playbooks/pages/build.yaml
+ post-run:
+ - playbooks/pages/publish.yaml
+ secrets:
+ - site_pages
+ protected: true
+ vars:
+ src_dir: /
+ fqdn: spfactory.storpool.com
+
+- job:
+ name: build-pages
+ parent: base
+ description: Pages build
+ run: playbooks/pages/build.yaml
+ vars:
+ src_dir: /
+
diff --git a/zuul.d/_pipelines.yaml b/zuul.d/_pipelines.yaml
new file mode 100644
index 0000000..ec77979
--- /dev/null
+++ b/zuul.d/_pipelines.yaml
@@ -0,0 +1,222 @@
+# This file is managed by ansible, do not edit directly
+---
+- pipeline:
+ name: check
+ description: |
+ Newly uploaded patchsets enter this pipeline to receive an
+ initial +/-1 Verified vote.
+ manager: independent
+ require:
+ gerrit:
+ open: True
+ current-patchset: True
+ opendev.org:
+ open: True
+ current-patchset: True
+ trigger:
+ gerrit:
+ - event: patchset-created
+ - event: change-restored
+ - event: comment-added
+ comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*(recheck|reverify)
+ - event: comment-added
+ require-approval:
+ - Verified: [-1, -2]
+ username: zuul
+ approval:
+ - Workflow: 1
+ opendev.org:
+ - event: patchset-created
+ - event: change-restored
+ - event: comment-added
+ comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*(recheck|reverify)
+ - event: comment-added
+ require-approval:
+ - Verified: [-1, -2]
+ username: zuul
+ approval:
+ - Workflow: 1
+ start:
+ gerrit:
+ Verified: 0
+ opendev.org:
+ Verified: 0
+ success:
+ gerrit:
+ Verified: 1
+ opendev.org:
+ Verified: 1
+ failure:
+ gerrit:
+ Verified: -1
+ opendev.org:
+ Verified: -1
+
+- pipeline:
+ name: gate
+ description: |
+ Changes that have been approved by core developers are enqueued
+ in order in this pipeline, and if they pass tests, will be
+ merged.
+ success-message: Build succeeded (gate pipeline).
+ failure-message: |
+ Build failed (gate pipeline). For information on how to proceed, see
+ http://docs.openstack.org/infra/manual/developers.html#automated-testing
+ manager: dependent
+ precedence: high
+ post-review: True
+ require:
+ gerrit:
+ open: True
+ current-patchset: True
+ approval:
+ - Verified: [1, 2]
+ username: zuul
+ - Workflow: 1
+ opendev.org:
+ open: True
+ current-patchset: True
+ approval:
+ - Verified: [1, 2]
+ username: zuul
+ - Workflow: 1
+ trigger:
+ gerrit:
+ - event: comment-added
+ approval:
+ - Workflow: 1
+ - event: comment-added
+ approval:
+ - Verified: 1
+ username: zuul
+ opendev.org:
+ - event: comment-added
+ approval:
+ - Workflow: 1
+ - event: comment-added
+ approval:
+ - Verified: 1
+ username: zuul
+ start:
+ gerrit:
+ Verified: 0
+ opendev.org:
+ Verified: 0
+ success:
+ gerrit:
+ Verified: 2
+ submit: true
+ opendev.org:
+ Verified: 2
+ submit: true
+ failure:
+ gerrit:
+ Verified: -2
+ opendev.org:
+ Verified: -2
+ window-floor: 20
+ window-increase-factor: 2
+
+- pipeline:
+ name: post
+ post-review: true
+ description: This pipeline runs jobs that operate after each change is merged.
+ manager: supercedent
+ precedence: low
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^refs/heads/.*$
+ opendev.org:
+ - event: ref-updated
+ ref: ^refs/heads/.*$
+ failure:
+ smtp:
+ from: "zuul@spfactory.storpool.com"
+ to: "root@localhost"
+ subject: '[Zuul] Job failed in post pipeline: {change.project}'
+
+- pipeline:
+ name: pre-release
+ description: When a commit is tagged with a pre-release tag, this pipeline runs jobs that publish archives and documentation.
+ manager: independent
+ precedence: high
+ post-review: True
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^refs/tags/([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9alpha|beta|rc.-]+))?(?:\+([0-9a-zA-Z.-]+))?$
+ opendev.org:
+ - event: ref-updated
+ ref: ^refs/tags/([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9alpha|beta|rc.-]+))?(?:\+([0-9a-zA-Z.-]+))?$
+ failure:
+ smtp:
+ from: "zuul@spfactory.storpool.com"
+ to: "root@localhost"
+ subject: '[Zuul] Job failed in pre-release pipeline: {change.project}'
+
+- pipeline:
+ name: release
+ post-review: true
+ description: When a commit is tagged as a release, this pipeline runs jobs that publish archives and documentation.
+ manager: independent
+ precedence: high
+ trigger:
+ gerrit:
+ - event: ref-updated
+ ref: ^refs/tags/([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z.-]+))?(?:\+([0-9a-zA-Z.-]+))?$
+ opendev.org:
+ - event: ref-updated
+ ref: ^refs/tags/([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9a-zA-Z.-]+))?(?:\+([0-9a-zA-Z.-]+))?$
+ failure:
+ smtp:
+ from: "zuul@spfactory.storpool.com"
+ to: "root@localhost"
+ subject: '[Zuul] Job failed in release pipeline: {change.project}'
+
+- pipeline:
+ name: periodic
+ post-review: true
+ description: Jobs in this queue are triggered daily.
+ manager: independent
+ precedence: low
+ trigger:
+ timer:
+ - time: '0 0 * * *'
+ failure:
+ smtp:
+ from: "zuul@spfactory.storpool.com"
+ to: "root@localhost"
+ subject: '[Zuul] Job failed in periodic pipeline: {change.project}'
+
+- pipeline:
+ name: experimental
+ description: On-demand pipeline for requesting a run against a set of jobs that are not yet gating. Leave review comment of "check experimental" to run jobs in this pipeline.
+ success-message: Build succeeded (experimental pipeline).
+ failure-message: Build failed (experimental pipeline).
+ manager: independent
+ precedence: normal
+ trigger:
+ gerrit:
+ - event: comment-added
+ comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*check experimental\s*$
+ opendev.org:
+ - event: comment-added
+ comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*check experimental\s*$
+ success:
+ gerrit: {}
+ opendev.org: {}
+ failure:
+ gerrit: {}
+ opendev.org: {}
+
+- pipeline:
+ name: merge-check
+ description: >
+ Each time a change merges, this pipeline verifies that all open changes
+ on the same project are still mergeable.
+ failure-message: Build failed (merge-check pipeline).
+ manager: independent
+ ignore-dependencies: true
+ precedence: low
+ trigger: {}
diff --git a/zuul.d/_projects.yaml b/zuul.d/_projects.yaml
new file mode 100644
index 0000000..baad817
--- /dev/null
+++ b/zuul.d/_projects.yaml
@@ -0,0 +1,8 @@
+# This file is managed by ansible, do not edit directly
+---
+- project:
+ name: ^.*$
+ check:
+ jobs: []
+ gate:
+ jobs: []
diff --git a/zuul.d/_secret_install_server.yaml b/zuul.d/_secret_install_server.yaml
new file mode 100644
index 0000000..506ad2f
--- /dev/null
+++ b/zuul.d/_secret_install_server.yaml
@@ -0,0 +1,48 @@
+# This file is managed by ansible, do not edit directly
+- secret:
+ name: site_install_server
+ data:
+ fqdn: spfactory.storpool.com
+ ssh_known_hosts: spfactory.storpool.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV/cSRvpNT9RTJvRjBevfzXPPehCdotepMj0xgg59fgdbk+wsl9Pl4AezmQGf8B0V+NLkSLhfJku2OAjCWCOikFN3sbc1qq30XH+3749H+VOvwQUSL02bPv+9Yzc19MEU29CNKWbeOAZRZ0omXB/8skSJ6+VwYgm1Na3bVkz4FL4kSjd+GCEVCR/xY5BotdX/ehWiwHoJsYfU0vpm6IbLFAgmzK6fDJFe8mPmwl6NwFA/OIQu777cmvhvWyZXGDzDOiUjNd49bkd4GfUEAmEQXtVZ7Ag3krrI7VZ0FvgKOSl1K8llws6dNp/xnvGY8BlTjXvYi/K26cb9R7DkcBpfF
+ ssh_username: root
+ ssh_private_key: !encrypted/pkcs1-oaep
+ - tytsCEXl+kDilydm3qjOwDNcXtGz+zJYi6nHFzbYU7zvAHUkEVWDbIkrMl1PNLp8/Yu1l
+ kp7Ase5CFujBl0QEE3lXLH3yjBmRp+/AjbFHcU67e5nWybFz9MDj+MZtrF1l0QmZoXKVO
+ 0MgQPnNqJtj3SO+Jq7XzATnmjMMJjfBds+b6dxgUsSUZpjdS1qXRqiLifuITu61cOCQMI
+ 6sFYCUukmh4a/yjFYnauJtPMpAjfelmqZ2j9uiOGCqUpa2SMTX1FpBdxAY3xsZjMIuugE
+ r4L/28a8QZBTfckbkoZ30oANRlBUrf8hcMBWlP753FaCKjDD5faALONFcFvRkvn1Squ3M
+ ny401LrmOdwuOeH2X2GnMeT1YDUuYUrttEmVgYpplxRXHdgiL+BSdLWnFw+A2dfUMrPKa
+ Ki09LkcXKORt/5vitTyISvicCZi3j3Fel/a7WfRsmyq5XU/njAKAL7/TiJU9wqQdlebAG
+ RgNfvouPojh82m2eeu6RbleMFuhZu2IrK8glLmQ3DRL4SUjxLxD026MP6Au+8+4/pY6qL
+ M7Dc9y+lcsrUiBn2KB8NAiEdfHimZ5yhr0nrlF9ckhe+Xo6ITK+OlVpFKaWTNsucJFd5g
+ 8udQC+UUBaFdXsOd4m44nsTujMHmkTd8SLYWjB1WXjPFIHTWpdEoWIQhmdo/B0=
+ - OC0LqUoMZsKY5zYWpGGZeGXDdmSga/lV21N1QDT4g1g7ygyJ/OXOqHamoAdrhN7rKggAC
+ tMYe4bV767zoBejgJdJzcDGKCmsvJx/T2eYHpLT6SLhcFxoQvZVL+7JP/eBm6homyBRga
+ WgPoSmz7WW0vGg9UyqpSXd2KXnL0YlFeCkiJ3WMvqfFx0RTW4vDbU2Wn9Pyu7vh39tid7
+ 6yn+2ZhFwrxPMmS/b07RYA3Z6qftnqJKE1lDfuXpi49cpwWaVnNmmkvN1TLozns0PDLD2
+ O4loI/PXCyhSVqE8/TdtMDAtTjK0y8Q+dBR4eOxXv3+S3gWtkfk9pbLQC1wOGes3ywpfD
+ RKbO4dRJed/5QySbRdK3UPfgLI7tWJRMOlkkiwnPA+eLPDu22F/5rAqI5VTMOpPiARa+d
+ 8r5MsSKzi9JFRbca0y9h7UC6LpFjux1tizIumX4gVZDMtkjIitLcpBhJFNWCHTZ6lJfvj
+ ftD9SvCAnHjxZ9n3w6lk1Zm66Y7arj7ReKIMcaWpnrHNCrIaz7EybT1FHn5od3eKaBfME
+ DMED8GyiqVr2FodlulT0RB43wi3/s/KemTx35gF2FoQ4DiTRAnNsl/cVPLnrTBOu4Hvuv
+ yvoDLolFQz+42gzB1v/5N8zexd52hEiuVTDK8Pv3HUmXr3+Cd0k7Sz4ecxQMP8=
+ - d5YF8iXnwJlYpNHCcypSdEQwK1gBvgsWvykgG1t+lu9DfeqrKDYyhfDxVCp2JsyePtU10
+ 6v+YF4oDUPmFku6zfG+ASohHwHIhiPoMHdBaczGB2o3U3mf8jsrb35jmqruuOS6PIf92d
+ 9DHPvIKFBFfkv1Hne2CKIqWpW5Mj2QaffdSV/L9gxtySpmLkhKH/S2WL2OMBC10IuQV7D
+ EZRjPoeMEGTmm/pa11vDEUr5c7ufHqZCWlj6+hFqqvHuI8v5V77LIouvdSpZvGPN8VnE4
+ CSYHj38CLNBAvBlWEtAbG/IpopV2Xe3NhO+l1SbWkIc+BHeidxfF6/MM2jc0R4dkO5xtw
+ xMRwL0ZL4pJPMijZEHvnIhoVKX90tKIdclj3Fy3XVtwCpuIPMV2zOG74wvECYrw19DYxh
+ HSU+6gjC0mkK3EtcLrOX5jNenWmM3nsQeHimhHdGU2tokTmmnH35BFG5NaF55zlEpU0dZ
+ o676XTotZeXlfCpxtfTsjO+5aE/AgYbcSr5qQjEEir7+G0fz43uIjqsP6X5Z8fWsqceRX
+ JBR+XmKbGTYTa7Ywm82rU1/guHaDmlz72kNUC0b3DiIoh8OJtCtQ35NbdmyluJOTXSKfA
+ kaVisKMXIKQSbuHajZ1dOk7HZ6KrtJycFIsRjNGSyUjQzbILnfgKHcTFSD2BX0=
+ - f+xqK2UcH81o8ZFxjBv7Ppacq2BVTiTE/SjVbhUFvLOQUQkFOpqZ08DYM2vA4dEmAdvPN
+ cDkS95KPBeMinA6REFMtiBtITuUmCGrhuJk3lJnR0jp+d6xKURQIAvb7jMs2dP7CcmlG3
+ hGmPgsDEMlfpYrLXHRbgbyJgeUDuFVLtQZ77NiF6U0J1vMgvVC7jU1wkFzHpK463qDGAR
+ 6mFShdJ3CCbtJLfjApyz7aBxune68bNWH+np2rzkXGcGZeDxdtM/XfS5FCie8X7AjroHd
+ Be21WAQy7DAgcFdlSEFLLjJXv0+5J/ASPHKPf0RMhKGzpmpoBqh/CUETa3lkUaHwPnmU2
+ Jon2kLqQz3P0qi+ZJH0Zs2AznsxIM+8KelFzoDPEDpjKFlbbdgI2AJc/p5qgzvr2ywUoH
+ EIni9L2uXxTArRX1wvU6ZfHXsinrs9tB6EePnqny7/wfTaTrqlippcGSUPYt1XwOn15+P
+ xPFegeJyFtHjoS8uoa2X0CI0g7x/w4pHbNvcvGQp39FPx+8D98uNyRYN6z7Ien4BH11Qg
+ 3McCc7++qfdIv23BAm+Q63gOJSJtkZM6lRxBXPUhzEQZClp/K74d/UlUvrfi2GL3ovbGC
+ y3cvES6LUsAhVIhSdqmmsJXTFn0y1hIjTE15JAaNlgfNb9yp4bVUppWSYlWZ+4=
diff --git a/zuul.d/_secret_service_user.yaml b/zuul.d/_secret_service_user.yaml
new file mode 100644
index 0000000..c23d0a1
--- /dev/null
+++ b/zuul.d/_secret_service_user.yaml
@@ -0,0 +1,15 @@
+# This file is managed by ansible, do not edit directly
+- secret:
+ name: service_user
+ data:
+ password: !encrypted/pkcs1-oaep
+ - TdizvvvNZl073X2FPgYF6vuhugnBEabrD2Ft3TBwdRSS+05LogoFYeEr4P+zpGBsp+KPj
+ wTeP7T8ByTQAsm3rfBYNG7Db+fk1Inf9Y0szyhxJnlflCwIzQ+uGsa6E6U22FJJd22qDQ
+ ZE/PDgfD7nYpfrM8eaJMVh8w+RyDi/mYOD4zMVCkIT2pd53KsdAdxW68e97V4nEQGeOQB
+ oUj/l0lvXVEPHVbJBDVLwRCVhmHzv/kTE4drs1+scfRb9VayOPqz8a2GShiTmKj9DT4KM
+ QvIeEaCs8A/2RYuK3xPte9NuScNMrev20H9f1HUTUhqpxeRGsSnliv6fvAcy01EqN5ZKZ
+ KuNal8Nh2eu6emfT/ncatOuwb0VbFcLSrNtixEVKOYlsaAx5xCT9UsopFgnrYRCe2Zi7t
+ a4PXorilgury+wDtmCBVuSsATeQ9lgAFsKron77M5W7NygUjnoqekIglepPS1xzybGx2P
+ Ut+VHRKuK6rn+ENI/tcSWgdAyxB+1bQJFiUe0HmriZU1D4MjmYS3IxGInE/rWkKG7zYQR
+ 8Ih3C5QBMflRbdBIslycdiRqxYhhx8yH5WxT7efphjNV9K5elKvNUq2sEbB89+jMCTGha
+ tCvGy4wdumekC6Ab+2cZYsHDxXq6vYWarGIJixUgJBqp6faDxPWURTMddVbx24=
diff --git a/zuul.d/_secret_sflogs.yaml b/zuul.d/_secret_sflogs.yaml
new file mode 100644
index 0000000..96e91fa
--- /dev/null
+++ b/zuul.d/_secret_sflogs.yaml
@@ -0,0 +1,49 @@
+# This file is managed by ansible, do not edit directly
+- secret:
+ name: site_sflogs
+ data:
+ fqdn: spfactory.storpool.com
+ path: /var/www/logs
+ ssh_known_hosts: spfactory.storpool.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV/cSRvpNT9RTJvRjBevfzXPPehCdotepMj0xgg59fgdbk+wsl9Pl4AezmQGf8B0V+NLkSLhfJku2OAjCWCOikFN3sbc1qq30XH+3749H+VOvwQUSL02bPv+9Yzc19MEU29CNKWbeOAZRZ0omXB/8skSJ6+VwYgm1Na3bVkz4FL4kSjd+GCEVCR/xY5BotdX/ehWiwHoJsYfU0vpm6IbLFAgmzK6fDJFe8mPmwl6NwFA/OIQu777cmvhvWyZXGDzDOiUjNd49bkd4GfUEAmEQXtVZ7Ag3krrI7VZ0FvgKOSl1K8llws6dNp/xnvGY8BlTjXvYi/K26cb9R7DkcBpfF
+ ssh_username: loguser
+ ssh_private_key: !encrypted/pkcs1-oaep
+ - cmuFrqJyhKwWgeItFEfMPLcYunsb4wnIUqZ0yt+ZeWLZm//IMPdPhVtvlxBr2yRK/C7dC
+ mcvJWPooq61mSIIQf4/wzj+3Sbd60dbRPFs11Njq2jkSiKRC6Q4JBd2hGjS1xFRZkrAP1
+ cHXdXRbs6zDx6rTgKDw/5svf4HnCuKrouqxJ1f62kMvnJoW4ZZAszNOxp+CYIF3OILT3j
+ 2da31E9+J0EFyJ146nwxTYUSV3SueAKoEgMzRos10L/zkUs9snGMyEDO5SXeeMEfwFVNl
+ 0UkjG5u33qe5jboI4efDH5hNa16762hP3M1YJwegjKuXvdOghYkuqzWSjMdd+UrFQrySi
+ tJyrtf6+oGsvA7TjD+HYp0ksQwycYZGJaSWHfME2cExo1SylR4JekEB5rzh7ZSNQcvLuw
+ +FvPzqEC3pL9EfLs2pDJoDFQXb+LdQ0j0xm5orhFdWHMwwzI7RUAFpmAvKkEpwgDHEcdL
+ fB54BhBRpeED+cxGXGZr2/EwpA5qAIp+fJi1+oIkApm6ob8BbtXIKiiV9fa1VwZYiqEtf
+ 9Wx4WscUgCZ3syR2Jq6OFoApIyorO7cN+TyQL+LYblb0jx60PqspFDG+QUYG2bf1v8+rB
+ jRXdsAMZVniA0HJghNPxyKHTHx+lSCFIXzuK66H0Afo4EYQzb/1eG0z7Pd0Fe0=
+ - EKR5OGPTvEf8LFG4tG1tHu8CNAcvMSo/6mKfBFQAur7JylP3VGfMAl2hbao79gxpnltIy
+ BQKOdJM1ynkZ/crQlCV0eVI+/fX6JFxasdKzAGv3yt4yHraXkz6MpsvroFL3p1Fcctvtr
+ NvixtF5qEIa4VvNjUvNwz1Ym++emkyfulrBp6xh/6fDynITFrKYphuIsOOFRYudk7iigJ
+ RBh6k+1r57DGuQQUtOaiGU2VWRQKPAYP2ewlNAh0/p4qJMqb4Q+PtcqGonovN/K0fAyd9
+ ZkCCuAJ4N0k5jMIBIqxioDoTEQe3tUTmj+8FOzjDZVD5BWU4mOz0pYEcYSHEQuUG1SpoP
+ 0quE8bBGA0fb466VM1ZRZthxBVHJa8e4HmXUzJTjLsd4vjpWFaPLdR+LDjc+wUOpGFFF1
+ 5M2O0DnZsrF1W6aEfRKf2s0PONCobEO7YKpqdHEDEOH/wbWr3lPifeHT84BkS2O43dgkJ
+ f4TqzzaNaOqYWDBz1iDhp9Uy1BdlmY6S17XiXdqYOqKvgMBTM5uAseRRefbzkLZc/a6Vu
+ CA3EiAoyo+uFYkJHS8b9QhzECwuCNjF7VTvmPkxNQ5k5t2IU3Prin4z8M67my9/byRbl1
+ qH2CvShql161B+85tJAn+QNX1UFuGIGOmTSYti0QIxI3C+meiEKUgsLCgiLB7w=
+ - WqjELYsN/5QrVsyO2g4c/o949fM8lBRrVW4hR4y0MNQ8y+ZW3GywgaV66KiNLHxmpEogE
+ h6Kq/VvRD4tbqGkab/BqYUlEUKzlHnqId1d8e/aXyFzPgqHHzDicJpzHJRwuCjkSoelyJ
+ wq+ra99GJlQY8fgt3zQm8+UP5xI0yaSB88l+rNy1xbeA+t98pPb0drNsmZ+XNtnDdG/wf
+ Zn4OvonHYmr17bWRLyANj6Fv40W4iLFrRDUWBuAowYX8+8SHWiHsdOBb/lnlYX78xjg7e
+ d7OmiJJtEC9cXFHAwfIIZDnFthEQeIgjtp0j8iFna5neRoTgyS+khiKIxuOVx/Ub4+mEK
+ GayOKSjw5vnuPY7Cnw+32TeEG3WTb1uYejW7qUlMCyFKCuKT5uElZEkpn/583X5W8+7Rh
+ PUVaF8Cu5r+0zQHdeI3yiGLf8io1s7IXV4Ev3fef5oH/l/oA0zSuDexScVSlZtDD/AVYe
+ 8XrC3BwtETUNacriNVrEboidEIRQQ7JvHtIVd6Kvh00YAoJ7V6lTt5ji6pX8MsVyPZb31
+ TPeGFvCKZR21FUB0bBfKvNWK9IfaerOX5pyIfScDjYlxTDvGD5ncYXavS97WSnAGujZna
+ caSki0chao3YoEj6GUW8/z5xOL5N0j1lApwYNnHd9r7mm+TzBE1AaLi1bMm+/k=
+ - eCZPmHeC+UKQCsPtYh2rDiczI3OSAKRAO0GzrM3Ap7ihFlailQJhFuArgQ/ng6q/lEVSM
+ ikzVvycHgwalcmPc+3crMITOjUjlLytONjljFkhaXpMt5lgWK+AAdz73f5s+CVdoNo4tp
+ 61BOP4LzF65NF6L5zjPoq/o3NkekWHEaGEv2ZsbkILl0wOhl0kPXc0sx8lmyeQnMvsKKX
+ dFJYkL3Orlzpd3qDRAoLTi8WvCthQN1k3SgoOUpfFn1pq/z75rQrzc3mms2Toetga6QYm
+ 9bliIjl4fgGuX6o05xGAugaYB4FFLjOZSeTPhhbFxP1nL6kRUm3z1Ns73XTbekClZPZA1
+ aaS75G2tTP32lXKA0lHn4ULP+irZUgJPBbM2SrsLARRWhNVsEDyTFXJfA3A5oALY7d+3U
+ M5007JaS83qnDFgtRXVMMCKIW25BiJNTiPysYv7LL4J5ndY5HvygHnnweLMgDbYuB7WlS
+ rTr0kuad9uQ1/FobwyvAS8LmdQHdv3kw6T05CWPb5TnXTK8uz6I/vRHDgXm16VYIul8rd
+ 4RUEs87Fhj2QNUfLp26lR9bl3hJawBDLGZ1iHY81+cdbfqkFttl7awAQ0ExYjsuaeA6gq
+ 8ThC+mYwJ3SA6EV9EaB1k43HOf9+LqEN8OosPjrT9wnxSsmFnraQl1jtVLcdFw=
diff --git a/zuul.d/_secret_sfpages.yaml b/zuul.d/_secret_sfpages.yaml
new file mode 100644
index 0000000..db8e380
--- /dev/null
+++ b/zuul.d/_secret_sfpages.yaml
@@ -0,0 +1,49 @@
+# This file is managed by ansible, do not edit directly
+- secret:
+ name: site_pages
+ data:
+ fqdn: managesf.spfactory.storpool.com
+ ssh_known_hosts: managesf.spfactory.storpool.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV/cSRvpNT9RTJvRjBevfzXPPehCdotepMj0xgg59fgdbk+wsl9Pl4AezmQGf8B0V+NLkSLhfJku2OAjCWCOikFN3sbc1qq30XH+3749H+VOvwQUSL02bPv+9Yzc19MEU29CNKWbeOAZRZ0omXB/8skSJ6+VwYgm1Na3bVkz4FL4kSjd+GCEVCR/xY5BotdX/ehWiwHoJsYfU0vpm6IbLFAgmzK6fDJFe8mPmwl6NwFA/OIQu777cmvhvWyZXGDzDOiUjNd49bkd4GfUEAmEQXtVZ7Ag3krrI7VZ0FvgKOSl1K8llws6dNp/xnvGY8BlTjXvYi/K26cb9R7DkcBpfF
+ path: /var/www/pages
+ ssh_username: pagesuser
+ ssh_private_key: !encrypted/pkcs1-oaep
+ - MLeXKSzH3VfjP20Jj5y8EBqzkBwPHMAOtm/WUBd3JblIK4i09pIryHU4Fh6S2gOs6Imdh
+ 9+rid65/S8cNtiGINSYh9JwtCbiiTpWcqXF472K1FNLXqKgiXKKBcSgV5Dr19daPnWeU5
+ YkfRjfviAZxx8WSnh9yxMZJYF7RhyfH1arTqWKrlzyT6E2k6qgp2WKIFI25bG8QVQs3CV
+ /rQAXAqsKcxOK5DGI7t7Riuf0viHK/rjyJW/F7v/Je3Ch8jav3XmPnUVC7nnBlWjmBjCX
+ aqpR2IYZn45L7CZA48Pjdmh7yVkVs7g8vf07VkVG6Pm3I3JzPx8A+qrFlYapSdJs2hqsD
+ /K1FZiN9qsDdritChoVEVkYMJ9VwsUdub3ogkCWbqVqRa0Du07t3qlOvV8sUpdySPQvXz
+ He2ptrQiDRBp2Vn9nPJycrBcSTUOY4+cu+en5dGTrQ4WuXZ2m3W1XP4ZSQLW75jQlCNKl
+ gp0pN1nzkH0JRY46NQHPn53wpeVG9UoHlnodZsReUahdgAMAEX04TgAR6bI/9FZj/Hb1a
+ CUvxBSajA02gO3m6Y42EP3dzs8ikFLz9P+ZkzKY48+GgeDOMgxZ6kHiJg8P76QEdNatOI
+ agmJqC9zeAv8p8FEqg/tLUx61YJxxGZqfygKAUCyJqVRdPkGE6nid19zB5CfAM=
+ - Kg2Jnmne0g1NKmDAse0nzeaJqZF1sVLFYzUA2SO4WIQM+TkfYxmiZDv41UPqgz+pjIwFQ
+ vDm8vlsM/ne81X/4zdbKLCk31vHIDh1WxW4u8bF4f685JBt0GE3SOVrAw3VaT2VnQ3660
+ NoK8w54S3sBPEwfxXPUam6kmJLblYc4WVzdrURof3WF/uDGrsdSnfJCCg7gtxCeESJqRe
+ vX3OkKh9TAA0zQ/FYkKzNkiKs48ph2Mo/XCZXdsAsuW1q2fqRWBqN+kvp2TpxZTBFEA6L
+ mEQaVJ4uuAivE8RuJKu7lbDN7Ei6Wn7fSl4dNA8kBRfeX2W1C4Bg4ndT+ae8Jg3NQW7T5
+ EbyAkz0gDs3MC6keOJ+rFUr/biu8INHe0rk5J1jhM0uj/neiaHZeWy4TARuh7Fx9Ojn6J
+ z2A7L3+M46+LluBvmzP/wJ7CdegC4cT2lundOXR+l/p56nTdK1kQwUWiJmqb7X+0trJ66
+ 7j+dIz8g7o6oYsKcS8ZuQApA2X8ohyXSSzxasOFqb+k09+2B8p6Y1W9DDTmDwfI8uFvyD
+ gRrnfRU9JlYRhL7nTEoM4gcSKRQBosDRbO9fLnXSTn0HWnWFLwqNlLXoMaVE/SgHNe/l8
+ tMNwKNaNHCwsSj4unbaEZhrhyF3eLnLBeD6bPLBCq9gfWvtUeup5TTKBxJmDi4=
+ - oLeJveHuB0JBlYnp11ZWE9HIFIejV9Zb+AN30rY1PSCoMlgui5FTfn0Qsi2LiZ9ekKmdX
+ axTNaromfULRXK/X4wqCAgLXiJURbo6phhxsEHT+vlyGLRzv3Wt7fhqpelQcBjqQOoSVN
+ 99pXvymUYlVe9etFptzO0ngW8BMb9B8o90FwxHV6NCLCdYi12cktiqPDxH63vqSm1UrLb
+ aFKdaAo4JUfL8u0cPY0QxBSFveKiPl4T3A+qPUL7qatcy5H6E2NaHd6AzcoSOZAIjMY/x
+ SW3zPCEWB64E2TlHs6IHWC/D7XH7wkMvdZa8Y+iRVZKs0yKARnzmA/eZUP63tNy28kFw9
+ myCJdF8TNg5cZ3yoq3umV2itZCqFQWXs7h1HgLVlb1McE86BLOfPoM4ZY6cejXG02xL5Q
+ uKsImDQgpq/PhCBKyKm3G+lDtHRw2tlJHE7Z8DW0O6r4sO0bwDSvPlW7TQfmsK/QUZUQv
+ S9hvXHlOD41uHRwEZ94gUQ9HmNyk+4p6xhth0fZ70brrr1GsWivn8r7ouZ8GDsDfQZrFA
+ auAtsQw+mauI4IvDKdQHIdLCS4JmWwXz+JDOokRt4dHGDdHeHAAYDmt4MPcBZZ4TPQRpG
+ Du2vGIjLbtdXDdWAAVgejz7tlkIFMY/uhLPJX88/mUS5Q7+dmrSxc8CVeC+rLA=
+ - jRBtdI9wAAeHecu/Z4sXqyEE+qE58DdvZIVp9TQeOxG/RaiPzczqWm59fncocz2pFESRx
+ 2YswvbWFhQveS5ATzxA1qctwFyqC/8r4W4k9JISRvLsTSyArxH5XUdgNNM0iVC3TZw8ZJ
+ bN2YMsfrsMyz7fLtlun+5iydSacpWofPX8Jd/AhLR3LtfWgsHDZpviMt5KDKt/OdztHFB
+ QWPTDAaAnSeQk/kHAy22/+tb2ddDwOlm8icA85gdaQGTNG908gL+/LRxXG+++YQBziy1F
+ fxLrBYmACqrHymt46xD1X6DTV78klMr2hnlW532jixsi2CMwsNPwhwyS38YGgkmbn2N1x
+ accpecT0oHAEIA8phNwwTqgDrysQfYUKgDJkMXGthLWOs56kKQ5rp5TYicl7fgO9UQqXu
+ ev5O2L3o4e21uwXITGVomseQdY+Qe04Y/T/WM7HeDjr/7JmEwimZ7hNbOeMtoia9LttbN
+ g5pr4xGDi3m0Q6P+0L3W2+hJpAVbr0jO/VDoSvSwyLaVK2QHZ+I4BgXnNJkIz0t+kGplt
+ saiXEUpBOBxkznLXMZ445PmpTADf/ifWzk0Hcji+ICj17gtQ4/fZ0HqWhUoikwLx9rqXU
+ GNW/99j784mdip1MFALBZmFwkiT+Kc+67M5ib7lrp6xYEQ8hG/kKRA1YybtX2A=
diff --git a/zuul/README b/zuul/README
new file mode 100644
index 0000000..6549d4a
--- /dev/null
+++ b/zuul/README
@@ -0,0 +1,11 @@
+This directory contains the zuul main.yaml configuration files.
+
+To test a new repository with zuul, add a file *my-project.yaml*:
+- tenant:
+ name: 'local'
+ source:
+ gerrit:
+ untrusted-projects:
+ - my-project-repo-name
+
+Documentation is available here: https://softwarefactory-project.io/docs/zuul