
initial commit

standalone · Nicolas Massé · 5 years ago · commit 3eb11f69fd
  1. .gitignore (+7 lines)
  2. README.md (+199 lines)
  3. bootstrap.tf (+42 lines)
  4. install-config.yaml.sample (+24 lines)
  5. lb.tf (+65 lines)
  6. main.tf (+35 lines)
  7. master.tf (+43 lines)
  8. network.tf (+36 lines)
  9. post-install.tf (+17 lines)
  10. provider.tf (+8 lines)
  11. public_dns.tf (+23 lines)
  12. storage.tf (+61 lines)
  13. templates/dns.env (+5 lines)
  14. templates/lb/cloud-init.cfg (+85 lines)
  15. templates/lb/haproxy.cfg (+74 lines)
  16. templates/lb/network-config.cfg (+11 lines)
  17. templates/network.xslt (+25 lines)
  18. templates/nfs-provisioner.yaml (+115 lines)
  19. templates/registry-pv.yaml (+25 lines)
  20. templates/storage/cloud-init.cfg (+104 lines)
  21. templates/storage/network-config.cfg (+4 lines)
  22. variables.tf (+146 lines)
  23. worker.tf (+43 lines)

.gitignore (new file, 7 lines)
*.lock.hcl
*.tfstate
*.backup
.terraform
.vscode
terraform.tfvars
install-config.yaml

README.md (new file, 199 lines)
# OpenShift 4 Installation
## Prerequisites
### On your local machine
Install Terraform.
```sh
cat > hashicorp.repo <<"EOF"
[hashicorp]
name=Hashicorp Stable - $basearch
baseurl=https://rpm.releases.hashicorp.com/RHEL/8/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://rpm.releases.hashicorp.com/gpg
EOF
sudo dnf config-manager --add-repo hashicorp.repo
sudo dnf -y install terraform
```
Install the libvirt terraform provider.
```sh
curl -Lo /tmp/libvirt-provider.tgz https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.6.3/terraform-provider-libvirt-0.6.3+git.1604843676.67f4f2aa.Fedora_32.x86_64.tar.gz
mkdir -p ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
tar xvf /tmp/libvirt-provider.tgz -C ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
```
Install the Gandi terraform provider.
```sh
git clone https://github.com/go-gandi/terraform-provider-gandi
cd terraform-provider-gandi
make
make install
```
### On the hypervisor
```sh
curl https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.7/4.7.0/rhcos-4.7.0-x86_64-qemu.x86_64.qcow2.gz |gunzip -c > /var/lib/libvirt/images/rhcos-4.7.0-x86_64-qemu.x86_64.qcow2
curl -Lo /var/lib/libvirt/images/centos-stream-8.qcow2 http://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2
```
## Install
Define the cluster name and the bastion.
```sh
cluster=ocp4
bastion=nicolas@hp-ml350.itix.fr
```
Install **openshift-installer** and **oc** on the bastion.
```sh
ssh -A "$bastion" curl -Lo /tmp/openshift-installer.tgz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-4.7/openshift-install-linux.tar.gz
ssh -A "$bastion" sudo tar zxvf /tmp/openshift-installer.tgz -C /usr/local/bin openshift-install
ssh -A "$bastion" curl -Lo /tmp/oc.tgz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest-4.7/openshift-client-linux.tar.gz
ssh -A "$bastion" sudo tar zxvf /tmp/oc.tgz -C /usr/local/bin oc kubectl
```
Create the cluster configuration files.
```sh
mkdir "$cluster"
cp install-config.yaml.sample "$cluster/install-config.yaml"
openshift-install create manifests --dir="$cluster"
openshift-install create ignition-configs --dir="$cluster"
```
Customize the terraform variables.
```sh
cat > terraform.tfvars <<EOF
base_domain = "itix.xyz"
external_mac_address = "02:00:00:00:00:04"
public_cluster_ip = "90.79.1.247"
cluster_name = "$cluster"
EOF
```
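Only the four values above have no defaults. If you need to deviate from the stock sizing, the same file can override any of the knobs declared in `variables.tf` (node counts, vCPUs, memory in MiB, disk sizes in bytes). A hedged sketch with illustrative values, not recommendations:
```sh
cat >> terraform.tfvars <<EOF
worker_nodes       = 2
worker_memory_size = 12288                          # MiB
worker_disk_size   = $((100 * 1024 * 1024 * 1024))  # bytes
EOF
```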
Apply the terraform plan.
```sh
terraform apply
```
Copy the cluster definition to the bastion and run the bootstrap process from there.
```sh
scp -r "$cluster" "$bastion:'$cluster'"
ssh -A "$bastion" openshift-install --dir="$cluster" wait-for bootstrap-complete --log-level=info
```
Delete the bootstrap node.
```sh
echo 'bootstrap_nodes = 0' >> terraform.tfvars
terraform apply
```
Approve the pending CSRs.
```sh
for i in {0..120}; do
  ssh -A "$bastion" oc --kubeconfig="$cluster/auth/kubeconfig" get csr --no-headers \
    | awk '/Pending/ {print $1}' \
    | xargs --no-run-if-empty ssh -A "$bastion" oc --kubeconfig="$cluster/auth/kubeconfig" adm certificate approve
  sleep 15
done &
```
Make sure all CSRs have been issued.
```sh
ssh -A "$bastion" oc --kubeconfig="$cluster/auth/kubeconfig" get csr --no-headers
```
Provision storage for the registry.
```sh
ssh -A "$bastion" oc apply --kubeconfig="$cluster/auth/kubeconfig" -f - < "$cluster/registry-pv.yaml"
```
Patch the registry to use the new storage.
```sh
ssh -A "$bastion" oc patch --kubeconfig="$cluster/auth/kubeconfig" configs.imageregistry.operator.openshift.io cluster --type='json' --patch-file=/dev/fd/0 <<EOF
[{"op": "remove", "path": "/spec/storage" },{"op": "add", "path": "/spec/storage", "value": {"pvc":{"claim": "registry-storage"}}}]
EOF
```
Deploy the NFS provisioner.
```sh
ssh -A "$bastion" oc apply --kubeconfig="$cluster/auth/kubeconfig" -f - < "$cluster/nfs-provisioner.yaml"
```
Set the image registry `managementState` from `Removed` to `Managed`.
```sh
ssh -A "$bastion" oc patch --kubeconfig="$cluster/auth/kubeconfig" configs.imageregistry.operator.openshift.io cluster --type merge --patch-file=/dev/fd/0 <<EOF
{"spec":{"managementState": "Managed"}}
EOF
```
Wait for installation to finish.
```sh
ssh -A "$bastion" openshift-install --dir="$cluster" wait-for install-complete
```
## Let's Encrypt certificates
Install lego.
```sh
curl -Lo /tmp/lego.tgz https://github.com/go-acme/lego/releases/download/v4.3.1/lego_v4.3.1_linux_amd64.tar.gz
sudo tar zxvf /tmp/lego.tgz -C /usr/local/bin lego
```
Request a public certificate.
```sh
export GANDIV5_API_KEY="123...456"
. "$cluster/dns.env"
lego -m "nmasse@redhat.com" -d "$LE_API_HOSTNAME" -d "$LE_ROUTER_HOSTNAME" -a --dns gandiv5 run --no-bundle
```
Create a secret containing the new router certificate.
```sh
oc create secret tls router-certs-$(date "+%Y-%m-%d") --cert=$HOME/.lego/certificates/$LE_API_HOSTNAME.crt --key=$HOME/.lego/certificates/$LE_API_HOSTNAME.key -n openshift-ingress --dry-run -o yaml > router.yaml
ssh -A "$bastion" oc apply -f - -n openshift-ingress < router.yaml
```
Update the ingress configuration.
```sh
ssh -A "$bastion" oc patch ingresscontroller default -n openshift-ingress-operator --type=merge --patch-file=/dev/fd/0 <<EOF
{"spec": { "defaultCertificate": { "name": "$(date "+%Y-%m-%d")" }}}
EOF
```
Create a secret containing the new API certificate.
```sh
oc create secret tls api-certs-$(date "+%Y-%m-%d") --cert=$HOME/.lego/certificates/$LE_API_HOSTNAME.crt --key=$HOME/.lego/certificates/$LE_API_HOSTNAME.key -n openshift-config --dry-run -o yaml > api.yaml
ssh -A "$bastion" oc apply -f - -n openshift-config < api.yaml
```
Update the apiserver configuration.
```sh
ssh -A "$bastion" oc patch apiserver cluster --type=merge --patch-file=/dev/fd/0 <<EOF
{"spec":{"servingCerts":{"namedCertificates":[{"names":["$LE_API_HOSTNAME"],"servingCertificate":{"name": "api-certs-$(date "+%Y-%m-%d")"}}]}}}
EOF
```

bootstrap.tf (new file, 42 lines)
resource "libvirt_volume" "bootstrap_disk" {
name = "${local.bootstrap_name}.${var.volume_format}"
count = var.bootstrap_nodes
format = var.volume_format
pool = var.pool_name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
size = var.bootstrap_disk_size
}
resource "libvirt_ignition" "bootstrap_ignition" {
name = "${var.cluster_name}-bootstrap-ignition"
content = file("${path.module}/${var.cluster_name}/bootstrap.ign")
}
resource "libvirt_domain" "bootstrap" {
name = local.bootstrap_name
count = var.bootstrap_nodes
vcpu = var.bootstrap_vcpu
memory = var.bootstrap_memory_size
coreos_ignition = libvirt_ignition.bootstrap_ignition.id
disk {
volume_id = element(libvirt_volume.bootstrap_disk.*.id, count.index)
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_id = libvirt_network.ocp_net.id
addresses = [cidrhost(var.network_ip_range, 5)]
hostname = "bootstrap"
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
}
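Note: both `libvirt_volume.bootstrap_disk` and `libvirt_domain.bootstrap` use `count = var.bootstrap_nodes`, which is what the "Delete the bootstrap node" step in the README relies on: setting the variable to 0 makes the next apply destroy the bootstrap domain and its disk. A quick sketch of that step:
```sh
echo 'bootstrap_nodes = 0' >> terraform.tfvars
terraform plan    # the bootstrap domain and volume should be listed as "to destroy"
terraform apply
```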

install-config.yaml.sample (new file, 24 lines)
apiVersion: v1
baseDomain: itix.xyz
compute:
- name: worker
  hyperthreading: Enabled
  replicas: 3
controlPlane:
  name: master
  hyperthreading: Enabled
  replicas: 3
metadata:
  name: ocp4
networking:
  clusterNetworks:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  serviceNetwork:
  - 172.30.0.0/16
  networkType: OpenShiftSDN
platform:
  none: {}
pullSecret: YOUR_PULL_SECRET_HERE
sshKey: |
  YOUR_SSH_PUBLIC_KEY_HERE

lb.tf (new file, 65 lines)
resource "libvirt_cloudinit_disk" "lb_cloudinit" {
name = "${local.lb_name}-cloudinit.iso"
user_data = data.template_file.lb_user_data.rendered
network_config = data.template_file.lb_network_config.rendered
pool = var.pool_name
}
data "template_file" "lb_user_data" {
template = file("${path.module}/templates/lb/cloud-init.cfg")
vars = {
haproxy_cfg = templatefile("${path.module}/templates/lb/haproxy.cfg", {
master_nodes = { for i in libvirt_domain.master : i.name => i.network_interface.0.addresses[0] },
worker_nodes = { for i in libvirt_domain.worker : i.name => i.network_interface.0.addresses[0] },
bootstrap_nodes = { for i in libvirt_domain.bootstrap : i.name => i.network_interface.0.addresses[0] }
})
}
}
data "template_file" "lb_network_config" {
template = file("${path.module}/templates/lb/network-config.cfg")
vars = {
ip = cidrhost(var.network_ip_range, 4)
dns = cidrhost(var.network_ip_range, 1)
gw = cidrhost(var.network_ip_range, 1)
}
}
resource "libvirt_volume" "lb_disk" {
name = "${local.lb_name}.${var.volume_format}"
format = var.volume_format
pool = var.pool_name
base_volume_name = "${var.centos_image}.${var.volume_format}"
size = var.lb_disk_size
}
resource "libvirt_domain" "lb" {
name = local.lb_name
vcpu = var.lb_vcpu
memory = var.lb_memory_size
cloudinit = libvirt_cloudinit_disk.lb_cloudinit.id
autostart = true
disk {
volume_id = libvirt_volume.lb_disk.id
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_id = libvirt_network.ocp_net.id
addresses = [cidrhost(var.network_ip_range, 4)]
hostname = "lb"
wait_for_lease = false
}
network_interface {
bridge = var.external_ifname
mac = var.external_mac_address
wait_for_lease = false
}
}

main.tf (new file, 35 lines)
terraform {
  required_version = ">= 0.13"

  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = ">=0.6.3"
    }
    local = {
      source  = "hashicorp/local"
      version = ">=2.0.0"
    }
    template = {
      source  = "hashicorp/template"
      version = ">=2.2.0"
    }
    ignition = {
      source  = "community-terraform-providers/ignition"
      version = "2.1.2"
    }
    gandi = {
      version = "2.0.0"
      source  = "github/go-gandi/gandi"
    }
  }
}

locals {
  ocp_nodes        = { for i in concat(libvirt_domain.bootstrap, libvirt_domain.master, libvirt_domain.worker) : i.name => i.network_interface.0.addresses[0] }
  additional_nodes = { (libvirt_domain.lb.name) = cidrhost(var.network_ip_range, 4), (libvirt_domain.storage.name) = libvirt_domain.storage.network_interface.0.addresses[0] }
  all_nodes        = merge(local.ocp_nodes, local.additional_nodes)
}

output "machines" {
  value = local.all_nodes
}
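The `machines` output maps every domain name to its IP address, which is convenient for ad-hoc SSH or DNS checks. A sketch of reading it after an apply; the names and addresses shown are illustrative, derived from the defaults in `variables.tf`:
```sh
terraform output machines
# {
#   "ocp4-bootstrap" = "10.10.3.5"
#   "ocp4-lb"        = "10.10.3.4"
#   "ocp4-master-01" = "10.10.3.11"
#   "ocp4-storage"   = "10.10.3.6"
#   ...
# }
```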

master.tf (new file, 43 lines)
resource "libvirt_volume" "master_disk" {
name = "${format(local.master_format, count.index + 1)}.${var.volume_format}"
count = var.master_nodes
format = var.volume_format
pool = var.pool_name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
size = var.master_disk_size
}
resource "libvirt_ignition" "master_ignition" {
name = "${var.cluster_name}-master-ignition"
content = file("${path.module}/${var.cluster_name}/master.ign")
}
resource "libvirt_domain" "master" {
count = var.master_nodes
name = format(local.master_format, count.index + 1)
vcpu = var.master_vcpu
memory = var.master_memory_size
coreos_ignition = libvirt_ignition.master_ignition.id
autostart = true
disk {
volume_id = element(libvirt_volume.master_disk.*.id, count.index)
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_id = libvirt_network.ocp_net.id
addresses = [cidrhost(var.network_ip_range, 11 + count.index)]
hostname = format("master%d", count.index + 1)
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
}

network.tf (new file, 36 lines)
resource "libvirt_network" "ocp_net" {
name = var.cluster_name
mode = "nat"
domain = local.network_domain
addresses = [var.network_ip_range]
autostart = true
dns {
enabled = true
hosts {
hostname = "host"
ip = cidrhost(var.network_ip_range, 1)
}
hosts {
hostname = "api"
ip = cidrhost(var.network_ip_range, 4)
}
hosts {
hostname = "api-int"
ip = cidrhost(var.network_ip_range, 4)
}
hosts {
hostname = "etcd"
ip = cidrhost(var.network_ip_range, 4)
}
}
dhcp {
enabled = true
}
xml {
xslt = templatefile("${path.module}/templates/network.xslt", { alias = "apps.${local.network_domain}", ip = cidrhost(var.network_ip_range, 4) })
}
}
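The `dns` block answers for `api`, `api-int` and `etcd` inside the cluster domain and points them at the load balancer, while the XSLT referenced in the `xml` block adds a dnsmasq wildcard for `*.apps`. A hedged verification from the hypervisor, assuming the default `network_ip_range` (10.10.3.0/24) and the README's `cluster_name`/`base_domain` (ocp4/itix.xyz):
```sh
dig +short api.ocp4.itix.xyz @10.10.3.1
dig +short console-openshift-console.apps.ocp4.itix.xyz @10.10.3.1
# both should answer 10.10.3.4, the load balancer address
```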

post-install.tf (new file, 17 lines)
resource "local_file" "registry_pv" {
content = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
filename = "${var.cluster_name}/registry-pv.yaml"
file_permission = "0644"
}
resource "local_file" "nfs_provisioner" {
content = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
filename = "${var.cluster_name}/nfs-provisioner.yaml"
file_permission = "0644"
}
resource "local_file" "dns_config" {
content = templatefile("${path.module}/templates/dns.env", { api_server = "api.${local.network_domain}", router = "*.apps.${local.network_domain}", dns_zone = var.base_domain, cluster_name = var.cluster_name })
filename = "${var.cluster_name}/dns.env"
file_permission = "0644"
}

provider.tf (new file, 8 lines)
provider "libvirt" {
uri = "qemu:///system"
}
provider "gandi" {
# key = "<livedns apikey>"
# sharing_id = "<sharing id>"
}

public_dns.tf (new file, 23 lines)
data "gandi_domain" "public_domain" {
name = var.base_domain
}
resource "gandi_livedns_record" "api_record" {
zone = data.gandi_domain.public_domain.id
name = "api.ocp4"
type = "A"
ttl = 300
values = [
var.public_cluster_ip
]
}
resource "gandi_livedns_record" "router_record" {
zone = data.gandi_domain.public_domain.id
name = "*.apps.ocp4"
type = "A"
ttl = 300
values = [
var.public_cluster_ip
]
}
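These records publish the cluster's external IP in the Gandi LiveDNS zone; note that the record names hard-code `ocp4` rather than deriving from `var.cluster_name`. A hedged propagation check, assuming `base_domain = itix.xyz` as in the README:
```sh
dig +short api.ocp4.itix.xyz
dig +short test.apps.ocp4.itix.xyz
# both should return the value of public_cluster_ip
```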

storage.tf (new file, 61 lines)
resource "libvirt_cloudinit_disk" "storage_cloudinit" {
name = "${local.storage_name}-cloudinit.iso"
user_data = data.template_file.storage_user_data.rendered
network_config = data.template_file.storage_network_config.rendered
pool = var.pool_name
}
data "template_file" "storage_user_data" {
template = file("${path.module}/templates/storage/cloud-init.cfg")
}
data "template_file" "storage_network_config" {
template = file("${path.module}/templates/storage/network-config.cfg")
}
resource "libvirt_volume" "storage_os_disk" {
name = "${local.storage_name}-os.${var.volume_format}"
format = var.volume_format
pool = var.pool_name
base_volume_name = "${var.centos_image}.${var.volume_format}"
}
resource "libvirt_volume" "storage_data_disk" {
name = "${local.storage_name}-data.${var.volume_format}"
format = var.volume_format
pool = var.pool_name
size = var.storage_disk_size
}
resource "libvirt_domain" "storage" {
name = local.storage_name
vcpu = var.storage_vcpu
memory = var.storage_memory_size
cloudinit = libvirt_cloudinit_disk.storage_cloudinit.id
autostart = true
disk {
volume_id = libvirt_volume.storage_os_disk.id
}
disk {
volume_id = libvirt_volume.storage_data_disk.id
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_id = libvirt_network.ocp_net.id
addresses = [cidrhost(var.network_ip_range, 6)]
hostname = "storage"
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
}

templates/dns.env (new file, 5 lines)
export LE_API_HOSTNAME="${api_server}"
export LE_ROUTER_HOSTNAME="${router}"
export DNS_ZONE="${dns_zone}"
export DNS_API_RECORD="api.${cluster_name}"
export DNS_ROUTER_RECORD="*.apps.${cluster_name}"
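post-install.tf renders this template into `<cluster>/dns.env`, which the Let's Encrypt section of the README sources before running lego. With the values used in the README (`cluster_name = ocp4`, `base_domain = itix.xyz`), the rendered file would look roughly like this:
```sh
export LE_API_HOSTNAME="api.ocp4.itix.xyz"
export LE_ROUTER_HOSTNAME="*.apps.ocp4.itix.xyz"
export DNS_ZONE="itix.xyz"
export DNS_API_RECORD="api.ocp4"
export DNS_ROUTER_RECORD="*.apps.ocp4"
```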

templates/lb/cloud-init.cfg (new file, 85 lines)
#cloud-config
# vim: syntax=yaml

resize_rootfs: true

users:
- name: nicolas
  gecos: Nicolas MASSE
  groups: wheel
  lock_passwd: false
  passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
  ssh_authorized_keys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain

packages:
# Useful tools
- net-tools
- hdparm
- iptraf
- iotop
- vim-enhanced
- tmux
- rsync
- tree
- unzip
- tar
- tcpdump
- telnet
- strace
- bind-utils
# Load Balancer
- haproxy
- firewalld

runcmd:
# Enable KVM virsh console access
- [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
- [ "systemctl", "start", "--no-block", "serial-getty@ttyS0.service" ]
# Disable SSH password authentication
- [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
- [ "systemctl", "restart", "sshd" ]
# Enable sudo without password
- [ "sed", "-i.post-install", "-e", "s/^%wheel\tALL=(ALL)\tALL/%wheel ALL=(ALL) NOPASSWD: ALL/", "/etc/sudoers" ]
# Fix file permissions
- [ "chown", "-R", "nicolas:nicolas", "/home/nicolas" ]
# Configure HAProxy
- [ "systemctl", "enable", "firewalld" ]
- [ "systemctl", "start", "firewalld" ]
- [ "setsebool", "-P", "haproxy_connect_any=1" ]
- [ "systemctl", "enable", "haproxy" ]
- [ "systemctl", "restart", "haproxy" ]
- [ "firewall-cmd", "--add-service=http", "--permanent" ]
- [ "firewall-cmd", "--add-service=https", "--permanent" ]
- [ "firewall-cmd", "--add-port=6443/tcp", "--permanent" ]
- [ "firewall-cmd", "--add-port=22623/tcp", "--permanent" ]
- [ "firewall-cmd", "--reload" ]

write_files:
- path: /root/.bashrc
  # PS1='\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]# '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzFtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXSMgJwo=
  encoding: base64
  append: true
- path: /etc/skel/.bashrc
  # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXVwkICcK
  encoding: base64
  append: true
- path: /home/nicolas/.bashrc
  # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXVwkICcK
  encoding: base64
  append: true
- path: /etc/haproxy/haproxy.cfg
  content: ${jsonencode(haproxy_cfg)}

templates/lb/haproxy.cfg (new file, 74 lines)
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option dontlognull
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

listen ingress-http
    bind 0.0.0.0:80
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:80 check
%{endfor~}
%{for name, ip in worker_nodes~}
    server ${name} ${ip}:80 check
%{endfor~}

listen ingress-https
    bind 0.0.0.0:443
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:443 check
%{endfor~}
%{for name, ip in worker_nodes~}
    server ${name} ${ip}:443 check
%{endfor~}

listen api
    bind 0.0.0.0:6443
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:6443 check
%{endfor~}
%{for name, ip in bootstrap_nodes~}
    server ${name} ${ip}:6443 check
%{endfor~}

listen machine-config-server
    bind 0.0.0.0:22623
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:22623 check
%{endfor~}
%{for name, ip in bootstrap_nodes~}
    server ${name} ${ip}:22623 check
%{endfor~}
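lb.tf passes maps of node names to IPs into this template, so each `%{for}` loop expands to one `server` line per node. An illustrative rendering of the `api` section, assuming the default addressing (masters at 10.10.3.11-13, bootstrap at 10.10.3.5):
```
listen api
    bind 0.0.0.0:6443
    mode tcp
    server ocp4-master-01 10.10.3.11:6443 check
    server ocp4-master-02 10.10.3.12:6443 check
    server ocp4-master-03 10.10.3.13:6443 check
    server ocp4-bootstrap 10.10.3.5:6443 check
```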

templates/lb/network-config.cfg (new file, 11 lines)
version: 2
ethernets:
  eth0:
    addresses:
      - ${ip}/24
    #gateway4: ${gw}
    nameservers:
      addresses: [${dns}]
  eth1:
    dhcp4: true
    nameservers: {}

templates/network.xslt (new file, 25 lines)
<?xml version="1.0" ?>
<xsl:stylesheet version="1.0"
                xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:dnsmasq="http://libvirt.org/schemas/network/dnsmasq/1.0">
  <xsl:output omit-xml-declaration="yes" indent="yes"/>

  <!-- Identity transform -->
  <xsl:template match="node()|@*">
    <xsl:copy>
      <xsl:apply-templates select="node()|@*"/>
    </xsl:copy>
  </xsl:template>

  <!-- Append custom dnsmasq options to the network element -->
  <xsl:template match="/network">
    <xsl:copy>
      <xsl:copy-of select="@*"/>
      <xsl:copy-of select="node()"/>
      <dnsmasq:options>
        <dnsmasq:option value="address=/${alias}/${ip}"/>
      </dnsmasq:options>
    </xsl:copy>
  </xsl:template>
</xsl:stylesheet>
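The identity template copies the libvirt-generated network definition unchanged; the second template appends a dnsmasq options block, which is what gives `*.apps` a wildcard record. With the values passed from network.tf and the README's settings, the fragment added to the network XML (inspectable with `virsh net-dumpxml ocp4`) would look roughly like:
```
<dnsmasq:options>
  <dnsmasq:option value="address=/apps.ocp4.itix.xyz/10.10.3.4"/>
</dnsmasq:options>
```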

templates/nfs-provisioner.yaml (new file, 115 lines)
kind: Namespace
apiVersion: v1
metadata:
  name: "openshift-nfs-provisioner"
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
  namespace: "openshift-nfs-provisioner"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: "openshift-nfs-provisioner"
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner
  namespace: "openshift-nfs-provisioner"
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["security.openshift.io"]
  resourceNames: ["hostmount-anyuid"]
  resources: ["securitycontextconstraints"]
  verbs: ["use"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner
  namespace: "openshift-nfs-provisioner"
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
roleRef:
  kind: Role
  name: nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: "openshift-nfs-provisioner"
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: redhat-emea-ssa-team/hetzner-ocp4
        - name: NFS_SERVER
          value: "${nfs_server}"
        - name: NFS_PATH
          value: "/srv/nfs/pv-user-pvs"
      volumes:
      - name: nfs-client-root
        nfs:
          server: "${nfs_server}"
          path: "/srv/nfs/pv-user-pvs"
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: redhat-emea-ssa-team/hetzner-ocp4
parameters:
  archiveOnDelete: "false"

templates/registry-pv.yaml (new file, 25 lines)
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-registry-storage
spec:
  accessModes:
  - ReadWriteMany
  capacity:
    storage: 100Gi
  nfs:
    path: "/srv/nfs/pv-infra-registry"
    server: "${nfs_server}"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: registry-storage
  namespace: openshift-image-registry
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Gi

templates/storage/cloud-init.cfg (new file, 104 lines)
#cloud-config
# vim: syntax=yaml

disk_setup:
  /dev/vdb:
    table_type: mbr
    layout:
    - 100
    overwrite: false

fs_setup:
- label: storage
  filesystem: xfs
  device: /dev/vdb
  partition: 1

resize_rootfs: true

mounts:
- [ "/dev/vdb1", "/srv", "xfs", "defaults", "0", "0" ]

users:
- name: nicolas
  gecos: Nicolas MASSE
  groups: wheel
  lock_passwd: false
  passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
  ssh_authorized_keys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain

packages:
# Useful tools
- net-tools
- hdparm
- iptraf
- iotop
- vim-enhanced
- tmux
- rsync
- tree
- unzip
- tar
- tcpdump
- telnet
- strace
- bind-utils
# NFS
- firewalld
- nfs-utils

runcmd:
# Enable KVM virsh console access
- [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
- [ "systemctl", "start", "--no-block", "serial-getty@ttyS0.service" ]
# Disable SSH password authentication
- [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
- [ "systemctl", "restart", "sshd" ]
# Enable sudo without password
- [ "sed", "-i.post-install", "-e", "s/^%wheel\tALL=(ALL)\tALL/%wheel ALL=(ALL) NOPASSWD: ALL/", "/etc/sudoers" ]
# Fix file permissions
- [ "chown", "-R", "nicolas:nicolas", "/home/nicolas" ]
# Enable NFS
- [ "mount", "/srv" ]
- [ "systemctl", "enable", "rpcbind" ]
- [ "systemctl", "start", "rpcbind" ]
- [ "systemctl", "enable", "nfs-server" ]
- [ "systemctl", "start", "nfs-server" ]
- [ "systemctl", "enable", "firewalld" ]
- [ "systemctl", "start", "firewalld" ]
- [ "setsebool", "-P", "nfs_export_all_rw", "1" ]
- [ "mkdir", "-p", "/srv/nfs" ]
- [ "exportfs", "-rav" ]
#- [ "/bin/bash", "-c", "for i in {0..999}; do pv=$(printf '/srv/nfs/pv-%03d\n' $i); mkdir $pv; chmod 777 $pv; done" ]
- [ "/bin/bash", "-c", "for pv in pv-infra-registry pv-user-pvs; do mkdir -p /srv/nfs/$pv; chmod 770 /srv/nfs/$pv; done" ]
- [ "firewall-cmd", "--add-service=nfs", "--permanent" ]
- [ "firewall-cmd", "--reload" ]

write_files:
- path: /root/.bashrc
  # PS1='\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]# '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzFtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXSMgJwo=
  encoding: base64
  append: true
- path: /etc/skel/.bashrc
  # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXVwkICcK
  encoding: base64
  append: true
- path: /home/nicolas/.bashrc
  # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
  content: |
    UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
    XDAzM1swMG1cXVwkICcK
  encoding: base64
  append: true
- path: /etc/exports
  content: |
    /srv/nfs *(rw,no_root_squash)

templates/storage/network-config.cfg (new file, 4 lines)
version: 2
ethernets:
  eth0:
    dhcp4: true

variables.tf (new file, 146 lines)
variable "master_nodes" {
type = number
default = 3
}
variable "worker_nodes" {
type = number
default = 3
}
variable "bootstrap_nodes" {
type = number
default = 1
}
variable "pool_name" {
type = string
default = "default"
}
variable "volume_format" {
type = string
default = "qcow2"
}
variable "centos_image" {
type = string
default = "centos-stream-8"
}
variable "coreos_image" {
type = string
default = "rhcos-4.7.0-x86_64-qemu.x86_64"
}
variable "cluster_name" {
type = string
default = "ocp4"
}
variable "external_ifname" {
type = string
default = "virbr1"
}
variable "external_mac_address" {
type = string
}
variable "base_domain" {
type = string
default = "ocp.lab"
}
variable "network_ip_range" {
type = string
default = "10.10.3.0/24"
}
variable "public_cluster_ip" {
type = string
}
variable "master_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
}
variable "master_vcpu" {
type = number
default = 4
}
variable "master_memory_size" {
type = number
default = 16 * 1024
}
variable "lb_disk_size" {
type = number
default = 10 * 1024 * 1024 * 1024
}
variable "lb_vcpu" {
type = number
default = 2
}
variable "lb_memory_size" {
type = number
default = 4 * 1024
}
variable "storage_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
}
variable "storage_vcpu" {
type = number
default = 2
}
variable "storage_memory_size" {
type = number
default = 8 * 1024
}
variable "worker_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
}
variable "worker_vcpu" {
type = number
default = 2
}
variable "worker_memory_size" {
type = number
default = 8 * 1024
}
variable "bootstrap_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
}
variable "bootstrap_vcpu" {
type = number
default = 4
}
variable "bootstrap_memory_size" {
type = number
default = 16 * 1024
}
locals {
master_format = "${var.cluster_name}-master-%02d"
worker_format = "${var.cluster_name}-worker-%02d"
bootstrap_name = "${var.cluster_name}-bootstrap"
storage_name = "${var.cluster_name}-storage"
lb_name = "${var.cluster_name}-lb"
network_domain = "${var.cluster_name}.${var.base_domain}"
}
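Two conventions are easy to miss here: disk sizes are in bytes (the libvirt volume `size`) while memory sizes are in MiB (the libvirt domain `memory`), and the `*_format` locals are printf-style patterns consumed by `format()` in master.tf and worker.tf. A hedged way to sanity-check such expressions, assuming `terraform init` has run and terraform.tfvars is populated:
```sh
echo 'format("${var.cluster_name}-master-%02d", 1)' | terraform console   # => "ocp4-master-01"
echo 'cidrhost(var.network_ip_range, 11)' | terraform console             # => "10.10.3.11"
```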

worker.tf (new file, 43 lines)
resource "libvirt_volume" "worker_disk" {
name = "${format(local.worker_format, count.index + 1)}.${var.volume_format}"
count = var.worker_nodes
format = var.volume_format
pool = var.pool_name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
size = var.worker_disk_size
}
resource "libvirt_ignition" "worker_ignition" {
name = "${var.cluster_name}-worker-ignition"
content = file("${path.module}/${var.cluster_name}/worker.ign")
}
resource "libvirt_domain" "worker" {
count = var.worker_nodes
name = format(local.worker_format, count.index + 1)
vcpu = var.worker_vcpu
memory = var.worker_memory_size
coreos_ignition = libvirt_ignition.worker_ignition.id
autostart = true
disk {
volume_id = element(libvirt_volume.worker_disk.*.id, count.index)
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_id = libvirt_network.ocp_net.id
addresses = [cidrhost(var.network_ip_range, 21 + count.index)]
hostname = format("worker%d", count.index + 1)
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
}