initial commit

main
Nicolas Massé 5 years ago
commit fda1a57708
  1. .gitignore (+7)
  2. .gitmodules (+4)
  3. README.md (+132)
  4. kubespray (+1)
  5. terraform/lb.tf (+66)
  6. terraform/main.tf (+26)
  7. terraform/master.tf (+58)
  8. terraform/network.xslt (+25)
  9. terraform/post-install.tf (+5)
  10. terraform/provider.tf (+2)
  11. terraform/storage.tf (+66)
  12. terraform/templates/inventory (+64)
  13. terraform/templates/lb/cloud-init.cfg (+84)
  14. terraform/templates/lb/haproxy.cfg (+61)
  15. terraform/templates/lb/network-config.cfg (+4)
  16. terraform/templates/main/cloud-init.cfg (+41)
  17. terraform/templates/main/network-config.cfg (+4)
  18. terraform/templates/storage/cloud-init.cfg (+102)
  19. terraform/templates/storage/network-config.cfg (+4)
  20. terraform/variables.tf (+121)
  21. terraform/worker.tf (+58)

.gitignore (+7)

@@ -0,0 +1,7 @@
.terraform
*.tfstate
*.tfstate.backup
.vscode
*.lock.hcl
terraform.tfvars

.gitmodules (+4)

@@ -0,0 +1,4 @@
[submodule "kubespray"]
	path = kubespray
	url = https://github.com/kubernetes-sigs/kubespray.git
	branch = release-2.15

README.md (+132)

@@ -0,0 +1,132 @@
# Kubernetes installation
## Prerequisites
### On your local machine
Install Terraform.
```sh
cat > hashicorp.repo <<"EOF"
[hashicorp]
name=Hashicorp Stable - $basearch
baseurl=https://rpm.releases.hashicorp.com/RHEL/8/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://rpm.releases.hashicorp.com/gpg
EOF
sudo dnf config-manager --add-repo hashicorp.repo
sudo dnf -y install terraform
```
Install the libvirt Terraform provider.
```sh
curl -Lo /tmp/libvirt-provider.tgz https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.6.3/terraform-provider-libvirt-0.6.3+git.1604843676.67f4f2aa.Fedora_32.x86_64.tar.gz
mkdir -p ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
tar xvf /tmp/libvirt-provider.tgz -C ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
```
Initialize Terraform.
```sh
cd terraform
terraform init
```
Install kubespray dependencies.
```sh
sudo dnf install ansible python3-netaddr python3-pbr python3-ruamel-yaml python3-jmespath
```
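Note that the kubespray directory is a git submodule pinned to the release-2.15 branch (see .gitmodules); if you did not clone this repository with `--recurse-submodules`, fetch it now.
```sh
git submodule update --init
```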
### On the hypervisor
Install libvirt.
```sh
sudo dnf install libvirt libvirt-daemon-kvm virt-install virt-viewer virt-top libguestfs-tools nmap-ncat
```
Fetch the latest CentOS Stream 8 cloud image.
```sh
sudo curl -Lo /var/lib/libvirt/images/centos-stream-8.qcow2 http://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20201217.0.x86_64.qcow2
```
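Terraform expects to find this image as a volume named `centos-stream-8.qcow2` in the pool given by `pool_name` (`default` here). If libvirt does not pick up the freshly downloaded file, refreshing the pool usually helps; this sketch assumes the `default` pool maps to /var/lib/libvirt/images.
```sh
sudo virsh pool-refresh default
sudo virsh vol-list default
```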
## Install
Choose a name for the cluster.
```sh
export CLUSTER_NAME=kube
```
Add the DNS entries to your DNS server (dnsmasq in the following example).
```sh
# Hosts
host-record=lb.kube.itix.lab,192.168.16.4,24h
host-record=storage.kube.itix.lab,192.168.16.6,24h
host-record=master1.kube.itix.lab,192.168.16.11,24h
host-record=master2.kube.itix.lab,192.168.16.12,24h
host-record=master3.kube.itix.lab,192.168.16.13,24h
host-record=worker1.kube.itix.lab,192.168.16.21,24h
host-record=worker2.kube.itix.lab,192.168.16.22,24h
# Services
host-record=api.kube.itix.lab,192.168.16.4,24h
cname=*.apps.kube.itix.lab,lb.kube.itix.lab
```
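To check that the records resolve before deploying, you can query the DNS server directly (replace `<dns-server>` with your dnsmasq host's address; this placeholder is not part of the original setup):
```sh
dig +short lb.kube.itix.lab @<dns-server>
dig +short foo.apps.kube.itix.lab @<dns-server>
```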
Deploy the virtual machines.
```sh
export LIBVIRT_DEFAULT_URI="qemu+ssh://$LIBVIRT_USER@$LIBVIRT_SERVER/system"
cd terraform
terraform init
terraform apply -var cluster_name=$CLUSTER_NAME
```
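The Terraform configuration declares a `machines` output (see terraform/main.tf) listing every node with its hostname, IP, and MAC address, which is handy for double-checking against the DNS entries above.
```sh
terraform output machines
```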
Set the default cluster variables by copying kubespray's sample group_vars into the inventory that Terraform generated.
```sh
cd ../kubespray
cp -r inventory/sample/group_vars inventory/$CLUSTER_NAME/group_vars
```
Install Kubernetes.
```sh
ansible -i inventory/$CLUSTER_NAME/inventory.ini all -m wait_for -a "port=22"
ansible-playbook -i inventory/$CLUSTER_NAME/inventory.ini cluster.yml
sudo chown -R $USER inventory/$CLUSTER_NAME/artifacts/
```
Ensure the cluster is up and running.
```sh
KUBECONFIG=inventory/$CLUSTER_NAME/artifacts/admin.conf kubectl get nodes
```
## Post-Install
Expose the dashboard.
```sh
KUBECONFIG=inventory/$CLUSTER_NAME/artifacts/admin.conf kubectl create ingress dashboard -n kube-system --rule "dashboard.apps.kube.itix.lab/*=kubernetes-dashboard:443,tls" --annotation=ingress.kubernetes.io/ssl-passthrough=true --annotation=nginx.ingress.kubernetes.io/backend-protocol=HTTPS --annotation=kubernetes.io/ingress.allow-http=false
```
Create the admin account.
```sh
export KUBECONFIG=inventory/$CLUSTER_NAME/artifacts/admin.conf
kubectl create sa admin -n kube-system
kubectl create clusterrolebinding admin --clusterrole=cluster-admin --serviceaccount=kube-system:admin -n kube-system
```
Fetch the admin service account token.
```sh
kubectl -n kube-system get secret $(kubectl -n kube-system get sa/admin -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
```
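You can then use this token to sign in to the dashboard exposed above, at https://dashboard.apps.kube.itix.lab/.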

kubespray (+1)

@@ -0,0 +1 @@
Subproject commit 82e90091472b56f84e36e04b96c6de36e3250aa5

terraform/lb.tf (+66)

@@ -0,0 +1,66 @@
resource "libvirt_cloudinit_disk" "lb_cloudinit" {
  name           = "${local.lb_name}-cloudinit.iso"
  user_data      = data.template_file.lb_user_data.rendered
  network_config = file("${path.module}/templates/lb/network-config.cfg")
  pool           = var.pool_name
}

data "template_file" "lb_user_data" {
  template = file("${path.module}/templates/lb/cloud-init.cfg")

  vars = {
    haproxy_cfg = templatefile("${path.module}/templates/lb/haproxy.cfg", {
      master_nodes = { for i in local.master_nodes : i.name => i.ip },
      worker_nodes = { for i in local.worker_nodes : i.name => i.ip }
    })
  }
}

resource "libvirt_volume" "lb_disk" {
  name             = "${local.lb_name}.${var.volume_format}"
  format           = var.volume_format
  pool             = var.pool_name
  base_volume_name = "${var.os_image}.${var.volume_format}"
  size             = var.lb_disk_size
}

locals {
  lb_node = {
    hostname = local.lb_hostname
    name     = local.lb_name
    ip       = cidrhost(var.network_ip_range, 4)
    mac      = format(var.network_mac_format, 4)
    role     = "lb"
  }
}

resource "libvirt_domain" "lb" {
  name      = local.lb_name
  vcpu      = var.lb_vcpu
  memory    = var.lb_memory_size
  cloudinit = libvirt_cloudinit_disk.lb_cloudinit.id
  autostart = false

  cpu = {
    mode = "host-passthrough"
  }

  disk {
    volume_id = libvirt_volume.lb_disk.id
  }

  # Makes the tty0 available via `virsh console`
  console {
    type        = "pty"
    target_port = "0"
  }

  network_interface {
    network_name   = var.network_name
    mac            = local.lb_node.mac
    wait_for_lease = false
  }

  xml {
    xslt = file("${path.module}/network.xslt")
  }
}
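
Note: the load balancer's IP and MAC are derived rather than hard-coded: `cidrhost(var.network_ip_range, 4)` selects host number 4 in the subnet and `format(var.network_mac_format, 4)` builds a matching MAC. An illustrative `terraform console` session using the defaults from terraform/variables.tf:
```sh
$ terraform console
> cidrhost("192.168.16.0/24", 4)
"192.168.16.4"
> format("02:01:10:00:10:%02x", 4)
"02:01:10:00:10:04"
```
This is why the dnsmasq entry for lb.kube.itix.lab points at 192.168.16.4; the same scheme gives the masters hosts 11-13 and the workers hosts 21-22.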

terraform/main.tf (+26)

@@ -0,0 +1,26 @@
terraform {
  required_version = ">= 0.13"

  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = ">=0.6.3"
    }
    local = {
      source  = "hashicorp/local"
      version = ">=2.0.0"
    }
    template = {
      source  = "hashicorp/template"
      version = ">=2.2.0"
    }
  }
}

locals {
  additional_nodes = [local.lb_node, local.storage_node]
  all_nodes        = concat(local.additional_nodes, local.master_nodes, local.worker_nodes)
}

output "machines" {
  value = local.all_nodes
}

terraform/master.tf (+58)

@@ -0,0 +1,58 @@
resource "libvirt_cloudinit_disk" "master_cloudinit" {
  name           = "${var.cluster_name}-master-cloudinit.iso"
  user_data      = file("${path.module}/templates/main/cloud-init.cfg")
  network_config = file("${path.module}/templates/main/network-config.cfg")
  pool           = var.pool_name
}

resource "libvirt_volume" "master_disk" {
  name             = "${format(local.master_format, count.index + 1)}.${var.volume_format}"
  count            = var.master_nodes
  format           = var.volume_format
  pool             = var.pool_name
  base_volume_name = "${var.os_image}.${var.volume_format}"
  size             = var.master_disk_size
}

locals {
  master_nodes = [for i in range(var.master_nodes) : {
    hostname = format(local.master_hostname_format, i + 1)
    name     = format(local.master_format, i + 1)
    ip       = cidrhost(var.network_ip_range, 11 + i)
    mac      = format(var.network_mac_format, 11 + i)
    role     = "master"
  }]
}

resource "libvirt_domain" "master" {
  count     = var.master_nodes
  name      = format(local.master_format, count.index + 1)
  vcpu      = var.master_vcpu
  memory    = var.master_memory_size
  cloudinit = libvirt_cloudinit_disk.master_cloudinit.id
  autostart = false

  cpu = {
    mode = "host-passthrough"
  }

  disk {
    volume_id = element(libvirt_volume.master_disk.*.id, count.index)
  }

  # Makes the tty0 available via `virsh console`
  console {
    type        = "pty"
    target_port = "0"
  }

  network_interface {
    network_name   = var.network_name
    mac            = element(local.master_nodes.*.mac, count.index)
    wait_for_lease = false
  }

  xml {
    xslt = file("${path.module}/network.xslt")
  }
}

terraform/network.xslt (+25)

@@ -0,0 +1,25 @@
<?xml version="1.0" ?>
<xsl:stylesheet version="1.0"
                xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:output omit-xml-declaration="yes" indent="yes"/>

  <!-- Target portgroup -->
  <xsl:param name="portgroup" select="'lab16'"/>

  <!-- XSLT identity template -->
  <xsl:template match="node()|@*">
    <xsl:copy>
      <xsl:apply-templates select="node()|@*"/>
    </xsl:copy>
  </xsl:template>

  <!-- Put the NIC in the desired portgroup -->
  <xsl:template match="/domain/devices/interface/source">
    <xsl:copy>
      <xsl:apply-templates select="@*|node()"/>
      <xsl:attribute name="portgroup">
        <xsl:value-of select="$portgroup"/>
      </xsl:attribute>
    </xsl:copy>
  </xsl:template>
</xsl:stylesheet>

terraform/post-install.tf (+5)

@@ -0,0 +1,5 @@
resource "local_file" "ansible_inventory" {
  content = templatefile("${path.module}/templates/inventory", {
    masters      = local.master_nodes,
    workers      = local.worker_nodes,
    lb_node      = local.lb_node,
    api_endpoint = "api.${local.network_domain}"
  })
  filename        = "../kubespray/inventory/${var.cluster_name}/inventory.ini"
  file_permission = "0644"
}

terraform/provider.tf (+2)

@@ -0,0 +1,2 @@
provider "libvirt" {
}

terraform/storage.tf (+66)

@@ -0,0 +1,66 @@
resource "libvirt_cloudinit_disk" "storage_cloudinit" {
  name           = "${local.storage_name}-cloudinit.iso"
  user_data      = file("${path.module}/templates/storage/cloud-init.cfg")
  network_config = file("${path.module}/templates/storage/network-config.cfg")
  pool           = var.pool_name
}

resource "libvirt_volume" "storage_os_disk" {
  name             = "${local.storage_name}-os.${var.volume_format}"
  format           = var.volume_format
  pool             = var.pool_name
  base_volume_name = "${var.os_image}.${var.volume_format}"
}

resource "libvirt_volume" "storage_data_disk" {
  name   = "${local.storage_name}-data.${var.volume_format}"
  format = var.volume_format
  pool   = var.pool_name
  size   = var.storage_disk_size
}

locals {
  storage_node = {
    hostname = local.storage_hostname
    name     = local.storage_name
    ip       = cidrhost(var.network_ip_range, 6)
    mac      = format(var.network_mac_format, 6)
    role     = "storage"
  }
}

resource "libvirt_domain" "storage" {
  name      = local.storage_name
  vcpu      = var.storage_vcpu
  memory    = var.storage_memory_size
  cloudinit = libvirt_cloudinit_disk.storage_cloudinit.id
  autostart = false

  cpu = {
    mode = "host-passthrough"
  }

  disk {
    volume_id = libvirt_volume.storage_os_disk.id
  }

  disk {
    volume_id = libvirt_volume.storage_data_disk.id
  }

  # Makes the tty0 available via `virsh console`
  console {
    type        = "pty"
    target_port = "0"
  }

  network_interface {
    network_name   = var.network_name
    mac            = local.storage_node.mac
    wait_for_lease = false
  }

  xml {
    xslt = file("${path.module}/network.xslt")
  }
}

terraform/templates/inventory (+64)

@@ -0,0 +1,64 @@
[all]
%{for host in masters~}
${host.hostname} ansible_host=${host.ip} etcd_member_name=${host.name}
%{endfor~}
%{for host in workers~}
${host.hostname} ansible_host=${host.ip}
%{endfor~}
[all:vars]
ansible_become=yes
ansible_user=nicolas
ansible_ssh_extra_args='-o StrictHostKeyChecking=no'
# Store the .kube/config file locally
kubeconfig_localhost=true
# Configure the Load Balancer in front of the api-server
loadbalancer_apiserver={"address":"${lb_node.ip}","port":6443}
apiserver_loadbalancer_domain_name=${api_endpoint}
[kube-node:vars]
# Add a label to all worker nodes
node_labels={"worker": "true"}
[k8s-cluster:vars]
# Enable the Nginx ingress controller
ingress_nginx_enabled=true
ingress_nginx_nodeselector={"worker":"true"}
# Enable the Kubernetes dashboard
dashboard_enabled=true
# ## configure a bastion host if your nodes are not directly reachable
# [bastion]
# bastion ansible_host=x.x.x.x ansible_user=some_user
# When kube-node contains etcd, the etcd cluster is also schedulable for
# Kubernetes workloads. If you want a standalone etcd cluster, make sure
# those groups do not intersect. If you want a server to act as both
# master and node, define it in both kube-master and kube-node. If you
# want a standalone, unschedulable master, define it only in kube-master
# and not in kube-node.
[kube-master]
%{for host in masters~}
${host.hostname}
%{endfor~}
[etcd]
%{for host in masters~}
${host.hostname}
%{endfor~}
[kube-node]
%{for host in workers~}
${host.hostname}
%{endfor~}
[calico-rr]
[k8s-cluster:children]
kube-master
kube-node
calico-rr

terraform/templates/lb/cloud-init.cfg (+84)

@@ -0,0 +1,84 @@
#cloud-config
# vim: syntax=yaml
resize_rootfs: true
users:
  - name: nicolas
    gecos: Nicolas MASSE
    groups: wheel
    lock_passwd: false
    passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain
packages:
  # Useful tools
  - net-tools
  - hdparm
  - iptraf
  - iotop
  - vim-enhanced
  - tmux
  - rsync
  - tree
  - unzip
  - tar
  - tcpdump
  - telnet
  - strace
  - bind-utils
  # Load Balancer
  - haproxy
  - firewalld
runcmd:
  # Enable KVM virsh console access
  - [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
  - [ "systemctl", "start", "--no-block", "serial-getty@ttyS0.service" ]
  # Disable SSH password authentication
  - [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
  - [ "systemctl", "restart", "sshd" ]
  # Enable sudo without password
  - [ "sed", "-i.post-install", "-e", "s/^%wheel\tALL=(ALL)\tALL/%wheel ALL=(ALL) NOPASSWD: ALL/", "/etc/sudoers" ]
  # Fix file permissions
  - [ "chown", "-R", "nicolas:nicolas", "/home/nicolas" ]
  # Configure HAProxy
  - [ "setsebool", "-P", "haproxy_connect_any=1" ]
  - [ "systemctl", "enable", "haproxy" ]
  - [ "systemctl", "restart", "haproxy" ]
  - [ "firewall-offline-cmd", "--add-service=http" ]
  - [ "firewall-offline-cmd", "--add-service=https" ]
  - [ "firewall-offline-cmd", "--add-port=6443/tcp" ]
  - [ "firewall-offline-cmd", "--add-port=22623/tcp" ]
  - [ "systemctl", "enable", "firewalld" ]
  - [ "systemctl", "start", "firewalld" ]
write_files:
  - path: /root/.bashrc
    # PS1='\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]# '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzFtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXSMgJwo=
    encoding: base64
    append: true
  - path: /etc/skel/.bashrc
    # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXVwkICcK
    encoding: base64
    append: true
  - path: /home/nicolas/.bashrc
    # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXVwkICcK
    encoding: base64
    append: true
  - path: /etc/haproxy/haproxy.cfg
    content: ${jsonencode(haproxy_cfg)}

terraform/templates/lb/haproxy.cfg (+61)

@@ -0,0 +1,61 @@
global
    log 127.0.0.1 local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode http
    log global
    option dontlognull
    option redispatch
    retries 3
    timeout http-request 10s
    timeout queue 1m
    timeout connect 10s
    timeout client 1m
    timeout server 1m
    timeout http-keep-alive 10s
    timeout check 10s
    maxconn 3000

listen ingress-http
    bind 0.0.0.0:80
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:80 check
%{endfor~}
%{for name, ip in worker_nodes~}
    server ${name} ${ip}:80 check
%{endfor~}

listen ingress-https
    bind 0.0.0.0:443
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:443 check
%{endfor~}
%{for name, ip in worker_nodes~}
    server ${name} ${ip}:443 check
%{endfor~}

listen api
    bind 0.0.0.0:6443
    mode tcp
%{for name, ip in master_nodes~}
    server ${name} ${ip}:6443 check
%{endfor~}

terraform/templates/lb/network-config.cfg (+4)

@@ -0,0 +1,4 @@
version: 2
ethernets:
  eth0:
    dhcp4: true

terraform/templates/main/cloud-init.cfg (+41)

@@ -0,0 +1,41 @@
#cloud-config
# vim: syntax=yaml
resize_rootfs: true
users:
  - name: nicolas
    gecos: Nicolas MASSE
    groups: wheel
    lock_passwd: false
    passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain
packages:
  # Useful tools
  - net-tools
  - hdparm
  - iptraf
  - iotop
  - vim-enhanced
  - tmux
  - rsync
  - tree
  - unzip
  - tar
  - tcpdump
  - telnet
  - strace
  - bind-utils
runcmd:
  # Enable KVM virsh console access
  - [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
  - [ "systemctl", "start", "--no-block", "serial-getty@ttyS0.service" ]
  # Disable SSH password authentication
  - [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
  - [ "systemctl", "restart", "sshd" ]
  # Enable sudo without password
  - [ "sed", "-i.post-install", "-e", "s/^%wheel\tALL=(ALL)\tALL/%wheel ALL=(ALL) NOPASSWD: ALL/", "/etc/sudoers" ]

terraform/templates/main/network-config.cfg (+4)

@@ -0,0 +1,4 @@
version: 2
ethernets:
  eth0:
    dhcp4: true

terraform/templates/storage/cloud-init.cfg (+102)

@@ -0,0 +1,102 @@
#cloud-config
# vim: syntax=yaml
disk_setup:
  /dev/vdb:
    table_type: mbr
    layout:
      - 100
    overwrite: false
fs_setup:
  - label: storage
    filesystem: xfs
    device: /dev/vdb
    partition: 1
resize_rootfs: true
mounts:
  - [ "/dev/vdb1", "/srv", "xfs", "defaults", "0", "0" ]
users:
  - name: nicolas
    gecos: Nicolas MASSE
    groups: wheel
    lock_passwd: false
    passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain
packages:
  # Useful tools
  - net-tools
  - hdparm
  - iptraf
  - iotop
  - vim-enhanced
  - tmux
  - rsync
  - tree
  - unzip
  - tar
  - tcpdump
  - telnet
  - strace
  - bind-utils
  # NFS
  - firewalld
  - nfs-utils
runcmd:
  # Enable KVM virsh console access
  - [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
  - [ "systemctl", "start", "--no-block", "serial-getty@ttyS0.service" ]
  # Disable SSH password authentication
  - [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
  - [ "systemctl", "restart", "sshd" ]
  # Enable sudo without password
  - [ "sed", "-i.post-install", "-e", "s/^%wheel\tALL=(ALL)\tALL/%wheel ALL=(ALL) NOPASSWD: ALL/", "/etc/sudoers" ]
  # Fix file permissions
  - [ "chown", "-R", "nicolas:nicolas", "/home/nicolas" ]
  # Enable NFS
  - [ "mount", "/srv" ]
  - [ "systemctl", "enable", "rpcbind" ]
  - [ "systemctl", "start", "rpcbind" ]
  - [ "systemctl", "enable", "nfs-server" ]
  - [ "systemctl", "start", "nfs-server" ]
  - [ "setsebool", "-P", "nfs_export_all_rw", "1" ]
  - [ "mkdir", "-p", "/srv/nfs" ]
  - [ "exportfs", "-rav" ]
  - [ "/bin/bash", "-c", "for pv in pv-infra-registry pv-user-pvs; do mkdir -p /srv/nfs/$pv; chmod 770 /srv/nfs/$pv; done" ]
  - [ "firewall-offline-cmd", "--add-service=nfs" ]
  - [ "systemctl", "enable", "firewalld" ]
  - [ "systemctl", "start", "firewalld" ]
write_files:
  - path: /root/.bashrc
    # PS1='\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]# '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzFtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXSMgJwo=
    encoding: base64
    append: true
  - path: /etc/skel/.bashrc
    # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXVwkICcK
    encoding: base64
    append: true
  - path: /home/nicolas/.bashrc
    # PS1='\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
    content: |
      UFMxPSdcW1wwMzNbMDE7MzJtXF1cdUBcaFxbXDAzM1swMG1cXTpcW1wwMzNbMDE7MzRtXF1cd1xb
      XDAzM1swMG1cXVwkICcK
    encoding: base64
    append: true
  - path: /etc/exports
    content: |
      /srv/nfs *(rw,no_root_squash)

terraform/templates/storage/network-config.cfg (+4)

@@ -0,0 +1,4 @@
version: 2
ethernets:
  eth0:
    dhcp4: true

terraform/variables.tf (+121)

@@ -0,0 +1,121 @@
variable "master_nodes" {
  type    = number
  default = 3
}

variable "worker_nodes" {
  type    = number
  default = 2
}

variable "pool_name" {
  type    = string
  default = "default"
}

variable "volume_format" {
  type    = string
  default = "qcow2"
}

variable "os_image" {
  type    = string
  default = "centos-stream-8"
}

variable "cluster_name" {
  type    = string
  default = "kube"
}

variable "base_domain" {
  type    = string
  default = "itix.lab"
}

variable "network_name" {
  type    = string
  default = "lab"
}

variable "network_ip_range" {
  type    = string
  default = "192.168.16.0/24"
}

variable "network_mac_format" {
  type    = string
  default = "02:01:10:00:10:%02x"
}

variable "master_disk_size" {
  type    = number
  default = 120 * 1024 * 1024 * 1024
}

variable "master_vcpu" {
  type    = number
  default = 4
}

variable "master_memory_size" {
  type    = number
  default = 16 * 1024
}

variable "lb_disk_size" {
  type    = number
  default = 10 * 1024 * 1024 * 1024
}

variable "lb_vcpu" {
  type    = number
  default = 2
}

variable "lb_memory_size" {
  type    = number
  default = 4 * 1024
}

variable "storage_disk_size" {
  type    = number
  default = 120 * 1024 * 1024 * 1024
}

variable "storage_vcpu" {
  type    = number
  default = 2
}

variable "storage_memory_size" {
  type    = number
  default = 8 * 1024
}

variable "worker_disk_size" {
  type    = number
  default = 120 * 1024 * 1024 * 1024
}

variable "worker_vcpu" {
  type    = number
  default = 4
}

variable "worker_memory_size" {
  type    = number
  default = 8 * 1024
}

locals {
  master_format          = "${var.cluster_name}-master-%02d"
  master_hostname_format = "master%d.${local.network_domain}"
  worker_format          = "${var.cluster_name}-worker-%02d"
  worker_hostname_format = "worker%d.${local.network_domain}"
  storage_name           = "${var.cluster_name}-storage"
  storage_hostname       = "storage.${local.network_domain}"
  lb_name                = "${var.cluster_name}-lb"
  lb_hostname            = "lb.${local.network_domain}"
  network_domain         = "${var.cluster_name}.${var.base_domain}"
}
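
Note: the `*_disk_size` defaults are expressed in bytes, the unit the libvirt provider uses for volume sizes, while the `*_memory_size` defaults are in MiB, the unit `libvirt_domain.memory` expects; hence the different multipliers.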

terraform/worker.tf (+58)

@@ -0,0 +1,58 @@
resource "libvirt_cloudinit_disk" "worker_cloudinit" {
  name           = "${var.cluster_name}-worker-cloudinit.iso"
  user_data      = file("${path.module}/templates/main/cloud-init.cfg")
  network_config = file("${path.module}/templates/main/network-config.cfg")
  pool           = var.pool_name
}

resource "libvirt_volume" "worker_disk" {
  name             = "${format(local.worker_format, count.index + 1)}.${var.volume_format}"
  count            = var.worker_nodes
  format           = var.volume_format
  pool             = var.pool_name
  base_volume_name = "${var.os_image}.${var.volume_format}"
  size             = var.worker_disk_size
}

locals {
  worker_nodes = [for i in range(var.worker_nodes) : {
    hostname = format(local.worker_hostname_format, i + 1)
    name     = format(local.worker_format, i + 1)
    ip       = cidrhost(var.network_ip_range, 21 + i)
    mac      = format(var.network_mac_format, 21 + i)
    role     = "worker"
  }]
}

resource "libvirt_domain" "worker" {
  count     = var.worker_nodes
  name      = format(local.worker_format, count.index + 1)
  vcpu      = var.worker_vcpu
  memory    = var.worker_memory_size
  cloudinit = libvirt_cloudinit_disk.worker_cloudinit.id
  autostart = false

  cpu = {
    mode = "host-passthrough"
  }

  disk {
    volume_id = element(libvirt_volume.worker_disk.*.id, count.index)
  }

  # Makes the tty0 available via `virsh console`
  console {
    type        = "pty"
    target_port = "0"
  }

  network_interface {
    network_name   = var.network_name
    mac            = element(local.worker_nodes.*.mac, count.index)
    wait_for_lease = false
  }

  xml {
    xslt = file("${path.module}/network.xslt")
  }
}