
install k8s

commit bef104393c (branch: main)
Author: Nicolas Massé, 4 years ago
  1. README.md — 28 lines changed
  2. acme.tf — 20 lines changed
  3. ansible/start.yaml — 4 lines changed
  4. ansible/stop.yaml — 4 lines changed
  5. bootstrap.tf — 61 lines changed
  6. clusterctl — 199 lines changed
  7. install-config.yaml.sample — 24 lines changed
  8. lb.tf — 5 lines changed
  9. local.env.sample — 6 lines changed
  10. main.tf — 16 lines changed
  11. master.tf — 27 lines changed
  12. post-install.tf — 12 lines changed
  13. provider.tf — 10 lines changed
  14. public_dns.tf — 23 lines changed
  15. storage.tf — 2 lines changed
  16. templates/base/cloud-init.cfg — 21 lines changed
  17. templates/base/network-config.cfg — 4 lines changed
  18. templates/inventory — 2 lines changed
  19. templates/lb/cloud-init.cfg — 20 lines changed
  20. templates/lb/haproxy.cfg — 13 lines changed
  21. terraform.tfvars.sample — 4 lines changed
  22. variables.tf — 45 lines changed
  23. worker.tf — 27 lines changed

README.md — 28 lines changed

@@ -22,27 +22,10 @@ sudo dnf -y install terraform
Install the libvirt terraform provider.
```sh
curl -Lo /tmp/libvirt-provider.tgz https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.6.3/terraform-provider-libvirt-0.6.3+git.1604843676.67f4f2aa.Fedora_32.x86_64.tar.gz
mkdir -p ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
tar xvf /tmp/libvirt-provider.tgz -C ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.3/linux_amd64
```
Install the Gandi terraform provider.
```sh
git clone https://github.com/go-gandi/terraform-provider-gandi
cd terraform-provider-gandi
make
make install
```
Install the acme terraform provider.
```sh
git clone https://github.com/vancluever/terraform-provider-acme
cd terraform-provider-acme
mkdir -p ~/.terraform.d/plugins/vancluever/acme/2.3.0/linux_amd64/
GOBIN=$HOME/.terraform.d/plugins/vancluever/acme/2.3.0/linux_amd64/ make
curl -Lo /tmp/libvirt-provider.zip https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.6.14/terraform-provider-libvirt_0.6.14_linux_amd64.zip
mkdir -p ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.14/linux_amd64
unzip -d ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.14/linux_amd64 /tmp/libvirt-provider.zip
mv -i ~/.terraform.d/plugins/registry.terraform.io/dmacvicar/libvirt/0.6.14/linux_amd64/terraform-provider-libvirt{_v0.6.14,}
```
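After unpacking, it is worth confirming that Terraform actually resolves the provider from the local plugin directory; a minimal check from the repository root:
```sh
# Re-initialize the working directory; the output should list
# dmacvicar/libvirt v0.6.14 among the installed providers.
terraform init
# Show every provider the configuration requires and how it resolved.
terraform providers
```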
Create the template files from their samples.
@@ -50,7 +33,6 @@ Create the template files from their samples.
```sh
cp terraform.tfvars.sample terraform.tfvars
cp local.env.sample local.env
cp install-config.yaml.sample install-config.yaml
```
Install the required Ansible collections.
@@ -83,7 +65,7 @@ dns=dnsmasq
Download the required images.
```sh
curl https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/4.9/4.9.0/rhcos-4.9.0-x86_64-qemu.x86_64.qcow2.gz |gunzip -c > /var/lib/libvirt/images/base-images/rhcos-4.9.0-x86_64-qemu.x86_64.qcow2
curl -Lo /var/lib/libvirt/images/base-images/focal-server-cloudimg-amd64.qcow2 https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img
curl -Lo /var/lib/libvirt/images/base-images/centos-stream-8.qcow2 http://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20210210.0.x86_64.qcow2
```
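A quick sanity check on the downloads, assuming qemu-img is installed on the hypervisor:
```sh
# Both images should report "file format: qcow2" and a plausible
# virtual size; a truncated download usually fails right here.
qemu-img info /var/lib/libvirt/images/base-images/focal-server-cloudimg-amd64.qcow2
qemu-img info /var/lib/libvirt/images/base-images/centos-stream-8.qcow2
```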

acme.tf — 20 lines changed

@@ -1,20 +0,0 @@
resource "tls_private_key" "account_key" {
algorithm = "RSA"
rsa_bits = 2048
}
resource "acme_registration" "cluster_reg" {
account_key_pem = tls_private_key.account_key.private_key_pem
email_address = var.acme_account_email
}
resource "acme_certificate" "cluster_cert" {
account_key_pem = acme_registration.cluster_reg.account_key_pem
common_name = "api.${local.network_domain}"
subject_alternative_names = ["*.apps.${local.network_domain}"]
key_type = "2048" // RSA 2048
dns_challenge {
provider = "gandiv5"
}
}
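For the record, the removed dns_challenge block authenticated against Gandi through the gandiv5 ACME plugin, which reads its API key from the environment rather than from Terraform variables; a sketch of how it was driven, matching the GANDIV5_API_KEY entry dropped from local.env.sample further down:
```sh
# The gandiv5 DNS challenge plugin reads this from the environment.
export GANDIV5_API_KEY="123...456"
terraform apply
```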

ansible/start.yaml — 4 lines changed

@@ -1,5 +1,5 @@
- name: Start the OpenShift cluster
hosts: localhost
- name: Start the Kubernetes cluster
hosts: bastion
gather_facts: no
become: no
vars:

ansible/stop.yaml — 4 lines changed

@@ -1,5 +1,5 @@
- name: Stop the OpenShift cluster
hosts: localhost
- name: Stop the Kubernetes cluster
hosts: bastion
gather_facts: no
become: no
vars:

bootstrap.tf — 61 lines changed

@@ -1,61 +0,0 @@
resource "libvirt_volume" "bootstrap_disk" {
name = "${local.bootstrap_name}.${var.volume_format}"
count = var.bootstrap_nodes
format = var.volume_format
pool = libvirt_pool.cluster_storage.name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
base_volume_pool = var.base_image_pool
size = var.bootstrap_disk_size
}
resource "libvirt_ignition" "bootstrap_ignition" {
name = "${var.cluster_name}-bootstrap-ignition"
content = file("${path.module}/.clusters/${var.cluster_name}/bootstrap.ign")
pool = libvirt_pool.cluster_storage.name
}
locals {
bootstrap_nodes = [for i in range(var.bootstrap_nodes) : {
name = local.bootstrap_name
ip = cidrhost(var.network_ip_range, 5)
mac = format(var.network_mac_format, 5)
role = "bootstrap"
}]
}
resource "libvirt_domain" "bootstrap" {
name = local.bootstrap_name
count = var.bootstrap_nodes
vcpu = var.bootstrap_vcpu
memory = var.bootstrap_memory_size
coreos_ignition = libvirt_ignition.bootstrap_ignition.id
qemu_agent = true
cpu = {
mode = "host-passthrough"
}
disk {
volume_id = element(libvirt_volume.bootstrap_disk.*.id, count.index)
}
# Makes the tty0 available via `virsh console`
console {
type = "pty"
target_port = "0"
}
network_interface {
network_name = var.network_name
mac = element(local.bootstrap_nodes.*.mac, count.index)
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
xml {
xslt = file("${path.module}/portgroups/${var.network_portgroup}.xslt")
}
}
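The console block above is what made the serial console reachable from the hypervisor. For reference, with the removed configuration's default cluster_name of ocp4, the bootstrap node could be watched during bootstrap like this (the domain name is inferred from local.bootstrap_name; exit the console with Ctrl+]):
```sh
virsh console ocp4-bootstrap
```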

clusterctl — 199 lines changed

@@ -25,13 +25,11 @@ function init () {
fi
mkdir -p ".clusters/$cluster_name"
sed "s/__CLUSTER_NAME__/$cluster_name/" install-config.yaml > ".clusters/$cluster_name/install-config.yaml"
sed "s/__CLUSTER_NAME__/$cluster_name/" terraform.tfvars > ".clusters/$cluster_name/terraform.tfvars"
echo "Cluster $cluster_name initialized successfully!"
echo
echo "Review and adjust the following files to your needs:"
echo "- .clusters/$cluster_name/install-config.yaml"
echo "- .clusters/$cluster_name/terraform.tfvars"
echo
exit 0
@@ -42,67 +40,14 @@ function destroy () {
local cluster_name="${1:-}"
terraform destroy -var-file=".clusters/$cluster_name/terraform.tfvars" -state=".clusters/$cluster_name/terraform.tfstate"
sed -i.bak 's/^\s*bootstrap_nodes\s*=\s*.*$/bootstrap_nodes = 1/' ".clusters/$cluster_name/terraform.tfvars"
}
function prepare () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
# Make a backup since the openshift-install command will consume it
if [ -f ".clusters/$cluster_name/install-config.yaml" ]; then
cp ".clusters/$cluster_name/install-config.yaml" ".clusters/$cluster_name/install-config.yaml.bak"
fi
# Include the cluster dir in the path for disconnected installations
export PATH="$PWD/.clusters/$cluster_name:$PATH"
openshift-install version
# Create installation files
openshift-install create manifests --dir=".clusters/$cluster_name"
}
function apply () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
prepare "$cluster_name"
# Create installation files
openshift-install create ignition-configs --dir=".clusters/$cluster_name"
# Provision the infrastructure and wait for bootstrap to complete
terraform apply -var-file=".clusters/$cluster_name/terraform.tfvars" -state=".clusters/$cluster_name/terraform.tfstate" -auto-approve
openshift-install --dir=".clusters/$cluster_name" wait-for bootstrap-complete --log-level=info
# Destroy the bootstrap node
sed -i.bak 's/^\s*bootstrap_nodes\s*=\s*.*$/bootstrap_nodes = 0/' ".clusters/$cluster_name/terraform.tfvars"
terraform apply -var-file=".clusters/$cluster_name/terraform.tfvars" -state=".clusters/$cluster_name/terraform.tfstate" -auto-approve
# Auto-approve all pending CSRs
for i in {0..240}; do
approve_csr "$cluster_name"
sleep 15
done &
# Wait for the installation to complete
openshift-install --dir=".clusters/$cluster_name" wait-for install-complete
}
function ping () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" whoami
}
function approve_csr () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" get csr --no-headers \
| awk '/Pending/ {print $1}' \
| xargs --no-run-if-empty oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" adm certificate approve
}
function start () {
@@ -122,59 +67,8 @@ function stop () {
function post_install_nfs () {
local cluster_name="${1:-}"
oc apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/registry-pv.yaml"
oc patch --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" configs.imageregistry.operator.openshift.io cluster --type=json --patch-file=/dev/fd/0 <<EOF
[{"op": "remove", "path": "/spec/storage" },{"op": "add", "path": "/spec/storage", "value": {"pvc":{"claim": "registry-storage"}}}]
EOF
oc apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/nfs-provisioner.yaml"
oc patch --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" configs.imageregistry.operator.openshift.io cluster --type merge --patch-file=/dev/fd/0 <<EOF
{"spec":{"managementState": "Managed"}}
EOF
}
function post_install_le () {
local cluster_name="${1:-}"
cert_dn="$(openssl x509 -noout -subject -in ".clusters/$cluster_name/cluster.crt")"
cert_cn="${cert_dn#subject=CN = }"
# Deploy certificate to ingress
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" create secret tls router-certs-$(date "+%Y-%m-%d") --cert=".clusters/$cluster_name/cluster.crt" --key=".clusters/$cluster_name/cluster.key" -n openshift-ingress --dry-run -o yaml > ".clusters/$cluster_name/router-certs.yaml"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f ".clusters/$cluster_name/router-certs.yaml" -n openshift-ingress
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" patch ingresscontroller default -n openshift-ingress-operator --type=merge --patch-file=/dev/fd/0 <<EOF
{"spec": { "defaultCertificate": { "name": "router-certs-$(date "+%Y-%m-%d")" }}}
EOF
# Deploy certificate to api
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" create secret tls api-certs-$(date "+%Y-%m-%d") --cert=".clusters/$cluster_name/cluster.crt" --key=".clusters/$cluster_name/cluster.key" -n openshift-config --dry-run -o yaml > ".clusters/$cluster_name/api-certs.yaml"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f ".clusters/$cluster_name/api-certs.yaml" -n openshift-config
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" patch apiserver cluster --type=merge --patch-file=/dev/fd/0 <<EOF
{"spec":{"servingCerts":{"namedCertificates":[{"names":["$cert_cn"],"servingCertificate":{"name": "api-certs-$(date "+%Y-%m-%d")"}}]}}}
EOF
}
function post_install_sso () {
local cluster_name="${1:-}"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" create secret generic redhat-sso-client-secret -n openshift-config --from-literal="clientSecret=$GOOGLE_CLIENT_SECRET" --dry-run -o yaml > ".clusters/$cluster_name/sso-secret.yaml"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f ".clusters/$cluster_name/sso-secret.yaml"
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f - <<EOF
apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
name: cluster
spec:
identityProviders:
- google:
clientID: "$GOOGLE_CLIENT_ID"
clientSecret:
name: redhat-sso-client-secret
hostedDomain: redhat.com
mappingMethod: claim
name: RedHatSSO
type: Google
EOF
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" adm policy add-cluster-role-to-user cluster-admin "$OCP_ADMIN"
kubectl apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/registry-pv.yaml"
kubectl apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/nfs-provisioner.yaml"
}
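A quick way to confirm that both manifests took effect, reusing the kubeconfig flags from the function above (the grep pattern is an assumption about how the provisioner names its pods):
```sh
kubectl --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" get pods -A | grep -i nfs
kubectl --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" get pv
```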
function post_install () {
@@ -183,7 +77,7 @@ function post_install () {
shift
if [ $# -eq 0 ]; then
set nfs sso le
set nfs
fi
for i; do
@@ -191,57 +85,6 @@
done
}
function install_addon_acmhub () {
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
name: open-cluster-management
spec:
finalizers:
- kubernetes
EOF
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f - <<EOF
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: open-cluster-management
namespace: open-cluster-management
spec:
targetNamespaces:
- open-cluster-management
EOF
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f - <<EOF
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: acm-operator-subscription
namespace: open-cluster-management
spec:
sourceNamespace: openshift-marketplace
source: redhat-operators
channel: release-2.2
installPlanApproval: Automatic
name: advanced-cluster-management
EOF
while ! oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -n open-cluster-management get mch --all-namespaces -o yaml &>/dev/null; do
echo "Waiting for the MultiClusterHub CRD to appear..."
sleep 5
done
oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" apply -f - <<EOF
apiVersion: operator.open-cluster-management.io/v1
kind: MultiClusterHub
metadata:
name: multiclusterhub
namespace: open-cluster-management
EOF
echo
echo "RH-ACM Current state is: $(oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" get mch multiclusterhub -n open-cluster-management -o=jsonpath='{.status.phase}')"
echo
echo "RH-ACM Console: $(oc --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" get route multicloud-console -n open-cluster-management -o jsonpath="https://{.spec.host}")"
echo
}
function install_addon () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
@@ -264,19 +107,11 @@ function shell () {
export TF_CLI_ARGS_state_list="-state=.clusters/$cluster_name/terraform.tfstate"
export TF_CLI_ARGS_state_rm="-state=.clusters/$cluster_name/terraform.tfstate"
# Include the cluster dir in the path for disconnected installations
export PATH="$PWD/.clusters/$cluster_name:$PATH"
# OpenShift
# Kubernetes
export KUBECONFIG="$PWD/.clusters/$cluster_name/auth/kubeconfig"
export OC_BINARY="$(which oc)"
export KUBECTL_BINARY="$(which oc)"
export KUBECTL_BINARY="$(which kubectl)"
export CLUSTER_NAME="$cluster_name"
export PS1="[$CLUSTER_NAME:\w] "
function oc () {
"$OC_BINARY" --insecure-skip-tls-verify "$@"
}
export -f oc
function kubectl () {
"$KUBECTL_BINARY" --insecure-skip-tls-verify "$@"
}
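With these wrappers exported, every kubectl invocation inside the subshell is pre-wired to the selected cluster's kubeconfig. A typical session, assuming the `shell` subcommand dispatches to this function:
```sh
./clusterctl shell mycluster
# inside the spawned shell:
kubectl get nodes
```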
@@ -302,14 +137,6 @@ init)
shift
init "$@"
;;
prepare)
if [ -z "${2:-}" ]; then
echo "Usage: $0 prepare cluster-name"
exit 1
fi
shift
prepare "$@"
;;
start)
if [ -z "${2:-}" ]; then
echo "Usage: $0 start cluster-name"
@@ -334,22 +161,6 @@ apply)
shift
apply "$@"
;;
approve-csr)
if [ -z "${2:-}" ]; then
echo "Usage: $0 approve-csr cluster-name"
exit 1
fi
shift
approve_csr "$@"
;;
ping)
if [ -z "${2:-}" ]; then
echo "Usage: $0 ping cluster-name"
exit 1
fi
shift
ping "$@"
;;
destroy)
if [ -z "${2:-}" ]; then
echo "Usage: $0 destroy cluster-name"

install-config.yaml.sample — 24 lines changed

@@ -1,24 +0,0 @@
apiVersion: v1
baseDomain: PUT_YOUR_DNS_DOMAIN_HERE
compute:
- name: worker
hyperthreading: Enabled
replicas: 3
controlPlane:
name: master
hyperthreading: Enabled
replicas: 3
metadata:
name: __CLUSTER_NAME__
networking:
clusterNetworks:
- cidr: 10.128.0.0/14
hostPrefix: 23
serviceNetwork:
- 172.30.0.0/16
networkType: OpenShiftSDN
platform:
none: {}
pullSecret: PUT_YOUR_PULL_SECRET_HERE
sshKey: |
PUT_YOUR_SSH_PUBLIC_KEY_HERE

lb.tf — 5 lines changed

@@ -10,8 +10,7 @@ data "template_file" "lb_user_data" {
vars = {
haproxy_cfg = templatefile("${path.module}/templates/lb/haproxy.cfg", {
master_nodes = { for i in local.master_nodes : i.name => i.ip },
worker_nodes = { for i in local.worker_nodes : i.name => i.ip },
bootstrap_nodes = { for i in local.bootstrap_nodes : i.name => i.ip }
worker_nodes = { for i in local.worker_nodes : i.name => i.ip }
})
}
}
@@ -46,7 +45,7 @@ resource "libvirt_domain" "lb" {
autostart = false
qemu_agent = true
cpu = {
cpu {
mode = "host-passthrough"
}

local.env.sample — 6 lines changed

@@ -1,7 +1 @@
export GANDI_KEY="123...456"
export GANDIV5_API_KEY="123...456"
export GOOGLE_CLIENT_ID="client_id"
export GOOGLE_CLIENT_SECRET="client_secret"
export LE_EMAIL="user@redhat.com"
export OCP_ADMIN="user@redhat.com"
export LIBVIRT_DEFAULT_URI="qemu+ssh://user@libvirt.server/system"
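The file is meant to be sourced into the current shell before running terraform or virsh, so that the libvirt connection URI is in place:
```sh
source local.env
virsh list --all   # should reach the remote hypervisor over qemu+ssh
```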

main.tf — 16 lines changed

@@ -3,7 +3,7 @@ terraform {
required_providers {
libvirt = {
source = "dmacvicar/libvirt"
version = ">=0.6.3"
version = ">=0.6.14"
}
local = {
source = "hashicorp/local"
@@ -17,18 +17,6 @@ terraform {
source = "community-terraform-providers/ignition"
version = "2.1.2"
}
gandi = {
version = "2.0.0"
source = "github/go-gandi/gandi"
}
acme = {
source = "vancluever/acme"
version = "2.3.0"
}
tls = {
source = "hashicorp/tls"
version = ">=3.1.0"
}
}
}
@@ -40,7 +28,7 @@ resource "libvirt_pool" "cluster_storage" {
locals {
additional_nodes = [local.lb_node, local.storage_node]
all_nodes = concat(local.additional_nodes, local.master_nodes, local.worker_nodes, local.bootstrap_nodes)
all_nodes = concat(local.additional_nodes, local.master_nodes, local.worker_nodes)
}
output "machines" {

master.tf — 27 lines changed

@@ -1,19 +1,28 @@
resource "libvirt_cloudinit_disk" "master_cloudinit" {
name = "master-cloudinit.iso"
user_data = data.template_file.master_user_data.rendered
network_config = data.template_file.master_network_config.rendered
pool = libvirt_pool.cluster_storage.name
}
data "template_file" "master_user_data" {
template = file("${path.module}/templates/base/cloud-init.cfg")
}
data "template_file" "master_network_config" {
template = file("${path.module}/templates/base/network-config.cfg")
}
resource "libvirt_volume" "master_disk" {
name = "${format(local.master_format, count.index + 1)}.${var.volume_format}"
count = var.master_nodes
format = var.volume_format
pool = libvirt_pool.cluster_storage.name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
base_volume_name = "${var.ubuntu_image}.${var.volume_format}"
base_volume_pool = var.base_image_pool
size = var.master_disk_size
}
resource "libvirt_ignition" "master_ignition" {
name = "${var.cluster_name}-master-ignition"
content = file("${path.module}/.clusters/${var.cluster_name}/master.ign")
pool = libvirt_pool.cluster_storage.name
}
locals {
master_nodes = [for i in range(var.master_nodes) : {
name = format(local.master_format, i + 1)
@@ -28,10 +37,10 @@ resource "libvirt_domain" "master" {
name = format(local.master_format, count.index + 1)
vcpu = var.master_vcpu
memory = var.master_memory_size
coreos_ignition = libvirt_ignition.master_ignition.id
cloudinit = libvirt_cloudinit_disk.master_cloudinit.id
autostart = false
cpu = {
cpu {
mode = "host-passthrough"
}
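Once a master domain is up, the address libvirt leased to it can be cross-checked against the static IP computed in local.master_nodes; a minimal sketch, assuming the new default cluster_name of k8s:
```sh
# Lists the MAC and IP addresses attached to the first master's interfaces.
virsh domifaddr k8s-master-01
```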

post-install.tf — 12 lines changed

@@ -15,15 +15,3 @@ resource "local_file" "ansible_inventory" {
filename = ".clusters/${var.cluster_name}/inventory"
file_permission = "0644"
}
resource "local_file" "cluster_key" {
content = acme_certificate.cluster_cert.private_key_pem
filename = ".clusters/${var.cluster_name}/cluster.key"
file_permission = "0600"
}
resource "local_file" "cluster_cert" {
content = "${acme_certificate.cluster_cert.certificate_pem}${acme_certificate.cluster_cert.issuer_pem}"
filename = ".clusters/${var.cluster_name}/cluster.crt"
file_permission = "0644"
}

provider.tf — 10 lines changed

@@ -1,12 +1,2 @@
provider "libvirt" {
}
provider "gandi" {
# key = "<livedns apikey>"
# sharing_id = "<sharing id>"
}
provider "acme" {
server_url = "https://acme-v02.api.letsencrypt.org/directory"
# server_url = "https://acme-staging-v02.api.letsencrypt.org/directory"
}

public_dns.tf — 23 lines changed

@@ -1,23 +0,0 @@
data "gandi_domain" "public_domain" {
name = var.base_domain
}
resource "gandi_livedns_record" "api_record" {
zone = data.gandi_domain.public_domain.id
name = "api.${var.cluster_name}"
type = "A"
ttl = 300
values = [
var.public_cluster_ip
]
}
resource "gandi_livedns_record" "router_record" {
zone = data.gandi_domain.public_domain.id
name = "*.apps.${var.cluster_name}"
type = "A"
ttl = 300
values = [
var.public_cluster_ip
]
}

storage.tf — 2 lines changed

@@ -45,7 +45,7 @@ resource "libvirt_domain" "storage" {
autostart = false
qemu_agent = true
cpu = {
cpu {
mode = "host-passthrough"
}

templates/base/cloud-init.cfg — 21 lines changed

@@ -0,0 +1,21 @@
#cloud-config
# vim: syntax=yaml
resize_rootfs: true
users:
- name: nicolas
gecos: Nicolas MASSE
groups: sudo
lock_passwd: false
passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFW62WJXI1ZCMfNA4w0dMpL0fsldhbEfULNGIUB0nQui nmasse@localhost.localdomain
runcmd:
# Disable SSH password authentication
- [ "sed", "-i.post-install", "-e", "s/PasswordAuthentication yes/PasswordAuthentication no/", "/etc/ssh/sshd_config" ]
- [ "systemctl", "restart", "sshd" ]
# Enable sudo without password
- [ "sed", "-i.post-install", "-e", "s/^%sudo\tALL=(ALL:ALL) ALL/%sudo ALL=(ALL:ALL) NOPASSWD: ALL/", "/etc/sudoers" ]

templates/base/network-config.cfg — 4 lines changed

@@ -0,0 +1,4 @@
version: 2
ethernets:
ens3:
dhcp4: true

templates/inventory — 2 lines changed

@@ -1,3 +1,5 @@
[all:vars]
nodes=${jsonencode(nodes)}
[bastion]
admin.itix.lab ansible_user=nicolas

templates/lb/cloud-init.cfg — 20 lines changed

@@ -33,25 +33,6 @@ packages:
- haproxy
- firewalld
# Uncomment the following lines for HTTP proxy support
#
# bootcmd:
# - |
# cloud-init-per once env sh -c "mkdir -p /etc/systemd/system/cloud-config.service.d &&
# mkdir -p /etc/systemd/system/cloud-final.service.d && { cat > /etc/cloud/env <<-EOF
# http_proxy=http://admin.itix.lab:3128
# https_proxy=http://admin.itix.lab:3128
# EOF
# } && { cat > /etc/systemd/system/cloud-config.service.d/override.conf <<-EOF
# [Service]
# EnvironmentFile=/etc/cloud/env
# EOF
# } && { cat > /etc/systemd/system/cloud-final.service.d/override.conf <<-EOF
# [Service]
# EnvironmentFile=/etc/cloud/env
# EOF
# } && systemctl daemon-reload"
runcmd:
# Enable KVM virsh console access
- [ "systemctl", "enable", "serial-getty@ttyS0.service" ]
@@ -70,7 +51,6 @@ runcmd:
- [ "firewall-offline-cmd", "--add-service=http" ]
- [ "firewall-offline-cmd", "--add-service=https" ]
- [ "firewall-offline-cmd", "--add-port=6443/tcp" ]
- [ "firewall-offline-cmd", "--add-port=22623/tcp" ]
- [ "systemctl", "enable", "firewalld" ]
- [ "systemctl", "start", "firewalld" ]

templates/lb/haproxy.cfg — 13 lines changed

@@ -59,16 +59,3 @@ listen api
%{for name, ip in master_nodes~}
server ${name} ${ip}:6443 check
%{endfor~}
%{for name, ip in bootstrap_nodes~}
server ${name} ${ip}:6443 check
%{endfor~}
listen machine-config-server
bind 0.0.0.0:22623
mode tcp
%{for name, ip in master_nodes~}
server ${name} ${ip}:22623 check
%{endfor~}
%{for name, ip in bootstrap_nodes~}
server ${name} ${ip}:22623 check
%{endfor~}
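With the machine-config-server listener removed, the load balancer now only fronts the Kubernetes API on port 6443 (besides the HTTP/HTTPS listeners defined earlier in the file). A smoke test against the balanced endpoint, assuming the default k8s.itix.lab domain from variables.tf:
```sh
# /version is readable by anonymous clients on a default kube-apiserver,
# so any JSON reply means a master is healthy behind haproxy.
curl -k https://api.k8s.itix.lab:6443/version
```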

terraform.tfvars.sample — 4 lines changed

@@ -1,8 +1,6 @@
base_domain = "PUT_YOUR_DNS_DOMAIN_HERE"
public_cluster_ip = "1.2.3.4"
network_ip_range = "192.168.7.0/24"
network_mac_format = "02:01:07:00:07:%02x"
cluster_name = "__CLUSTER_NAME__"
bootstrap_nodes = 1
worker_nodes = 2
acme_account_email = "your.username@redhat.com"
network_portgroup = "lab7"

variables.tf — 45 lines changed

@@ -5,12 +5,7 @@ variable "master_nodes" {
variable "worker_nodes" {
type = number
default = 3
}
variable "bootstrap_nodes" {
type = number
default = 1
default = 2
}
variable "volume_format" {
@@ -23,19 +18,19 @@ variable "centos_image" {
default = "centos-stream-8"
}
variable "coreos_image" {
variable "ubuntu_image" {
type = string
default = "rhcos-4.9.0-x86_64-qemu.x86_64"
default = "focal-server-cloudimg-amd64"
}
variable "cluster_name" {
type = string
default = "ocp4"
default = "k8s"
}
variable "base_domain" {
type = string
default = "ocp.lab"
default = "itix.lab"
}
variable "network_name" {
@@ -45,7 +40,7 @@ variable "network_name" {
variable "network_portgroup" {
type = string
default = "lab8"
default = "lab7"
}
variable "network_ip_range" {
@@ -58,10 +53,6 @@ variable "network_mac_format" {
default = "02:01:07:00:07:%02x"
}
variable "public_cluster_ip" {
type = string
}
variable "master_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
@@ -69,12 +60,12 @@ variable "master_disk_size" {
variable "master_vcpu" {
type = number
default = 4
default = 2
}
variable "master_memory_size" {
type = number
default = 16 * 1024
default = 10 * 1024
}
variable "lb_disk_size" {
@@ -122,25 +113,6 @@ variable "worker_memory_size" {
default = 8 * 1024
}
variable "bootstrap_disk_size" {
type = number
default = 120 * 1024 * 1024 * 1024
}
variable "bootstrap_vcpu" {
type = number
default = 4
}
variable "bootstrap_memory_size" {
type = number
default = 16 * 1024
}
variable "acme_account_email" {
type = string
}
variable "base_image_pool" {
type = string
default = "base-images"
@@ -149,7 +121,6 @@ variable "base_image_pool" {
locals {
master_format = "${var.cluster_name}-master-%02d"
worker_format = "${var.cluster_name}-worker-%02d"
bootstrap_name = "${var.cluster_name}-bootstrap"
storage_name = "${var.cluster_name}-storage"
lb_name = "${var.cluster_name}-lb"
network_domain = "${var.cluster_name}.${var.base_domain}"
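Any of these defaults can be overridden per cluster in its terraform.tfvars, or ad hoc on the command line; a sketch against a hypothetical cluster named k8s:
```sh
terraform plan -var-file=".clusters/k8s/terraform.tfvars" \
  -var 'worker_nodes=3' -var 'master_vcpu=4'
```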

worker.tf — 27 lines changed

@@ -1,19 +1,28 @@
resource "libvirt_cloudinit_disk" "worker_cloudinit" {
name = "worker-cloudinit.iso"
user_data = data.template_file.worker_user_data.rendered
network_config = data.template_file.worker_network_config.rendered
pool = libvirt_pool.cluster_storage.name
}
data "template_file" "worker_user_data" {
template = file("${path.module}/templates/base/cloud-init.cfg")
}
data "template_file" "worker_network_config" {
template = file("${path.module}/templates/base/network-config.cfg")
}
resource "libvirt_volume" "worker_disk" {
name = "${format(local.worker_format, count.index + 1)}.${var.volume_format}"
count = var.worker_nodes
format = var.volume_format
pool = libvirt_pool.cluster_storage.name
base_volume_name = "${var.coreos_image}.${var.volume_format}"
base_volume_name = "${var.ubuntu_image}.${var.volume_format}"
base_volume_pool = var.base_image_pool
size = var.worker_disk_size
}
resource "libvirt_ignition" "worker_ignition" {
name = "${var.cluster_name}-worker-ignition"
content = file("${path.module}/.clusters/${var.cluster_name}/worker.ign")
pool = libvirt_pool.cluster_storage.name
}
locals {
worker_nodes = [for i in range(var.worker_nodes) : {
name = format(local.worker_format, i + 1)
@@ -28,10 +37,10 @@ resource "libvirt_domain" "worker" {
name = format(local.worker_format, count.index + 1)
vcpu = var.worker_vcpu
memory = var.worker_memory_size
coreos_ignition = libvirt_ignition.worker_ignition.id
cloudinit = libvirt_cloudinit_disk.worker_cloudinit.id
autostart = false
cpu = {
cpu {
mode = "host-passthrough"
}
