install on bridged network

Branch: main
Nicolas Massé committed 5 years ago · commit 79d1990dd1
18 changed files (changed lines in parentheses):

  1. ansible/start.yaml (10)
  2. ansible/stop.yaml (12)
  3. ansible/templates/dnsmasq.conf.j2 (1)
  4. bootstrap.tf (19)
  5. lb.tf (35)
  6. main.tf (5)
  7. master.tf (24)
  8. network.tf (36)
  9. network.xslt (25)
  10. post-install.tf (6)
  11. storage.tf (18)
  12. templates/dns.env (5)
  13. templates/inventory (2)
  14. templates/lb/network-config.cfg (7)
  15. templates/network.xslt (32)
  16. terraform.tfvars.sample (5)
  17. variables.tf (15)
  18. worker.tf (24)

ansible/start.yaml · 10 changed lines

@@ -18,14 +18,6 @@
     workers: '{{ nodes | selectattr("role", "eq", "worker") | list }}'
     masters: '{{ nodes | selectattr("role", "eq", "master") | list }}'
   tasks:
-    - name: Configure name resolution for the cluster
-      template:
-        src: dnsmasq.conf.j2
-        dest: /etc/NetworkManager/dnsmasq.d/zone-{{ network_domain }}.conf
-    - name: Restart dnsmasq
-      command: pkill -f [d]nsmasq.*--enable-dbus=org.freedesktop.NetworkManager.dnsmasq
     - name: Start the Load Balancer and the Storage
       community.libvirt.virt:
         name: '{{ item.name }}'
@@ -39,7 +31,7 @@
     - name: Wait for the Load Balancer to appear
      wait_for:
        port: 443
-        host: '{{ lb.ip[1] }}'
+        host: '{{ lb.ip }}'
    - name: Wait for the NFS Server to appear
      wait_for:

ansible/stop.yaml · 12 changed lines

@@ -32,7 +32,7 @@
         command: info
       register: vm
       until: "vm[item.name].state == 'shutdown'"
-      retries: 24
+      retries: 48
       delay: 5
       loop: '{{ workers }}'
       loop_control:
@@ -52,7 +52,7 @@
         command: info
       register: vm
       until: "vm[item.name].state == 'shutdown'"
-      retries: 24
+      retries: 48
       delay: 5
       loop: '{{ masters }}'
       loop_control:
@@ -67,11 +67,3 @@
         - '{{ storage }}'
       loop_control:
         label: "{{ item.name }}"
-    - name: Unconfigure name resolution for the cluster
-      file:
-        path: /etc/NetworkManager/dnsmasq.d/zone-{{ network_domain }}.conf
-        state: absent
-    - name: Restart dnsmasq
-      command: pkill -f [d]nsmasq.*--enable-dbus=org.freedesktop.NetworkManager.dnsmasq

ansible/templates/dnsmasq.conf.j2 · 1 changed line (file deleted)

@@ -1 +0,0 @@
-server=/{{ network_domain }}/{{ dns_server }}

bootstrap.tf · 19 changed lines

@@ -12,12 +12,22 @@ resource "libvirt_ignition" "bootstrap_ignition" {
   content = file("${path.module}/${var.cluster_name}/bootstrap.ign")
 }

+locals {
+  bootstrap_nodes = [for i in range(var.bootstrap_nodes) : {
+    name = local.bootstrap_name
+    ip   = cidrhost(var.network_ip_range, 5)
+    mac  = format(var.network_mac_format, 5)
+    role = "bootstrap"
+  }]
+}
+
 resource "libvirt_domain" "bootstrap" {
   name            = local.bootstrap_name
   count           = var.bootstrap_nodes
   vcpu            = var.bootstrap_vcpu
   memory          = var.bootstrap_memory_size
   coreos_ignition = libvirt_ignition.bootstrap_ignition.id
+  qemu_agent      = true

   cpu = {
     mode = "host-passthrough"
@@ -34,13 +44,16 @@ resource "libvirt_domain" "bootstrap" {
   }

   network_interface {
-    network_id = libvirt_network.ocp_net.id
-    addresses  = [cidrhost(var.network_ip_range, 5)]
-    hostname   = "bootstrap"
+    network_name = var.network_name
+    mac          = element(local.bootstrap_nodes.*.mac, count.index)

     # When creating the domain resource, wait until the network interface gets
     # a DHCP lease from libvirt, so that the computed IP addresses will be
     # available when the domain is up and the plan applied.
     wait_for_lease = true
   }
+
+  xml {
+    xslt = file("${path.module}/network.xslt")
+  }
 }
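Note: each node's static address pair is derived from a fixed host index with cidrhost() and format(). A minimal standalone sketch, assuming the 192.168.7.0/24 range and MAC format from terraform.tfvars.sample:

locals {
  network_ip_range   = "192.168.7.0/24"
  network_mac_format = "02:01:07:00:07:%02x"

  # Host index 5 is reserved for the bootstrap node in this layout.
  bootstrap_ip  = cidrhost(local.network_ip_range, 5)   # "192.168.7.5"
  bootstrap_mac = format(local.network_mac_format, 5)   # "02:01:07:00:07:05"
}

The same index feeds both functions, so the IP and the DHCP reservation MAC can never drift apart.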

lb.tf · 35 changed lines

@@ -9,20 +9,15 @@ data "template_file" "lb_user_data" {
   template = file("${path.module}/templates/lb/cloud-init.cfg")
   vars = {
     haproxy_cfg = templatefile("${path.module}/templates/lb/haproxy.cfg", {
-      master_nodes    = { for i in libvirt_domain.master : i.name => i.network_interface.0.addresses[0] },
-      worker_nodes    = { for i in libvirt_domain.worker : i.name => i.network_interface.0.addresses[0] },
-      bootstrap_nodes = { for i in libvirt_domain.bootstrap : i.name => i.network_interface.0.addresses[0] }
+      master_nodes    = { for i in local.master_nodes : i.name => i.ip },
+      worker_nodes    = { for i in local.worker_nodes : i.name => i.ip },
+      bootstrap_nodes = { for i in local.bootstrap_nodes : i.name => i.ip }
     })
   }
 }

 data "template_file" "lb_network_config" {
   template = file("${path.module}/templates/lb/network-config.cfg")
-  vars = {
-    ip  = cidrhost(var.network_ip_range, 4)
-    dns = cidrhost(var.network_ip_range, 1)
-    gw  = cidrhost(var.network_ip_range, 1)
-  }
 }

 resource "libvirt_volume" "lb_disk" {
@@ -33,6 +28,15 @@ resource "libvirt_volume" "lb_disk" {
   size = var.lb_disk_size
 }

+locals {
+  lb_node = {
+    name = local.lb_name
+    ip   = cidrhost(var.network_ip_range, 4)
+    mac  = format(var.network_mac_format, 4)
+    role = "lb"
+  }
+}
+
 resource "libvirt_domain" "lb" {
   name = local.lb_name
   vcpu = var.lb_vcpu
@@ -56,9 +60,8 @@ resource "libvirt_domain" "lb" {
   }

   network_interface {
-    network_id = libvirt_network.ocp_net.id
-    addresses  = [cidrhost(var.network_ip_range, 4)]
-    hostname   = "lb"
+    network_name = var.network_name
+    mac          = local.lb_node.mac

     # When creating the domain resource, wait until the network interface gets
     # a DHCP lease from libvirt, so that the computed IP addresses will be
@@ -66,13 +69,7 @@ resource "libvirt_domain" "lb" {
     wait_for_lease = true
   }

-  network_interface {
-    bridge = var.external_ifname
-    mac    = var.external_mac_address
-
-    # When creating the domain resource, wait until the network interface gets
-    # a DHCP lease from libvirt, so that the computed IP addresses will be
-    # available when the domain is up and the plan applied.
-    wait_for_lease = true
+  xml {
+    xslt = file("${path.module}/network.xslt")
   }
 }

main.tf · 5 changed lines

@@ -33,10 +33,7 @@ terraform {
 }

 locals {
-  master_nodes     = [for i in libvirt_domain.master : { name = i.name, ip = i.network_interface.0.addresses[0], role = "master" }]
-  worker_nodes     = [for i in libvirt_domain.worker : { name = i.name, ip = i.network_interface.0.addresses[0], role = "worker" }]
-  bootstrap_nodes  = [for i in libvirt_domain.bootstrap : { name = i.name, ip = i.network_interface.0.addresses[0], role = "bootstrap" }]
-  additional_nodes = [{ name = (libvirt_domain.lb.name), ip = [libvirt_domain.lb.network_interface.0.addresses[0], libvirt_domain.lb.network_interface.1.addresses[0]], role = "lb" }, { name = (libvirt_domain.storage.name), ip = libvirt_domain.storage.network_interface.0.addresses[0], role = "storage" }]
+  additional_nodes = [local.lb_node, local.storage_node]
   all_nodes        = concat(local.additional_nodes, local.master_nodes, local.worker_nodes, local.bootstrap_nodes)
 }
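Note: since every per-role list is now a computed local defined next to its libvirt_domain, local.all_nodes no longer depends on DHCP leases and can be rendered into the Ansible inventory before any VM exists. A hypothetical way to inspect the result (the output name is illustrative, not part of this commit):

# jsonencode(local.all_nodes) yields entries shaped like
#   {"ip":"192.168.7.11","mac":"02:01:07:00:07:0b","name":"...","role":"master"}
# which the playbooks then filter with selectattr("role", "eq", ...).
output "all_nodes_json" {
  value = jsonencode(local.all_nodes)
}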

master.tf · 24 changed lines

@@ -12,6 +12,15 @@ resource "libvirt_ignition" "master_ignition" {
   content = file("${path.module}/${var.cluster_name}/master.ign")
 }

+locals {
+  master_nodes = [for i in range(var.master_nodes) : {
+    name = format(local.master_format, i + 1)
+    ip   = cidrhost(var.network_ip_range, 11 + i)
+    mac  = format(var.network_mac_format, 11 + i)
+    role = "master"
+  }]
+}
+
 resource "libvirt_domain" "master" {
   count = var.master_nodes
   name  = format(local.master_format, count.index + 1)
@@ -35,13 +44,12 @@ resource "libvirt_domain" "master" {
   }

   network_interface {
-    network_id = libvirt_network.ocp_net.id
-    addresses  = [cidrhost(var.network_ip_range, 11 + count.index)]
-    hostname   = format("master%d", count.index + 1)
-
-    # When creating the domain resource, wait until the network interface gets
-    # a DHCP lease from libvirt, so that the computed IP addresses will be
-    # available when the domain is up and the plan applied.
-    wait_for_lease = true
+    network_name   = var.network_name
+    mac            = element(local.master_nodes.*.mac, count.index)
+    wait_for_lease = false
+  }
+
+  xml {
+    xslt = file("${path.module}/network.xslt")
   }
 }
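Note the flip to wait_for_lease = false: on a bridged network DHCP is served outside libvirt, so the provider presumably cannot observe a lease and would block the apply waiting for one. The addresses are precomputed anyway; the external DHCP server just needs reservations matching the MAC scheme. A hypothetical way to expose them (dnsmasq reservation syntax shown only in the comment):

# One reservation per node on the external DHCP server, e.g. in dnsmasq:
#   dhcp-host=02:01:07:00:07:0b,192.168.7.11   # master 1 (host index 11)
locals {
  master_reservations = {
    for n in local.master_nodes : n.mac => n.ip
  }
}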

network.tf · 36 changed lines (file deleted)

@@ -1,36 +0,0 @@
-resource "libvirt_network" "ocp_net" {
-  name      = var.cluster_name
-  mode      = "nat"
-  domain    = local.network_domain
-  addresses = [var.network_ip_range]
-  autostart = true
-
-  dns {
-    enabled = true
-    hosts {
-      hostname = "host"
-      ip       = cidrhost(var.network_ip_range, 1)
-    }
-    hosts {
-      hostname = "api"
-      ip       = cidrhost(var.network_ip_range, 4)
-    }
-    hosts {
-      hostname = "api-int"
-      ip       = cidrhost(var.network_ip_range, 4)
-    }
-    hosts {
-      hostname = "etcd"
-      ip       = cidrhost(var.network_ip_range, 4)
-    }
-  }
-
-  dhcp {
-    enabled = true
-  }
-
-  xml {
-    xslt = templatefile("${path.module}/templates/network.xslt", { alias = "apps.${local.network_domain}", ip = cidrhost(var.network_ip_range, 4), network_domain = local.network_domain })
-  }
-}
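With the NAT network gone, the cluster now attaches to a pre-existing bridged libvirt network named by var.network_name (default "lab"), which must be defined out of band along with its portgroups and DNS/DHCP. For a setup without portgroups, a plain bridged network could still be kept under Terraform; a minimal sketch, assuming a pre-configured host bridge device br0:

resource "libvirt_network" "lab" {
  name      = "lab"   # must match var.network_name
  mode      = "bridge"
  bridge    = "br0"   # assumed Linux bridge already present on the host
  autostart = true
}

Portgroups themselves have no native argument in the provider, which is why the commit falls back to the per-domain XSLT below.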

network.xslt · 25 changed lines (new file)

@@ -0,0 +1,25 @@
+<?xml version="1.0" ?>
+<xsl:stylesheet version="1.0"
+                xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+  <xsl:output omit-xml-declaration="yes" indent="yes"/>
+
+  <!-- Target portgroup -->
+  <xsl:param name="portgroup" select="'lab8'"/>
+
+  <!-- XSLT Identity template -->
+  <xsl:template match="node()|@*">
+    <xsl:copy>
+      <xsl:apply-templates select="node()|@*"/>
+    </xsl:copy>
+  </xsl:template>
+
+  <!-- Put the NIC in the desired portgroup -->
+  <xsl:template match="/domain/devices/interface/source">
+    <xsl:copy>
+      <xsl:apply-templates select="@*|node()"/>
+      <xsl:attribute name="portgroup">
+        <xsl:value-of select="$portgroup"/>
+      </xsl:attribute>
+    </xsl:copy>
+  </xsl:template>
+</xsl:stylesheet>
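The stylesheet is applied per domain via the provider's xml { xslt = ... } hook, rewriting each <interface><source> element to carry portgroup='lab8' at define time. The parameter default is hard-coded; a hypothetical variant could render it from a variable instead (network.xslt.tpl and var.network_portgroup are not part of this commit):

xml {
  # templatefile() would substitute ${portgroup} into the stylesheet's
  # <xsl:param> default instead of relying on the literal 'lab8'.
  xslt = templatefile("${path.module}/network.xslt.tpl", {
    portgroup = var.network_portgroup   # hypothetical variable
  })
}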

post-install.tf · 6 changed lines

@@ -1,17 +1,17 @@
 resource "local_file" "registry_pv" {
-  content         = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
+  content         = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = local.storage_node.ip })
   filename        = "${var.cluster_name}/registry-pv.yaml"
   file_permission = "0644"
 }

 resource "local_file" "nfs_provisioner" {
-  content         = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
+  content         = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = local.storage_node.ip })
   filename        = "${var.cluster_name}/nfs-provisioner.yaml"
   file_permission = "0644"
 }

 resource "local_file" "ansible_inventory" {
-  content         = templatefile("${path.module}/templates/inventory", { nodes = local.all_nodes, network_domain = local.network_domain, dns_server = cidrhost(var.network_ip_range, 1) })
+  content         = templatefile("${path.module}/templates/inventory", { nodes = local.all_nodes })
   filename        = "${var.cluster_name}/inventory"
   file_permission = "0644"
 }

storage.tf · 18 changed lines

@@ -27,6 +27,15 @@ resource "libvirt_volume" "storage_data_disk" {
   size = var.storage_disk_size
 }

+locals {
+  storage_node = {
+    name = local.storage_name
+    ip   = cidrhost(var.network_ip_range, 6)
+    mac  = format(var.network_mac_format, 6)
+    role = "storage"
+  }
+}
+
 resource "libvirt_domain" "storage" {
   name = local.storage_name
   vcpu = var.storage_vcpu
@@ -54,13 +63,16 @@ resource "libvirt_domain" "storage" {
   }

   network_interface {
-    network_id = libvirt_network.ocp_net.id
-    addresses  = [cidrhost(var.network_ip_range, 6)]
-    hostname   = "storage"
+    network_name = var.network_name
+    mac          = local.storage_node.mac

     # When creating the domain resource, wait until the network interface gets
     # a DHCP lease from libvirt, so that the computed IP addresses will be
     # available when the domain is up and the plan applied.
     wait_for_lease = true
   }
+
+  xml {
+    xslt = file("${path.module}/network.xslt")
+  }
 }

templates/dns.env · 5 changed lines (file deleted)

@@ -1,5 +0,0 @@
-export LE_API_HOSTNAME="${api_server}"
-export LE_ROUTER_HOSTNAME="${router}"
-export DNS_ZONE="${dns_zone}"
-export DNS_API_RECORD="api.${cluster_name}"
-export DNS_ROUTER_RECORD="*.apps.${cluster_name}"

templates/inventory · 2 changed lines

@@ -1,6 +1,4 @@
 [hypervisor]

 [hypervisor:vars]
-network_domain=${network_domain}
-dns_server=${dns_server}
 nodes=${jsonencode(nodes)}

templates/lb/network-config.cfg · 7 changed lines

@@ -1,11 +1,4 @@
 version: 2
 ethernets:
   eth0:
-    addresses:
-      - ${ip}/24
-    #gateway4: ${gw}
-    nameservers:
-      addresses: [${dns}]
-  eth1:
     dhcp4: true
-    nameservers: {}

templates/network.xslt · 32 changed lines (file deleted)

@@ -1,32 +0,0 @@
-<?xml version="1.0" ?>
-<xsl:stylesheet version="1.0"
-                xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
-                xmlns:dnsmasq="http://libvirt.org/schemas/network/dnsmasq/1.0">
-  <xsl:output omit-xml-declaration="yes" indent="yes"/>
-
-  <!-- Identity transform -->
-  <xsl:template match="node()|@*">
-    <xsl:copy>
-      <xsl:apply-templates select="node()|@*"/>
-    </xsl:copy>
-  </xsl:template>
-
-  <!-- Append custom dnsmasq options to the network element -->
-  <xsl:template match="/network">
-    <xsl:copy>
-      <xsl:copy-of select="@*"/>
-      <xsl:copy-of select="node()"/>
-      <dnsmasq:options>
-        <!-- fix for the 5s timeout on DNS -->
-        <!-- see https://www.math.tamu.edu/~comech/tools/linux-slow-dns-lookup/ -->
-        <dnsmasq:option value="auth-server=${network_domain},"/>
-        <dnsmasq:option value="auth-zone=${network_domain}"/>
-        <!-- Wildcard route -->
-        <dnsmasq:option value="host-record=lb.${network_domain},${ip}"/>
-        <dnsmasq:option value="cname=*.apps.${network_domain},lb.${network_domain}"/>
-      </dnsmasq:options>
-    </xsl:copy>
-  </xsl:template>
-</xsl:stylesheet>

terraform.tfvars.sample · 5 changed lines

@@ -1,7 +1,8 @@
 base_domain = "PUT_YOUR_DNS_DOMAIN_HERE"
-external_mac_address = "02:00:00:00:00:04"
 public_cluster_ip = "1.2.3.4"
-network_ip_range = "10.10.0.0/24"
+network_ip_range = "192.168.7.0/24"
+network_mac_format = "02:01:07:00:07:%02x"
 cluster_name = "__CLUSTER_NAME__"
 bootstrap_nodes = 1
+worker_nodes = 2
 acme_account_email = "your.username@redhat.com"

variables.tf · 15 changed lines

@@ -38,23 +38,24 @@ variable "cluster_name" {
   default = "ocp4"
 }

-variable "external_ifname" {
-  type    = string
-  default = "virbr1"
-}
-
-variable "external_mac_address" {
-  type = string
-}
-
 variable "base_domain" {
   type    = string
   default = "ocp.lab"
 }

+variable "network_name" {
+  type    = string
+  default = "lab"
+}
+
 variable "network_ip_range" {
   type    = string
-  default = "10.10.3.0/24"
+  default = "192.168.7.0/24"
 }

+variable "network_mac_format" {
+  type    = string
+  default = "02:01:07:00:07:%02x"
+}
+
 variable "public_cluster_ip" {

worker.tf · 24 changed lines

@@ -12,6 +12,15 @@ resource "libvirt_ignition" "worker_ignition" {
   content = file("${path.module}/${var.cluster_name}/worker.ign")
 }

+locals {
+  worker_nodes = [for i in range(var.worker_nodes) : {
+    name = format(local.worker_format, i + 1)
+    ip   = cidrhost(var.network_ip_range, 21 + i)
+    mac  = format(var.network_mac_format, 21 + i)
+    role = "worker"
+  }]
+}
+
 resource "libvirt_domain" "worker" {
   count = var.worker_nodes
   name  = format(local.worker_format, count.index + 1)
@@ -35,13 +44,12 @@ resource "libvirt_domain" "worker" {
   }

   network_interface {
-    network_id = libvirt_network.ocp_net.id
-    addresses  = [cidrhost(var.network_ip_range, 21 + count.index)]
-    hostname   = format("worker%d", count.index + 1)
-
-    # When creating the domain resource, wait until the network interface gets
-    # a DHCP lease from libvirt, so that the computed IP addresses will be
-    # available when the domain is up and the plan applied.
-    wait_for_lease = true
+    network_name   = var.network_name
+    mac            = element(local.worker_nodes.*.mac, count.index)
+    wait_for_lease = false
+  }
+
+  xml {
+    xslt = file("${path.module}/network.xslt")
   }
 }
