diff --git a/ansible/start.yaml b/ansible/start.yaml
index f06a751..474950d 100644
--- a/ansible/start.yaml
+++ b/ansible/start.yaml
@@ -18,14 +18,6 @@
workers: '{{ nodes | selectattr("role", "eq", "worker") | list }}'
masters: '{{ nodes | selectattr("role", "eq", "master") | list }}'
tasks:
- - name: Configure name resolution for the cluster
- template:
- src: dnsmasq.conf.j2
- dest: /etc/NetworkManager/dnsmasq.d/zone-{{ network_domain }}.conf
-
- - name: Restart dnsmasq
- command: pkill -f [d]nsmasq.*--enable-dbus=org.freedesktop.NetworkManager.dnsmasq
-
- name: Start the Load Balancer and the Storage
community.libvirt.virt:
name: '{{ item.name }}'
@@ -39,7 +31,7 @@
- name: Wait for the Load Balancer to appear
wait_for:
port: 443
- host: '{{ lb.ip[1] }}'
+ host: '{{ lb.ip }}'
- name: Wait for the NFS Server to appear
wait_for:
diff --git a/ansible/stop.yaml b/ansible/stop.yaml
index 761920c..9160faf 100644
--- a/ansible/stop.yaml
+++ b/ansible/stop.yaml
@@ -32,7 +32,7 @@
command: info
register: vm
until: "vm[item.name].state == 'shutdown'"
- retries: 24
+ retries: 48
delay: 5
loop: '{{ workers }}'
loop_control:
@@ -52,7 +52,7 @@
command: info
register: vm
until: "vm[item.name].state == 'shutdown'"
- retries: 24
+ retries: 48
delay: 5
loop: '{{ masters }}'
loop_control:
@@ -67,11 +67,3 @@
- '{{ storage }}'
loop_control:
label: "{{ item.name }}"
-
- - name: Unconfigure name resolution for the cluster
- file:
- path: /etc/NetworkManager/dnsmasq.d/zone-{{ network_domain }}.conf
- state: absent
-
- - name: Restart dnsmasq
- command: pkill -f [d]nsmasq.*--enable-dbus=org.freedesktop.NetworkManager.dnsmasq
diff --git a/ansible/templates/dnsmasq.conf.j2 b/ansible/templates/dnsmasq.conf.j2
deleted file mode 100644
index 8ded78a..0000000
--- a/ansible/templates/dnsmasq.conf.j2
+++ /dev/null
@@ -1 +0,0 @@
-server=/{{ network_domain }}/{{ dns_server }}
diff --git a/bootstrap.tf b/bootstrap.tf
index da0e62a..2cbaf81 100644
--- a/bootstrap.tf
+++ b/bootstrap.tf
@@ -12,12 +12,22 @@ resource "libvirt_ignition" "bootstrap_ignition" {
content = file("${path.module}/${var.cluster_name}/bootstrap.ign")
}
+locals {
+ bootstrap_nodes = [for i in range(var.bootstrap_nodes) : {
+ name = local.bootstrap_name
+ ip = cidrhost(var.network_ip_range, 5)
+ mac = format(var.network_mac_format, 5)
+ role = "bootstrap"
+ }]
+}
+
resource "libvirt_domain" "bootstrap" {
name = local.bootstrap_name
count = var.bootstrap_nodes
vcpu = var.bootstrap_vcpu
memory = var.bootstrap_memory_size
coreos_ignition = libvirt_ignition.bootstrap_ignition.id
+ qemu_agent = true
cpu = {
mode = "host-passthrough"
@@ -34,13 +44,16 @@ resource "libvirt_domain" "bootstrap" {
}
network_interface {
- network_id = libvirt_network.ocp_net.id
- addresses = [cidrhost(var.network_ip_range, 5)]
- hostname = "bootstrap"
+ network_name = var.network_name
+ mac = element(local.bootstrap_nodes.*.mac, count.index)
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
+
+ xml {
+ xslt = file("${path.module}/network.xslt")
+ }
}
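
Note: the new `bootstrap_nodes` local computes the node's IP and MAC up front from `network_ip_range` and `network_mac_format` instead of reading them back from the running domain. A minimal sketch of what those expressions evaluate to, assuming the sample values from `terraform.tfvars.sample`:

```hcl
# Standalone sketch, assuming the sample variable values; not part of the module.
variable "network_ip_range" {
  type    = string
  default = "192.168.7.0/24"
}

variable "network_mac_format" {
  type    = string
  default = "02:01:07:00:07:%02x"
}

output "bootstrap_ip" {
  value = cidrhost(var.network_ip_range, 5) # "192.168.7.5"
}

output "bootstrap_mac" {
  value = format(var.network_mac_format, 5) # "02:01:07:00:07:05"
}
```
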
diff --git a/lb.tf b/lb.tf
index 1c5671d..5335747 100644
--- a/lb.tf
+++ b/lb.tf
@@ -9,20 +9,15 @@ data "template_file" "lb_user_data" {
template = file("${path.module}/templates/lb/cloud-init.cfg")
vars = {
haproxy_cfg = templatefile("${path.module}/templates/lb/haproxy.cfg", {
- master_nodes = { for i in libvirt_domain.master : i.name => i.network_interface.0.addresses[0] },
- worker_nodes = { for i in libvirt_domain.worker : i.name => i.network_interface.0.addresses[0] },
- bootstrap_nodes = { for i in libvirt_domain.bootstrap : i.name => i.network_interface.0.addresses[0] }
+ master_nodes = { for i in local.master_nodes : i.name => i.ip },
+ worker_nodes = { for i in local.worker_nodes : i.name => i.ip },
+ bootstrap_nodes = { for i in local.bootstrap_nodes : i.name => i.ip }
})
}
}
data "template_file" "lb_network_config" {
template = file("${path.module}/templates/lb/network-config.cfg")
- vars = {
- ip = cidrhost(var.network_ip_range, 4)
- dns = cidrhost(var.network_ip_range, 1)
- gw = cidrhost(var.network_ip_range, 1)
- }
}
resource "libvirt_volume" "lb_disk" {
@@ -33,6 +28,15 @@ resource "libvirt_volume" "lb_disk" {
size = var.lb_disk_size
}
+locals {
+ lb_node = {
+ name = local.lb_name
+ ip = cidrhost(var.network_ip_range, 4)
+ mac = format(var.network_mac_format, 4)
+ role = "lb"
+ }
+}
+
resource "libvirt_domain" "lb" {
name = local.lb_name
vcpu = var.lb_vcpu
@@ -56,9 +60,8 @@ resource "libvirt_domain" "lb" {
}
network_interface {
- network_id = libvirt_network.ocp_net.id
- addresses = [cidrhost(var.network_ip_range, 4)]
- hostname = "lb"
+ network_name = var.network_name
+ mac = local.lb_node.mac
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
@@ -66,13 +69,7 @@ resource "libvirt_domain" "lb" {
wait_for_lease = true
}
- network_interface {
- bridge = var.external_ifname
- mac = var.external_mac_address
-
- # When creating the domain resource, wait until the network interface gets
- # a DHCP lease from libvirt, so that the computed IP addresses will be
- # available when the domain is up and the plan applied.
- wait_for_lease = true
+ xml {
+ xslt = file("${path.module}/network.xslt")
}
}
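
Note: `haproxy.cfg` is now rendered from the plan-time locals rather than from computed `libvirt_domain` attributes, so the load balancer configuration no longer depends on DHCP leases being observable. A sketch of the shape of the maps the template receives, assuming two masters at offsets 11 and 12:

```hcl
# Illustrative only: mirrors the for-expressions in lb.tf, with hard-coded
# example nodes standing in for local.master_nodes.
locals {
  example_master_nodes = [
    { name = "master1", ip = "192.168.7.11", mac = "02:01:07:00:07:0b", role = "master" },
    { name = "master2", ip = "192.168.7.12", mac = "02:01:07:00:07:0c", role = "master" },
  ]

  # Same expression as the master_nodes argument passed to templatefile():
  haproxy_masters = { for i in local.example_master_nodes : i.name => i.ip }
  # => { master1 = "192.168.7.11", master2 = "192.168.7.12" }
}
```
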
diff --git a/main.tf b/main.tf
index 154603f..a2d9647 100644
--- a/main.tf
+++ b/main.tf
@@ -33,10 +33,7 @@ terraform {
}
locals {
- master_nodes = [for i in libvirt_domain.master : { name = i.name, ip = i.network_interface.0.addresses[0], role = "master" }]
- worker_nodes = [for i in libvirt_domain.worker : { name = i.name, ip = i.network_interface.0.addresses[0], role = "worker" }]
- bootstrap_nodes = [for i in libvirt_domain.bootstrap : { name = i.name, ip = i.network_interface.0.addresses[0], role = "bootstrap" }]
- additional_nodes = [{ name = (libvirt_domain.lb.name), ip = [libvirt_domain.lb.network_interface.0.addresses[0], libvirt_domain.lb.network_interface.1.addresses[0]], role = "lb" }, { name = (libvirt_domain.storage.name), ip = libvirt_domain.storage.network_interface.0.addresses[0], role = "storage" }]
+ additional_nodes = [local.lb_node, local.storage_node]
all_nodes = concat(local.additional_nodes, local.master_nodes, local.worker_nodes, local.bootstrap_nodes)
}
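
Note: every element of `local.all_nodes` now has the same flat shape (`name`, `ip`, `mac`, `role`) with a single string IP, which is what lets `ansible/start.yaml` use `{{ lb.ip }}` directly instead of indexing into a list. An illustrative rendering of the concatenation, with hypothetical node names and addresses:

```hcl
# Illustrative value of local.all_nodes with one node per role;
# mac attributes are omitted here for brevity.
locals {
  all_nodes_example = concat(
    [
      { name = "lb", ip = "192.168.7.4", role = "lb" },
      { name = "storage", ip = "192.168.7.6", role = "storage" },
    ],
    [{ name = "master1", ip = "192.168.7.11", role = "master" }],
    [{ name = "worker1", ip = "192.168.7.21", role = "worker" }],
    [{ name = "bootstrap", ip = "192.168.7.5", role = "bootstrap" }]
  )
}
```
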
diff --git a/master.tf b/master.tf
index e3444e6..9b49b7a 100644
--- a/master.tf
+++ b/master.tf
@@ -12,6 +12,15 @@ resource "libvirt_ignition" "master_ignition" {
content = file("${path.module}/${var.cluster_name}/master.ign")
}
+locals {
+ master_nodes = [for i in range(var.master_nodes) : {
+ name = format(local.master_format, i + 1)
+ ip = cidrhost(var.network_ip_range, 11 + i)
+ mac = format(var.network_mac_format, 11 + i)
+ role = "master"
+ }]
+}
+
resource "libvirt_domain" "master" {
count = var.master_nodes
name = format(local.master_format, count.index + 1)
@@ -35,13 +44,12 @@ resource "libvirt_domain" "master" {
}
network_interface {
- network_id = libvirt_network.ocp_net.id
- addresses = [cidrhost(var.network_ip_range, 11 + count.index)]
- hostname = format("master%d", count.index + 1)
-
- # When creating the domain resource, wait until the network interface gets
- # a DHCP lease from libvirt, so that the computed IP addresses will be
- # available when the domain is up and the plan applied.
- wait_for_lease = true
+ network_name = var.network_name
+ mac = element(local.master_nodes.*.mac, count.index)
+ wait_for_lease = false
+ }
+
+ xml {
+ xslt = file("${path.module}/network.xslt")
}
}
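
Note: masters (and workers, below) switch to `wait_for_lease = false` because their addresses are already known from the plan-time locals. The `element(local.master_nodes.*.mac, count.index)` expression uses the legacy splat form; for in-range indexes it is equivalent to a direct index, as this small sketch with example data shows:

```hcl
# Both expressions return the same MAC for indexes within the list;
# element() additionally wraps around for out-of-range indexes.
locals {
  example_masters = [
    { name = "master1", mac = "02:01:07:00:07:0b" },
    { name = "master2", mac = "02:01:07:00:07:0c" },
  ]
  mac_via_element = element(local.example_masters.*.mac, 1) # "02:01:07:00:07:0c"
  mac_via_index   = local.example_masters[1].mac            # "02:01:07:00:07:0c"
}
```
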
diff --git a/network.tf b/network.tf
deleted file mode 100644
index 82b7900..0000000
--- a/network.tf
+++ /dev/null
@@ -1,36 +0,0 @@
-resource "libvirt_network" "ocp_net" {
- name = var.cluster_name
- mode = "nat"
- domain = local.network_domain
- addresses = [var.network_ip_range]
- autostart = true
-
- dns {
- enabled = true
-
- hosts {
- hostname = "host"
- ip = cidrhost(var.network_ip_range, 1)
- }
- hosts {
- hostname = "api"
- ip = cidrhost(var.network_ip_range, 4)
- }
- hosts {
- hostname = "api-int"
- ip = cidrhost(var.network_ip_range, 4)
- }
- hosts {
- hostname = "etcd"
- ip = cidrhost(var.network_ip_range, 4)
- }
- }
-
- dhcp {
- enabled = true
- }
-
- xml {
- xslt = templatefile("${path.module}/templates/network.xslt", { alias = "apps.${local.network_domain}", ip = cidrhost(var.network_ip_range, 4), network_domain = local.network_domain })
- }
-}
diff --git a/network.xslt b/network.xslt
new file mode 100644
index 0000000..4369f7e
--- /dev/null
+++ b/network.xslt
@@ -0,0 +1,25 @@
+<!-- XSLT stylesheet body not reproduced in this excerpt -->
\ No newline at end of file
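
Note: since the stylesheet body above is not shown, the skeleton below is for orientation only. The usual starting point for a libvirt `xml { xslt = ... }` hook is an identity transform plus one or more override templates; the interface-model override here is a hypothetical example, not the contents of the actual file:

```xml
<?xml version="1.0"?>
<!-- Illustrative skeleton only; not the actual network.xslt from this change. -->
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:output method="xml" indent="yes"/>

  <!-- Identity transform: copy the generated domain XML through unchanged. -->
  <xsl:template match="@*|node()">
    <xsl:copy>
      <xsl:apply-templates select="@*|node()"/>
    </xsl:copy>
  </xsl:template>

  <!-- Hypothetical override: force the NIC model on every interface. -->
  <xsl:template match="/domain/devices/interface/model/@type">
    <xsl:attribute name="type">virtio</xsl:attribute>
  </xsl:template>
</xsl:stylesheet>
```
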
diff --git a/post-install.tf b/post-install.tf
index a12f0fb..5e467d1 100644
--- a/post-install.tf
+++ b/post-install.tf
@@ -1,17 +1,17 @@
resource "local_file" "registry_pv" {
- content = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
+ content = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = local.storage_node.ip })
filename = "${var.cluster_name}/registry-pv.yaml"
file_permission = "0644"
}
resource "local_file" "nfs_provisioner" {
- content = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = libvirt_domain.storage.network_interface.0.addresses[0] })
+ content = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = local.storage_node.ip })
filename = "${var.cluster_name}/nfs-provisioner.yaml"
file_permission = "0644"
}
resource "local_file" "ansible_inventory" {
- content = templatefile("${path.module}/templates/inventory", { nodes = local.all_nodes, network_domain = local.network_domain, dns_server = cidrhost(var.network_ip_range, 1) })
+ content = templatefile("${path.module}/templates/inventory", { nodes = local.all_nodes })
filename = "${var.cluster_name}/inventory"
file_permission = "0644"
}
diff --git a/storage.tf b/storage.tf
index 2c49d4f..211c21f 100644
--- a/storage.tf
+++ b/storage.tf
@@ -27,6 +27,15 @@ resource "libvirt_volume" "storage_data_disk" {
size = var.storage_disk_size
}
+locals {
+ storage_node = {
+ name = local.storage_name
+ ip = cidrhost(var.network_ip_range, 6)
+ mac = format(var.network_mac_format, 6)
+ role = "storage"
+ }
+}
+
resource "libvirt_domain" "storage" {
name = local.storage_name
vcpu = var.storage_vcpu
@@ -54,13 +63,16 @@ resource "libvirt_domain" "storage" {
}
network_interface {
- network_id = libvirt_network.ocp_net.id
- addresses = [cidrhost(var.network_ip_range, 6)]
- hostname = "storage"
+ network_name = var.network_name
+ mac = local.storage_node.mac
# When creating the domain resource, wait until the network interface gets
# a DHCP lease from libvirt, so that the computed IP addresses will be
# available when the domain is up and the plan applied.
wait_for_lease = true
}
+
+ xml {
+ xslt = file("${path.module}/network.xslt")
+ }
}
diff --git a/templates/dns.env b/templates/dns.env
deleted file mode 100644
index ec44b8c..0000000
--- a/templates/dns.env
+++ /dev/null
@@ -1,5 +0,0 @@
-export LE_API_HOSTNAME="${api_server}"
-export LE_ROUTER_HOSTNAME="${router}"
-export DNS_ZONE="${dns_zone}"
-export DNS_API_RECORD="api.${cluster_name}"
-export DNS_ROUTER_RECORD="*.apps.${cluster_name}"
diff --git a/templates/inventory b/templates/inventory
index 16a7ed7..e142f22 100644
--- a/templates/inventory
+++ b/templates/inventory
@@ -1,6 +1,4 @@
[hypervisor]
[hypervisor:vars]
-network_domain=${network_domain}
-dns_server=${dns_server}
nodes=${jsonencode(nodes)}
\ No newline at end of file
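
Note: with `network_domain` and `dns_server` gone, the inventory template only interpolates the node list, so the rendered `<cluster_name>/inventory` can be previewed from the nodes alone. A sketch with a trimmed, hypothetical node list:

```hcl
# Illustrative render; node names and addresses are examples.
output "inventory_preview" {
  value = templatefile("${path.module}/templates/inventory", {
    nodes = [
      { name = "lb", ip = "192.168.7.4", role = "lb" },
      { name = "master1", ip = "192.168.7.11", role = "master" },
    ]
  })
  # Renders roughly as:
  #   [hypervisor]
  #   [hypervisor:vars]
  #   nodes=[{"ip":"192.168.7.4","name":"lb","role":"lb"},{"ip":"192.168.7.11","name":"master1","role":"master"}]
}
```
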
diff --git a/templates/lb/network-config.cfg b/templates/lb/network-config.cfg
index e1d25c5..39ca322 100644
--- a/templates/lb/network-config.cfg
+++ b/templates/lb/network-config.cfg
@@ -1,11 +1,4 @@
version: 2
ethernets:
eth0:
- addresses:
- - ${ip}/24
- #gateway4: ${gw}
- nameservers:
- addresses: [${dns}]
- eth1:
- dhcp4: true
- nameservers: {}
+ dhcp4: true
\ No newline at end of file
diff --git a/templates/network.xslt b/templates/network.xslt
deleted file mode 100644
index 9a49f5c..0000000
--- a/templates/network.xslt
+++ /dev/null
@@ -1,32 +0,0 @@
-<!-- XSLT stylesheet body not reproduced in this excerpt -->
diff --git a/terraform.tfvars.sample b/terraform.tfvars.sample
index 5f39771..82b81ee 100644
--- a/terraform.tfvars.sample
+++ b/terraform.tfvars.sample
@@ -1,7 +1,8 @@
base_domain = "PUT_YOUR_DNS_DOMAIN_HERE"
-external_mac_address = "02:00:00:00:00:04"
public_cluster_ip = "1.2.3.4"
-network_ip_range = "10.10.0.0/24"
+network_ip_range = "192.168.7.0/24"
+network_mac_format = "02:01:07:00:07:%02x"
cluster_name = "__CLUSTER_NAME__"
bootstrap_nodes = 1
+worker_nodes = 2
acme_account_email = "your.username@redhat.com"
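
Note: with the sample values above, the fixed host offsets used across the `*.tf` files yield a deterministic address plan, which is useful when pre-creating DHCP reservations on the externally managed `lab` network (an assumption about how that network is provisioned). A sketch for one node of each role:

```hcl
# Offsets used in the module: lb=4, bootstrap=5, storage=6, masters=11+, workers=21+.
locals {
  sample_plan = {
    lb        = { ip = cidrhost("192.168.7.0/24", 4), mac = format("02:01:07:00:07:%02x", 4) }   # 192.168.7.4  / ...:04
    bootstrap = { ip = cidrhost("192.168.7.0/24", 5), mac = format("02:01:07:00:07:%02x", 5) }   # 192.168.7.5  / ...:05
    storage   = { ip = cidrhost("192.168.7.0/24", 6), mac = format("02:01:07:00:07:%02x", 6) }   # 192.168.7.6  / ...:06
    master1   = { ip = cidrhost("192.168.7.0/24", 11), mac = format("02:01:07:00:07:%02x", 11) } # 192.168.7.11 / ...:0b
    worker1   = { ip = cidrhost("192.168.7.0/24", 21), mac = format("02:01:07:00:07:%02x", 21) } # 192.168.7.21 / ...:15
  }
}
```
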
diff --git a/variables.tf b/variables.tf
index 6ef418e..3fca289 100644
--- a/variables.tf
+++ b/variables.tf
@@ -38,23 +38,24 @@ variable "cluster_name" {
default = "ocp4"
}
-variable "external_ifname" {
+variable "base_domain" {
type = string
- default = "virbr1"
+ default = "ocp.lab"
}
-variable "external_mac_address" {
- type = string
+variable "network_name" {
+ type = string
+ default = "lab"
}
-variable "base_domain" {
+variable "network_ip_range" {
type = string
- default = "ocp.lab"
+ default = "192.168.7.0/24"
}
-variable "network_ip_range" {
+variable "network_mac_format" {
type = string
- default = "10.10.3.0/24"
+ default = "02:01:07:00:07:%02x"
}
variable "public_cluster_ip" {
diff --git a/worker.tf b/worker.tf
index b99c88d..d3b1c15 100644
--- a/worker.tf
+++ b/worker.tf
@@ -12,6 +12,15 @@ resource "libvirt_ignition" "worker_ignition" {
content = file("${path.module}/${var.cluster_name}/worker.ign")
}
+locals {
+ worker_nodes = [for i in range(var.worker_nodes) : {
+ name = format(local.worker_format, i + 1)
+ ip = cidrhost(var.network_ip_range, 21 + i)
+ mac = format(var.network_mac_format, 21 + i)
+ role = "worker"
+ }]
+}
+
resource "libvirt_domain" "worker" {
count = var.worker_nodes
name = format(local.worker_format, count.index + 1)
@@ -35,13 +44,12 @@ resource "libvirt_domain" "worker" {
}
network_interface {
- network_id = libvirt_network.ocp_net.id
- addresses = [cidrhost(var.network_ip_range, 21 + count.index)]
- hostname = format("worker%d", count.index + 1)
-
- # When creating the domain resource, wait until the network interface gets
- # a DHCP lease from libvirt, so that the computed IP addresses will be
- # available when the domain is up and the plan applied.
- wait_for_lease = true
+ network_name = var.network_name
+ mac = element(local.worker_nodes.*.mac, count.index)
+ wait_for_lease = false
+ }
+
+ xml {
+ xslt = file("${path.module}/network.xslt")
}
}