Browse Source

add kubespray

main
Nicolas Massé 4 years ago
parent
commit
d32f2c5b6d
  1. 3
      .gitignore
  2. 4
      .gitmodules
  3. 1
      ansible/kubespray
  4. 2
      ansible/start.yaml
  5. 2
      ansible/stop.yaml
  6. 59
      clusterctl
  7. 6
      kubespray.yaml.sample
  8. 10
      post-install.tf
  9. 5
      templates/base/cloud-init.cfg
  10. 5
      templates/inventory
  11. 47
      templates/inventory.yaml
  12. 20
      templates/nfs-provisioner.yaml
  13. 25
      templates/registry-pv.yaml

3
.gitignore

@@ -8,3 +8,6 @@ install-config.yaml
.lego .lego
local.env local.env
.clusters .clusters
ansible/ansible-venv
kubespray.yaml

4
.gitmodules

@@ -0,0 +1,4 @@
[submodule "kubespray"]
path = ansible/kubespray
url = https://github.com/kubernetes-sigs/kubespray
branch = release-2.18

1
ansible/kubespray

@@ -0,0 +1 @@
Subproject commit 2cc5f04bada0938caedab680872aefcb69c9926c

2
ansible/start.yaml

@@ -1,5 +1,5 @@
- name: Start the Kubernetes cluster - name: Start the Kubernetes cluster
hosts: bastion hosts: jumphost
gather_facts: no gather_facts: no
become: no become: no
vars: vars:

2
ansible/stop.yaml

@@ -1,5 +1,5 @@
- name: Stop the Kubernetes cluster - name: Stop the Kubernetes cluster
hosts: bastion hosts: jumphost
gather_facts: no gather_facts: no
become: no become: no
vars: vars:

59
clusterctl

@@ -26,11 +26,14 @@ function init () {
mkdir -p ".clusters/$cluster_name" mkdir -p ".clusters/$cluster_name"
sed "s/__CLUSTER_NAME__/$cluster_name/" terraform.tfvars > ".clusters/$cluster_name/terraform.tfvars" sed "s/__CLUSTER_NAME__/$cluster_name/" terraform.tfvars > ".clusters/$cluster_name/terraform.tfvars"
cp kubespray.yaml ".clusters/$cluster_name/kubespray.yaml"
ln -sf ../../ansible/kubespray/inventory/sample/group_vars/ ".clusters/$cluster_name/group_vars"
echo "Cluster $cluster_name initialized successfully!" echo "Cluster $cluster_name initialized successfully!"
echo echo
echo "Review and adjust the following files to your needs:" echo "Review and adjust the following files to your needs:"
echo "- .clusters/$cluster_name/terraform.tfvars" echo "- .clusters/$cluster_name/terraform.tfvars"
echo "- .clusters/$cluster_name/kubespray.yaml"
echo echo
exit 0 exit 0
} }
@@ -54,21 +57,20 @@ function start () {
assert_cluster_name "$@" assert_cluster_name "$@"
local cluster_name="${1:-}" local cluster_name="${1:-}"
ansible-playbook -i ".clusters/$cluster_name/inventory" ansible/start.yaml ansible-playbook -i ".clusters/$cluster_name/inventory.yaml" ansible/start.yaml
} }
function stop () { function stop () {
assert_cluster_name "$@" assert_cluster_name "$@"
local cluster_name="${1:-}" local cluster_name="${1:-}"
ansible-playbook -i ".clusters/$cluster_name/inventory" ansible/stop.yaml ansible-playbook -i ".clusters/$cluster_name/inventory.yaml" ansible/stop.yaml
} }
function post_install_nfs () { function post_install_nfs () {
local cluster_name="${1:-}" local cluster_name="${1:-}"
kubectl apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/registry-pv.yaml" kubectl apply --insecure-skip-tls-verify -f ".clusters/$cluster_name/nfs-provisioner.yaml"
kubectl apply --insecure-skip-tls-verify --kubeconfig=".clusters/$cluster_name/auth/kubeconfig" -f ".clusters/$cluster_name/nfs-provisioner.yaml"
} }
function post_install () { function post_install () {
@@ -80,11 +82,48 @@ function post_install () {
set nfs set nfs
fi fi
export KUBECONFIG="$PWD/.clusters/$cluster_name/kube.config"
for i; do for i; do
post_install_$i "$cluster_name" post_install_$i "$cluster_name"
done done
} }
function ensure_kubespray_venv_ready () {
if [ ! -d "ansible/ansible-venv" ]; then
python -m venv ansible/ansible-venv
ansible-venv/bin/pip install -r ansible/kubespray/requirements.txt
ansible-venv/bin/pip install selinux
fi
}
function kubespray_install () {
local cluster_name="${1:-}"
ensure_kubespray_venv_ready
ansible/ansible-venv/bin/ansible-playbook -i ."clusters/$cluster_name/inventory.yaml" ansible/kubespray/cluster.yml -e "@.clusters/$cluster_name/kubespray.yaml"
ln -sf artifacts/admin.conf ".clusters/$cluster_name/kube.config"
}
function kubespray_remove () {
local cluster_name="${1:-}"
ensure_kubespray_venv_ready
ansible/ansible-venv/bin/ansible-playbook -i ."clusters/$cluster_name/inventory.yaml" ansible/kubespray/reset.yml -e "@.clusters/$cluster_name/kubespray.yaml"
rm -f ".clusters/$cluster_name/kube.config"
}
function kubespray () {
assert_cluster_name "$@"
local cluster_name="${1:-}"
shift
if [ $# -eq 0 ]; then
set install
fi
kubespray_$1 "$cluster_name"
}
function install_addon () { function install_addon () {
assert_cluster_name "$@" assert_cluster_name "$@"
local cluster_name="${1:-}" local cluster_name="${1:-}"
@@ -98,7 +137,7 @@ function shell () {
local cluster_name="${1:-}" local cluster_name="${1:-}"
# Ansible # Ansible
export DEFAULT_HOST_LIST="$PWD/.clusters/$cluster_name" export DEFAULT_HOST_LIST="$PWD/.clusters/$cluster_name/inventory.yaml"
# Terraform # Terraform
export TF_CLI_ARGS_plan="-var-file=.clusters/$cluster_name/terraform.tfvars -state=.clusters/$cluster_name/terraform.tfstate" export TF_CLI_ARGS_plan="-var-file=.clusters/$cluster_name/terraform.tfvars -state=.clusters/$cluster_name/terraform.tfstate"
@@ -108,7 +147,7 @@ function shell () {
export TF_CLI_ARGS_state_rm="-state=.clusters/$cluster_name/terraform.tfstate" export TF_CLI_ARGS_state_rm="-state=.clusters/$cluster_name/terraform.tfstate"
# Kubernetes # Kubernetes
export KUBECONFIG="$PWD/.clusters/$cluster_name/auth/kubeconfig" export KUBECONFIG="$PWD/.clusters/$cluster_name/kube.config"
export KUBECTL_BINARY="$(which kubectl)" export KUBECTL_BINARY="$(which kubectl)"
export CLUSTER_NAME="$cluster_name" export CLUSTER_NAME="$cluster_name"
export PS1="[$CLUSTER_NAME:\w] " export PS1="[$CLUSTER_NAME:\w] "
@@ -161,6 +200,14 @@ apply)
shift shift
apply "$@" apply "$@"
;; ;;
kubespray)
if [ -z "${2:-}" ]; then
echo "Usage: $0 kubespray cluster-name {install|remove}"
exit 1
fi
shift
kubespray "$@"
;;
destroy) destroy)
if [ -z "${2:-}" ]; then if [ -z "${2:-}" ]; then
echo "Usage: $0 destroy cluster-name" echo "Usage: $0 destroy cluster-name"

6
kubespray.yaml.sample

@@ -0,0 +1,6 @@
container_manager: crio
download_container: 'false'
etcd_kubeadm_enabled: 'true'
loadbalancer_apiserver_type: nginx
credentials_dir: "{{ inventory_dir }}"
kubeconfig_localhost: true

10
post-install.tf

@@ -1,9 +1,3 @@
resource "local_file" "registry_pv" {
content = templatefile("${path.module}/templates/registry-pv.yaml", { nfs_server = local.storage_node.ip })
filename = ".clusters/${var.cluster_name}/registry-pv.yaml"
file_permission = "0644"
}
resource "local_file" "nfs_provisioner" { resource "local_file" "nfs_provisioner" {
content = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = local.storage_node.ip }) content = templatefile("${path.module}/templates/nfs-provisioner.yaml", { nfs_server = local.storage_node.ip })
filename = ".clusters/${var.cluster_name}/nfs-provisioner.yaml" filename = ".clusters/${var.cluster_name}/nfs-provisioner.yaml"
@@ -11,7 +5,7 @@ resource "local_file" "nfs_provisioner" {
} }
resource "local_file" "ansible_inventory" { resource "local_file" "ansible_inventory" {
content = templatefile("${path.module}/templates/inventory", { nodes = local.all_nodes }) content = templatefile("${path.module}/templates/inventory.yaml", { nodes = local.all_nodes, lb_ip = local.lb_node.ip })
filename = ".clusters/${var.cluster_name}/inventory" filename = ".clusters/${var.cluster_name}/inventory.yaml"
file_permission = "0644" file_permission = "0644"
} }

5
templates/base/cloud-init.cfg

@@ -8,6 +8,7 @@ users:
gecos: Nicolas MASSE gecos: Nicolas MASSE
groups: sudo groups: sudo
lock_passwd: false lock_passwd: false
shell: /bin/bash
passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0 passwd: $6$XUTB20jVVXIqh78k$L1A9Lft5JlbOtNbeDP.fOZ5giLl09LfJGGCon5uwtsIhPJoNkj4SIk08Rb6vSowOps2ik5tlUwT2ZOZ6jjr7.0
ssh_authorized_keys: ssh_authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPR1tt58X0+vbvsCR12gMAqr+g7vjt1Fx/qqz9EiboIs nicolas@localhost.localdomain
@@ -19,3 +20,7 @@ runcmd:
- [ "systemctl", "restart", "sshd" ] - [ "systemctl", "restart", "sshd" ]
# Enable sudo without password # Enable sudo without password
- [ "sed", "-i.post-install", "-e", "s/^%sudo\tALL=(ALL:ALL) ALL/%sudo ALL=(ALL:ALL) NOPASSWD: ALL/", "/etc/sudoers" ] - [ "sed", "-i.post-install", "-e", "s/^%sudo\tALL=(ALL:ALL) ALL/%sudo ALL=(ALL:ALL) NOPASSWD: ALL/", "/etc/sudoers" ]
packages:
# Needed to mount NFS Persistent Volumes
- nfs-client

5
templates/inventory

@@ -1,5 +0,0 @@
[all:vars]
nodes=${jsonencode(nodes)}
[bastion]
admin.itix.lab ansible_user=nicolas

47
templates/inventory.yaml

@@ -0,0 +1,47 @@
all:
vars:
ansible_user: nicolas
children:
jumphost:
hosts:
admin.itix.lab:
ansible_user: nicolas
nodes: ${jsonencode(nodes)}
etcd:
vars:
ansible_become: yes
ansible_ssh_common_args: -o StrictHostKeyChecking=no
hosts:
%{for node in nodes}
%{if node.role == "master"}
${jsonencode(node.name)}:
ansible_host: ${jsonencode(node.ip)}
etcd_member_name: ${jsonencode(node.name)}
%{endif}
%{endfor}
k8s_cluster:
vars:
ansible_become: yes
ansible_ssh_common_args: -o StrictHostKeyChecking=no
loadbalancer_apiserver:
address: ${lb_ip}
port: 6443
children:
calico_rr: {}
kube_control_plane:
hosts:
%{for node in nodes}
%{if node.role == "master"}
${jsonencode(node.name)}: {}
%{endif}
%{endfor}
kube_node:
hosts:
%{for node in nodes}
%{if node.role == "worker"}
${jsonencode(node.name)}:
ansible_host: ${jsonencode(node.ip)}
%{endif}
%{endfor}
ungrouped: {}

20
templates/nfs-provisioner.yaml

@@ -1,13 +1,13 @@
kind: Namespace kind: Namespace
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: "openshift-nfs-provisioner" name: "nfs-provisioner"
--- ---
kind: ServiceAccount kind: ServiceAccount
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: nfs-client-provisioner name: nfs-client-provisioner
namespace: "openshift-nfs-provisioner" namespace: "nfs-provisioner"
--- ---
kind: ClusterRole kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
@@ -34,7 +34,7 @@ metadata:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nfs-client-provisioner name: nfs-client-provisioner
namespace: "openshift-nfs-provisioner" namespace: "nfs-provisioner"
roleRef: roleRef:
kind: ClusterRole kind: ClusterRole
name: nfs-client-provisioner-runner name: nfs-client-provisioner-runner
@@ -44,21 +44,17 @@ kind: Role
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: nfs-client-provisioner name: nfs-client-provisioner
namespace: "openshift-nfs-provisioner" namespace: "nfs-provisioner"
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["endpoints"] resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"] verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["security.openshift.io"]
resourceNames: ["hostmount-anyuid"]
resources: ["securitycontextconstraints"]
verbs: ["use"]
--- ---
kind: RoleBinding kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: nfs-client-provisioner name: nfs-client-provisioner
namespace: "openshift-nfs-provisioner" namespace: "nfs-provisioner"
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nfs-client-provisioner name: nfs-client-provisioner
@@ -71,7 +67,7 @@ kind: Deployment
apiVersion: apps/v1 apiVersion: apps/v1
metadata: metadata:
name: nfs-client-provisioner name: nfs-client-provisioner
namespace: "openshift-nfs-provisioner" namespace: "nfs-provisioner"
spec: spec:
replicas: 1 replicas: 1
selector: selector:
@@ -93,7 +89,7 @@ spec:
mountPath: /persistentvolumes mountPath: /persistentvolumes
env: env:
- name: PROVISIONER_NAME - name: PROVISIONER_NAME
value: redhat-emea-ssa-team/hetzner-ocp4 value: nfs-subdir-external-provisioner
- name: NFS_SERVER - name: NFS_SERVER
value: "${nfs_server}" value: "${nfs_server}"
- name: NFS_PATH - name: NFS_PATH
@@ -110,6 +106,6 @@ metadata:
name: managed-nfs-storage name: managed-nfs-storage
annotations: annotations:
storageclass.kubernetes.io/is-default-class: "true" storageclass.kubernetes.io/is-default-class: "true"
provisioner: redhat-emea-ssa-team/hetzner-ocp4 provisioner: nfs-subdir-external-provisioner
parameters: parameters:
archiveOnDelete: "false" archiveOnDelete: "false"

25
templates/registry-pv.yaml

@@ -1,25 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-registry-storage
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 100Gi
nfs:
path: "/srv/nfs/pv-infra-registry"
server: "${nfs_server}"
persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: registry-storage
namespace: openshift-image-registry
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 100Gi
Loading…
Cancel
Save