Browse Source

server re-install

master
Nicolas Massé 7 years ago
parent
commit
2f7bcc6a03
  1. 123
      README.md
  2. 2
      openshift-ansible
  3. 15
      playbooks/configure-openshift-access-control.yml
  4. 60
      playbooks/deploy-prometheus.yml
  5. 1
      playbooks/preparation.yml
  6. 7
      playbooks/provision-global-templates-and-imagestreams.yml
  7. 11
      playbooks/site.yml
  8. 15
      playbooks/switch-to-iptables.yml
  9. 40
      prod.hosts
  10. 10
      roles/base/tasks/main.yml
  11. 2
      roles/base/templates/hosts
  12. 2
      roles/hostpath-provisioner/tasks/main.yml
  13. 48
      roles/name-resolution/tasks/main.yml
  14. 28
      roles/name-resolution/templates/dnsmasq.conf
  15. 5
      roles/name-resolution/templates/resolv.conf
  16. 8
      roles/openshift-prereq/tasks/main.yml
  17. 3
      roles/openshift-templates/defaults/main.yml
  18. 520
      roles/openshift-templates/files/sso71-allinone.yaml
  19. 105
      roles/openshift-templates/tasks/main.yml
  20. 8
      roles/sso/defaults/main.yml
  21. 64
      roles/sso/tasks/main.yml

123
README.md

@ -1,4 +1,123 @@
# OpenShift-Lab # OpenShift-Lab
This project is my Ansible Playbook to install OpenShift on my personal server.
More to see there... Well, maybe... This project is my Ansible Playbook to install OpenShift on my Hetzner server.
## Operating System install
Go to [access.redhat.com](https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.6/x86_64/product-software) and download the boot ISO image of the latest RHEL 7.
Upload this ISO image to any large file transfer such as [send.firefox.com](https://send.firefox.com) or [dl.free.fr](http://dl.free.fr/).
Go to your [Hetzner console](https://robot.your-server.de/server), select your server and book a KVM (**Support** > **Remote Console (KVM)** > **I would like to make an appointment**).
Choose a date, time and duration. For the duration, two hours should be enough.
In the message box, type something like:
```raw
Dear Hetzner Support team,
I would like to install RHEL 7 on my server. Could you please burn the following ISO image on a CD or prepare a USB Key accordingly for me?
<Put the link to the ISO image here>
Many thanks for your help.
Best regards.
```
Click **Send Request**
At the specified timeframe, you should receive a mail containing the login details to connect to your KVM.
Open the KVM console. This is a Java applet, so make sure there is no security restriction on its execution.
Reboot your server using the **Ctrl+Alt+Delete** button.
When the bios shows up, press **<F11>** to enter the boot menu and boot from the CD or USB Key, according to the Hetzner instructions.
[![Hetzner install](https://img.youtube.com/vi/q-brW2_23Lo/0.jpg)](https://www.youtube.com/watch?v=q-brW2_23Lo)
## Getting a public certificate with Let's Encrypt
On the Ansible control node, install [lego](https://github.com/go-acme/lego):
```sh
brew install lego
```
Get a certificate for the wildcard domain as well as the master hostname:
```sh
GANDIV5_API_KEY=[REDACTED] lego -d openshift.itix.fr -d app.itix.fr -d '*.app.itix.fr' -a -m your.email@example.test --path $HOME/.lego --dns gandiv5 run
```
See [this guide](https://github.com/nmasse-itix/OpenShift-Examples/tree/master/Public-Certificates-with-Letsencrypt) for more details.
## Preparation
Register the server on RHN:
```sh
sudo subscription-manager register --name=openshift.itix.fr
sudo subscription-manager refresh
sudo subscription-manager list --available --matches '*Employee SKU*'
sudo subscription-manager attach --pool=8a85f9833e1404a9013e3cddf95a0599
```
Edit `/etc/sysconfig/network-scripts/ifcfg-eno1` and add:
```sh
NM_CONTROLLED="yes"
PEERDNS="yes"
DOMAIN="itix.fr"
```
## OpenShift Install
Create a file named `group_vars/OSEv3`, containing your secrets:
```sh
cat <<EOF > group_vars/OSEv3
---
# Generated on https://access.redhat.com/terms-based-registry/
oreg_auth_password: your.password.here
oreg_auth_user: '123|user-name'
# The regular user account you created on your server
ansible_ssh_user: nicolas
openshift_additional_registry_credentials:
- host: registry.connect.redhat.com
user: rhn-username
password: rhn-password
test_image: sonatype/nexus-repository-manager:latest
# see: https://github.com/nmasse-itix/OpenShift-Examples/tree/master/Login-to-OpenShift-with-your-Google-Account
openshift_master_identity_providers:
- name: RedHat
login: true
challenge: false
kind: GoogleIdentityProvider
clientID: your.client_id.apps.googleusercontent.com
clientSecret: your.client_secret.here
hostedDomain: redhat.com
EOF
```
Create a file named `group_vars/all`, containing your global variables:
```sh
cat <<EOF > group_vars/all
---
# The regular user account you created on your server
ansible_ssh_user: nicolas
EOF
```
Run the OpenShift install:
```sh
ansible-playbook -i prod.hosts playbooks/preparation.yml
ansible-playbook -i prod.hosts openshift-ansible/playbooks/deploy_cluster.yml
ansible-playbook -i prod.hosts playbooks/post-install.yml
```

2
openshift-ansible

@ -1 +1 @@
Subproject commit 5c64b07726038812ef6633e8b815a75aa30248b2 Subproject commit af96f82881eb95231e1a9fc7db06d9d031963b1d

15
playbooks/configure-openshift-access-control.yml

@ -3,17 +3,6 @@
- name: Configure the OpenShift Access Control Layer - name: Configure the OpenShift Access Control Layer
hosts: itix hosts: itix
become: yes become: yes
vars:
itix_sso_route: sso.{{ openshift_master_default_subdomain }}
tasks: tasks:
- name: Remove authenticated users the right to create projects - name: Nicolas is admin
command: oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth command: oc adm policy add-cluster-role-to-user cluster-admin nicolas.masse@itix.fr
- name: Nicolas can create projects
command: oc adm policy add-cluster-role-to-user self-provisioner nicolas.masse@itix.fr
- name: Give the monitoring rights to nicolas
command: oc adm policy add-role-to-user view nicolas.masse@itix.fr -n openshift-metrics
roles:
- { name: 'sso', tags: 'sso' }

60
playbooks/deploy-prometheus.yml

@ -1,60 +0,0 @@
---
- name: Deploy Prometheus on OpenShift
hosts: itix
become: yes
vars:
itix_openshift_docker_images_repo_url: https://github.com/nmasse-itix/OpenShift-Docker-Images.git
itix_prometheus_project: openshift-metrics
itix_grafana_route: grafana.{{ openshift_master_default_subdomain }}
itix_prometheus_route: prometheus.{{ openshift_master_default_subdomain }}
itix_alerts_route: prometheus-alerts.{{ openshift_master_default_subdomain }}
tasks:
- name: Create a temporary directory
tempfile:
state: directory
register: tempfile
- name: Clone the nmasse-itix/OpenShift-Docker-Images GIT repository
git:
repo: '{{ itix_openshift_docker_images_repo_url }}'
dest: '{{ tempfile.path }}'
version: '{{ itix_openshift_docker_images_repo_tag|default(''master'') }}'
- name: Process the grafana-prometheus-storage template
command: oc process -f '{{ tempfile.path }}/grafana/grafana-prometheus-storage.yaml' -p 'NAMESPACE={{ itix_prometheus_project }}'
register: oc_process
- set_fact:
grafana_prometheus_storage: '{{ oc_process.stdout }}'
- name: Process the grafana-prometheus template
command: oc process -f '{{ tempfile.path }}/grafana/grafana-prometheus.yaml' -p 'NAMESPACE={{ itix_prometheus_project }}' -p 'PROMETHEUS_ROUTE_HOSTNAME={{ itix_prometheus_route }}' -p 'ALERTS_ROUTE_HOSTNAME={{ itix_alerts_route }}'
register: oc_process
- set_fact:
grafana_prometheus: '{{ oc_process.stdout }}'
- name: Process the grafana-base template
command: oc process -f '{{ tempfile.path }}/grafana/grafana-base.yaml' -p 'NAMESPACE={{ itix_prometheus_project }}' -p 'GRAFANA_ROUTE_HOSTNAME={{ itix_grafana_route }}'
register: oc_process
- set_fact:
grafana_base: '{{ oc_process.stdout }}'
- name: Create the objects
command: oc create -f -
args:
stdin: '{{ item|to_json }}'
register: oc
failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr
changed_when: oc.rc == 0
with_items:
- '{{ grafana_prometheus_storage }}'
- '{{ grafana_prometheus }}'
- '{{ grafana_base }}'
- name: Delete the temporary directory
file:
path: '{{ tempfile.path }}'
state: absent

1
playbooks/preparation.yml

@ -5,6 +5,5 @@
become: yes become: yes
roles: roles:
- { name: 'base', tags: 'base' } - { name: 'base', tags: 'base' }
- { name: 'name-resolution', tags: 'name-resolution' }
- { name: 'docker', tags: 'docker' } - { name: 'docker', tags: 'docker' }
- { name: 'openshift-prereq', tags: 'openshift-prereq' } - { name: 'openshift-prereq', tags: 'openshift-prereq' }

7
playbooks/provision-global-templates-and-imagestreams.yml

@ -1,7 +0,0 @@
---
- name: Provision the default templates and image streams in OpenShift
hosts: itix
become: yes
roles:
- { name: 'openshift-templates', tags: 'openshift-templates' }

11
playbooks/site.yml

@ -1,20 +1,17 @@
--- ---
- include: "switch-to-iptables.yml"
- include: "preparation.yml" - include: "preparation.yml"
# Launch the OpenShift Installer Playbook # Launch the OpenShift Installer Playbook
- include: "../openshift-ansible/playbooks/byo/openshift-cluster/config.yml" - include: "../openshift-ansible/playbooks/deploy_cluster.yml"
- include: "post-install.yml" - include: "post-install.yml"
# Update the default templates and image streams # Update the default templates and image streams
- include: "../openshift-ansible/playbooks/byo/openshift-master/additional_config.yml" - include: "../openshift-ansible/playbooks/openshift-master/additional_config.yml"
- include: "provision-global-templates-and-imagestreams.yml"
- include: "deploy-prometheus.yml" # Deploy the let's encrypt certificates
- include: "../openshift-ansible/playbooks/redeploy-certificates.yml"
- include: "configure-openshift-access-control.yml" - include: "configure-openshift-access-control.yml"

15
playbooks/switch-to-iptables.yml

@ -1,15 +0,0 @@
---
- name: Switch to iptables
hosts: itix
become: yes
tasks:
- name: Install iptables-services
yum: name=iptables-services state=installed
tags: rpm
- name: Disable firewalld
service: name=firewalld state=stopped enabled=no
- name: Enable iptables
service: name=iptables state=started enabled=yes

40
prod.hosts

@ -3,10 +3,7 @@
# #
[itix:vars] [itix:vars]
itix_dns_suffix=itix.fr itix_dns_suffix=itix.fr
itix_openshift_version=3.10 itix_openshift_version=3.11
itix_application_templates_repo_tag=ose-v1.4.7
itix_openshift_origin_repo_tag=release-3.10
itix_openshift_docker_images_repo_tag=master
[itix:children] [itix:children]
masters masters
@ -25,7 +22,7 @@ openshift.itix.fr
openshift.itix.fr openshift.itix.fr
[nodes] [nodes]
openshift.itix.fr openshift_node_group_name='node-config-all-in-one' openshift_kubelet_name_override='openshift.itix.fr' openshift.itix.fr openshift_node_group_name='node-config-all-in-one'
# #
# The rest is used only by the OpenShift installer playbook # The rest is used only by the OpenShift installer playbook
@ -36,14 +33,6 @@ nodes
etcd etcd
[OSEv3:vars] [OSEv3:vars]
#
# Starting with 3.6, default templates and imagestreams can be left out
# see https://bugzilla.redhat.com/show_bug.cgi?id=1506578
#
openshift_install_examples=true
openshift_examples_load_quickstarts=false
openshift_examples_load_xpaas=false
# Yes, we need to use sudo # Yes, we need to use sudo
ansible_become=yes ansible_become=yes
@ -53,9 +42,6 @@ deployment_type=openshift-enterprise
# Clustering method # Clustering method
openshift_master_cluster_method=native openshift_master_cluster_method=native
# Bypass Registry Security Checks
openshift_docker_insecure_registries=172.30.0.0/16
# Make sure NTP is enabled # Make sure NTP is enabled
openshift_clock_enabled=true openshift_clock_enabled=true
@ -65,3 +51,25 @@ openshift_additional_projects={}
# Enable the "flat network" SDN # Enable the "flat network" SDN
os_sdn_network_plugin_name='redhat/openshift-ovs-subnet' os_sdn_network_plugin_name='redhat/openshift-ovs-subnet'
# Do not use CRI-O
openshift_use_crio=false
# Use Firewalld
os_firewall_use_firewalld=yes
# Let's encrypt Certificates
openshift_master_overwrite_named_certificates=true
openshift_master_named_certificates=[{ "certfile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.crt", "keyfile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.key", "cafile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.issuer.crt", "names": [ "openshift.itix.fr" ] }]
openshift_hosted_router_certificate={ "certfile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.crt", "keyfile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.key", "cafile": "{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.issuer.crt" }
openshift_master_openid_ca_file="{{ lookup('env','HOME') }}/.lego/certificates/openshift.itix.fr.issuer.crt"
# Since we are using Let's Encrypt, generate a warning about expiration only
# during the last 30 days
openshift_certificate_expiry_warning_days=30
# Since we have a public certificate for the master console, we need two different hostnames
openshift_master_cluster_hostname=openshift-internal.itix.fr
openshift_master_cluster_public_hostname=openshift.itix.fr
# Skip docker images check since it fails no matter what
openshift_disable_check=docker_image_availability

10
roles/base/tasks/main.yml

@ -31,14 +31,14 @@
- openssh-clients - openssh-clients
tags: rpm tags: rpm
- name: Install Open-VM tools
yum: name=open-vm-tools state=installed
tags: rpm
- name: Fix /etc/environment to include PATH - name: Fix /etc/environment to include PATH
lineinfile: dest=/etc/environment regexp="^PATH=" line="PATH=/bin:/usr/bin:/sbin:/usr/sbin" lineinfile: dest=/etc/environment regexp="^PATH=" line="PATH=/bin:/usr/bin:/sbin:/usr/sbin"
tags: config tags: config
- name: Make sure each machine has an up-to-date /etc/hosts
template: dest=/etc/hosts src=hosts
tags: config
- name: Persist the hostname - name: Persist the hostname
lineinfile: dest=/etc/sysconfig/network regexp="^HOSTNAME=" line="HOSTNAME={{ inventory_hostname_short }}" lineinfile: dest=/etc/sysconfig/network regexp="^HOSTNAME=" line="HOSTNAME={{ inventory_hostname_short }}"
tags: tags:
@ -46,7 +46,7 @@
- dns - dns
- name: Set the hostname - name: Set the hostname
command: hostnamectl set-hostname {{ inventory_hostname_short }} --static command: hostnamectl set-hostname {{ inventory_hostname_short }}
tags: tags:
- config - config
- dns - dns

2
roles/name-resolution/templates/hosts → roles/base/templates/hosts

@ -2,8 +2,6 @@
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
{% if "name-server" not in groups %}
{% for item in groups['all'] %} {% for item in groups['all'] %}
{{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ hostvars[item]['inventory_hostname']}} {{ hostvars[item]['inventory_hostname_short']}} {{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ hostvars[item]['inventory_hostname']}} {{ hostvars[item]['inventory_hostname_short']}}
{% endfor %} {% endfor %}
{% endif %}

2
roles/hostpath-provisioner/tasks/main.yml

@ -35,5 +35,5 @@
changed_when: oc.rc == 0 changed_when: oc.rc == 0
- name: Add the new volume to docker-registry - name: Add the new volume to docker-registry
command: oc volume dc docker-registry -n default --add=true --overwrite=true --type=persistentVolumeClaim --name=registry-storage --claim-name=registry-storage command: oc set volume dc docker-registry -n default --add=true --overwrite=true --type=persistentVolumeClaim --name=registry-storage --claim-name=registry-storage
when: hostpath_provisioner_patch_docker_registry when: hostpath_provisioner_patch_docker_registry

48
roles/name-resolution/tasks/main.yml

@ -1,48 +0,0 @@
---
- name: Check for mandatory variables required by this playbook
fail:
msg: "This playbook requires {{item}} to be set."
when: "item is not defined or item == ''"
with_items:
- itix_dns_suffix
- openshift_master_default_subdomain
- name: Make sure each machine has an up-to-date /etc/hosts
template: dest=/etc/hosts src=hosts
tags: config
- name: Install dnsmasq
yum: name=dnsmasq state=installed
when: "'name-server' in group_names" # Only on admin server
tags: rpm
- name: Set dnsmasq config
template: src=dnsmasq.conf dest=/etc/dnsmasq.conf
when: "'name-server' in group_names" # Only on admin server
tags: config
- name: Generate an /etc/hosts with all hosts
template: dest=/etc/hosts.dnsmasq src=hosts
when: "'name-server' in group_names" # Only on admin server
tags: config
- name: Make sure dnsmasq daemon is enabled and started
service: name=dnsmasq state=started enabled=yes
when: "'name-server' in group_names" # Only on admin server
tags: config
- name: Add an iptable rule to allow DNS queries from other hosts
lineinfile: dest=/etc/sysconfig/iptables line="-A INPUT -p udp --dport 53 -j ACCEPT" insertafter="-A INPUT -i lo -j ACCEPT"
when: "'name-server' in group_names" # Only on admin server
tags: iptables
- name: Restart iptables
service: name=iptables enabled=yes state=restarted
when: "'name-server' in group_names" # Only on admin server
tags: iptables
- name: Fix the /etc/resolv.conf of other hosts
template: dest=/etc/resolv.conf src=resolv.conf
when: "'name-server' in groups and 'name-server' not in group_names" # On all other nodes (if a name server has been setup)
tags: config

28
roles/name-resolution/templates/dnsmasq.conf

@ -1,28 +0,0 @@
# {{ ansible_managed }}
domain-needed
bogus-priv
expand-hosts
log-queries
local-ttl=60
# Do not read the default /etc/hosts
no-hosts
# But read this one...
addn-hosts=/etc/hosts.dnsmasq
# Default suffix for all machines
domain={{ itix_dns_suffix }}
#
# Wildcard DNS entries (see lab_route_suffix variable)
#
# note: will generate something like this :
# address=/app.openshift.test/192.168.23.20
#
{% if 'lb' in groups %}
address=/{{ openshift_master_default_subdomain }}/{{ hostvars[groups['lb'][0]]['ansible_default_ipv4']['address'] }}
{% else %}
address=/{{ openshift_master_default_subdomain }}/{{ hostvars[groups['masters'][0]]['ansible_default_ipv4']['address'] }}
{% endif %}

5
roles/name-resolution/templates/resolv.conf

@ -1,5 +0,0 @@
# {{ ansible_managed }}
search {{ itix_dns_suffix }}
{% for item in groups['name-server'] %}
nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }}
{% endfor %}

8
roles/openshift-prereq/tasks/main.yml

@ -11,7 +11,7 @@
- rhel-7-server-optional-rpms - rhel-7-server-optional-rpms
- rhel-7-server-extras-rpms - rhel-7-server-extras-rpms
- rhel-7-server-ose-{{ itix_openshift_version }}-rpms - rhel-7-server-ose-{{ itix_openshift_version }}-rpms
- rhel-7-fast-datapath-rpms # see https://access.redhat.com/solutions/3008401 - rhel-7-server-ansible-2.6-rpms
tags: rpm tags: rpm
- name: Install required RPMs - name: Install required RPMs
@ -20,8 +20,12 @@
- git - git
- net-tools - net-tools
- bind-utils - bind-utils
- yum-utils
- bridge-utils - bridge-utils
- bash-completion - bash-completion
- NetworkManager - NetworkManager
- nfs-utils - kexec-tools
- sos
- psacct
- skopeo
tags: rpm tags: rpm

3
roles/openshift-templates/defaults/main.yml

@ -1,3 +0,0 @@
---
itix_application_templates_repo_url: https://github.com/jboss-openshift/application-templates.git

520
roles/openshift-templates/files/sso71-allinone.yaml

@ -1,520 +0,0 @@
kind: Template
apiVersion: v1
metadata:
annotations:
iconClass: icon-sso
tags: 'sso,keycloak,jboss'
version: 1.4.7
openshift.io/display-name: 'Single Sign-On 7.1 (development mode)'
openshift.io/provider-display-name: 'Red Hat, Inc.'
description: 'A self-containing Red Hat SSO application with a PostgreSQL database. For more information about using this template, see https://github.com/jboss-openshift/application-templates.'
template.openshift.io/long-description: 'This template defines resources needed to develop Red Hat Single Sign-On 7.1 server based deployment and deployment configuration for PostgreSQL using persistence.'
template.openshift.io/documentation-url: 'https://access.redhat.com/documentation/en/red-hat-single-sign-on/'
template.openshift.io/support-url: 'https://access.redhat.com'
name: sso71-allinone
labels:
template: sso71-allinone
xpaas: 1.4.7
message: 'A new persistent SSO service (using PostgreSQL) has been created in your project. The admin username/password for accessing the master realm via the SSO console is ${SSO_ADMIN_USERNAME}/${SSO_ADMIN_PASSWORD}. The username/password for accessing the PostgreSQL database "${DB_DATABASE}" is ${DB_USERNAME}/${DB_PASSWORD}.'
parameters:
- displayName: 'Application Name'
description: 'The name for the application.'
name: APPLICATION_NAME
value: sso
required: true
- displayName: 'Custom http Route Hostname'
description: 'Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>'
name: SSO_HOSTNAME
value: ""
required: false
- displayName: 'The password that protects the Keystores'
description: 'The password for the keystores and certificates (auto-generated)'
name: SSO_KEYSTORE_PASSWORD
generate: expression
from: '[a-zA-Z0-9]{8}'
required: true
- displayName: 'Database JNDI Name'
description: 'Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql'
name: DB_JNDI
value: 'java:jboss/datasources/KeycloakDS'
required: false
- displayName: 'Database Name'
description: 'Database name'
name: DB_DATABASE
value: sso
required: true
- displayName: 'Datasource Minimum Pool Size'
description: 'Sets xa-pool/min-pool-size for the configured datasource.'
name: DB_MIN_POOL_SIZE
required: false
- displayName: 'Datasource Maximum Pool Size'
description: 'Sets xa-pool/max-pool-size for the configured datasource.'
name: DB_MAX_POOL_SIZE
required: false
- displayName: 'Datasource Transaction Isolation'
description: 'Sets transaction-isolation for the configured datasource.'
name: DB_TX_ISOLATION
required: false
- displayName: 'PostgreSQL Maximum number of connections'
description: 'The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.'
name: POSTGRESQL_MAX_CONNECTIONS
required: false
- displayName: 'PostgreSQL Shared Buffers'
description: 'Configures how much memory is dedicated to PostgreSQL for caching data.'
name: POSTGRESQL_SHARED_BUFFERS
required: false
- displayName: 'Database Username'
description: 'Database user name'
name: DB_USERNAME
value: sso
required: true
- displayName: 'Database Password'
description: 'Database user password'
name: DB_PASSWORD
from: '[a-zA-Z0-9]{8}'
generate: expression
required: true
- displayName: 'Database Volume Capacity'
description: 'Size of persistent storage for database volume.'
name: VOLUME_CAPACITY
value: 1Gi
required: true
- displayName: 'JGroups Cluster Password'
description: 'JGroups cluster password'
name: JGROUPS_CLUSTER_PASSWORD
from: '[a-zA-Z0-9]{8}'
generate: expression
required: true
- displayName: 'ImageStream Namespace'
description: 'Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you''ve installed the ImageStreams in a different namespace/project.'
name: IMAGE_STREAM_NAMESPACE
value: openshift
required: true
- displayName: 'SSO Admin Username'
description: 'SSO Server admin username'
name: SSO_ADMIN_USERNAME
value: 'admin'
required: true
- displayName: 'SSO Admin Password'
description: 'SSO Server admin password'
name: SSO_ADMIN_PASSWORD
from: '[a-zA-Z0-9]{8}'
generate: expression
required: true
- displayName: 'SSO Realm'
description: 'Realm to be created in the SSO server (e.g. demo).'
name: SSO_REALM
value: ""
required: false
- displayName: 'SSO Service Username'
description: 'The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.'
name: SSO_SERVICE_USERNAME
value: ""
required: false
- displayName: 'SSO Service Password'
description: 'The password for the SSO service user.'
name: SSO_SERVICE_PASSWORD
value: ""
required: false
- displayName: 'PostgreSQL Image Stream Tag'
description: 'The tag to use for the "postgresql" image stream. Typically, this aligns with the major.minor version of PostgreSQL.'
name: POSTGRESQL_IMAGE_STREAM_TAG
value: '9.5'
required: true
- description: 'Container memory limit'
name: MEMORY_LIMIT
value: 1Gi
required: false
objects:
##
## Route and Services
##
- kind: Service
apiVersion: v1
spec:
ports:
- name: http
port: 8080
targetPort: 8080
- name: https
port: 8443
targetPort: 8443
selector:
deploymentConfig: '${APPLICATION_NAME}'
metadata:
name: '${APPLICATION_NAME}'
labels:
application: '${APPLICATION_NAME}'
annotations:
description: 'The web server''s http/https ports.'
service.alpha.openshift.io/dependencies: '[{"name": "${APPLICATION_NAME}-postgresql", "kind": "Service"}]'
service.alpha.openshift.io/serving-cert-secret-name: '${APPLICATION_NAME}-https-secret'
- kind: Service
apiVersion: v1
spec:
ports:
- name: postgresql
port: 5432
targetPort: 5432
selector:
deploymentConfig: '${APPLICATION_NAME}-postgresql'
metadata:
name: '${APPLICATION_NAME}-postgresql'
labels:
application: '${APPLICATION_NAME}'
annotations:
description: 'The database server''s port.'
- kind: Route
apiVersion: v1
id: '${APPLICATION_NAME}'
metadata:
name: '${APPLICATION_NAME}'
labels:
application: '${APPLICATION_NAME}'
annotations:
description: 'Route for application''s https service.'
spec:
host: '${SSO_HOSTNAME}'
port:
targetPort: https
to:
kind: Service
name: '${APPLICATION_NAME}'
port:
tls:
termination: reencrypt
insecureEdgeTerminationPolicy: Redirect
##
## Persistence
##
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: '${APPLICATION_NAME}-database'
labels:
application: '${APPLICATION_NAME}'
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: '${VOLUME_CAPACITY}'
##
## Service Accounts, Secrets and Role Bindings
##
- apiVersion: v1
kind: ServiceAccount
metadata:
name: '${APPLICATION_NAME}'
labels:
application: '${APPLICATION_NAME}'
secrets:
- name: '${APPLICATION_NAME}-https-secret'
- apiVersion: v1
kind: RoleBinding
metadata:
name: '${APPLICATION_NAME}-can-read-project'
labels:
application: '${APPLICATION_NAME}'
roleRef:
name: view
subjects:
- kind: ServiceAccount
name: '${APPLICATION_NAME}'
##
## Deploy RH-SSO
##
- kind: DeploymentConfig
apiVersion: v1
metadata:
name: '${APPLICATION_NAME}'
labels:
application: '${APPLICATION_NAME}'
spec:
strategy:
type: Recreate
recreateParams:
pre:
failurePolicy: Abort
execNewPod:
containerName: '${APPLICATION_NAME}'
command:
- /usr/bin/keytool
- -genseckey
- -alias
- jgroups
- -keystore
- /etc/eap-secret-volume/jgroups.jceks
- -keyalg
- Blowfish
- -keysize
- "56"
- -storetype
- JCEKS
- -keypass
- '${SSO_KEYSTORE_PASSWORD}'
- -storepass
- '${SSO_KEYSTORE_PASSWORD}'
volumes:
- '${APPLICATION_NAME}-eap-secrets'
triggers:
- type: ImageChange
imageChangeParams:
automatic: true
containerNames:
- '${APPLICATION_NAME}'
from:
kind: ImageStreamTag
namespace: '${IMAGE_STREAM_NAMESPACE}'
name: 'redhat-sso71-openshift:1.2'
- type: ConfigChange
replicas: 1
selector:
deploymentConfig: '${APPLICATION_NAME}'
template:
metadata:
name: '${APPLICATION_NAME}'
labels:
deploymentConfig: '${APPLICATION_NAME}'
application: '${APPLICATION_NAME}'
spec:
serviceAccountName: '${APPLICATION_NAME}'
terminationGracePeriodSeconds: 75
initContainers:
- name: openshift-ca-pemtokeystore
image: syndesis/pemtokeystore:v0.2.1
imagePullPolicy: IfNotPresent
args:
- -keystore
- /etc/eap-secret-volume/keystore.jks
- -ca-file
- /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- -ca-file
- /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
- -cert-file
- tls=/etc/openshift-secret-volume/tls.crt
- -key-file
- tls=/etc/openshift-secret-volume/tls.key
- -keystore-password
- '${SSO_KEYSTORE_PASSWORD}'
volumeMounts:
- name: "${APPLICATION_NAME}-openshift-secret"
mountPath: "/etc/openshift-secret-volume"
- name: '${APPLICATION_NAME}-eap-secrets'
mountPath: "/etc/eap-secret-volume"
resources:
limits:
memory: 255Mi
requests:
memory: 20Mi
containers:
- name: '${APPLICATION_NAME}'
image: ' '
imagePullPolicy: Always
resources:
limits:
memory: '${MEMORY_LIMIT}'
volumeMounts:
- name: '${APPLICATION_NAME}-eap-secrets'
mountPath: /etc/eap-secret-volume
# Workaround for a bug on overlayfs2
# See https://github.com/openshift/openshift-ansible/issues/2823
- name: '${APPLICATION_NAME}-eap-configuration'
mountPath: /opt/eap/standalone/configuration/standalone_xml_history/
lifecycle:
preStop:
exec:
command:
- /opt/eap/bin/jboss-cli.sh
- '-c'
- ':shutdown(timeout=60)'
livenessProbe:
exec:
command:
- /bin/bash
- '-c'
- /opt/eap/bin/livenessProbe.sh
initialDelaySeconds: 60
readinessProbe:
exec:
command:
- /bin/bash
- '-c'
- /opt/eap/bin/readinessProbe.sh
ports:
- name: jolokia
containerPort: 8778
protocol: TCP
- name: http
containerPort: 8080
protocol: TCP
- name: https
containerPort: 8443
protocol: TCP
- name: ping
containerPort: 8888
protocol: TCP
env:
- name: DB_SERVICE_PREFIX_MAPPING
value: '${APPLICATION_NAME}-postgresql=DB'
- name: DB_JNDI
value: '${DB_JNDI}'
- name: DB_USERNAME
value: '${DB_USERNAME}'
- name: DB_PASSWORD
value: '${DB_PASSWORD}'
- name: DB_DATABASE
value: '${DB_DATABASE}'
- name: TX_DATABASE_PREFIX_MAPPING
value: '${APPLICATION_NAME}-postgresql=DB'
- name: DB_MIN_POOL_SIZE
value: '${DB_MIN_POOL_SIZE}'
- name: DB_MAX_POOL_SIZE
value: '${DB_MAX_POOL_SIZE}'
- name: DB_TX_ISOLATION
value: '${DB_TX_ISOLATION}'
- name: OPENSHIFT_KUBE_PING_LABELS
value: 'application=${APPLICATION_NAME}'
- name: OPENSHIFT_KUBE_PING_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: HTTPS_KEYSTORE_DIR
value: /etc/eap-secret-volume
- name: HTTPS_KEYSTORE
value: 'keystore.jks'
- name: HTTPS_KEYSTORE_TYPE
value: 'JKS'
- name: HTTPS_NAME
value: 'tls'
- name: HTTPS_PASSWORD
value: '${SSO_KEYSTORE_PASSWORD}'
- name: JGROUPS_ENCRYPT_SECRET
value: '${SSO_KEYSTORE_PASSWORD}'
- name: JGROUPS_ENCRYPT_KEYSTORE_DIR
value: /etc/eap-secret-volume
- name: JGROUPS_ENCRYPT_KEYSTORE
value: jgroups.jceks
- name: JGROUPS_ENCRYPT_NAME
value: jgroups
- name: JGROUPS_ENCRYPT_KEYSTORE
value: '${SSO_KEYSTORE_PASSWORD}'
- name: JGROUPS_CLUSTER_PASSWORD
value: '${JGROUPS_CLUSTER_PASSWORD}'
- name: SSO_ADMIN_USERNAME
value: '${SSO_ADMIN_USERNAME}'
- name: SSO_ADMIN_PASSWORD
value: '${SSO_ADMIN_PASSWORD}'
- name: SSO_REALM
value: '${SSO_REALM}'
- name: SSO_SERVICE_USERNAME
value: '${SSO_SERVICE_USERNAME}'
- name: SSO_SERVICE_PASSWORD
value: '${SSO_SERVICE_PASSWORD}'
- name: SSO_TRUSTSTORE
value: 'keystore.jks'
- name: SSO_TRUSTSTORE_DIR
value: /etc/eap-secret-volume
- name: SSO_TRUSTSTORE_PASSWORD
value: '${SSO_KEYSTORE_PASSWORD}'
volumes:
- name: '${APPLICATION_NAME}-openshift-secret'
secret:
secretName: '${APPLICATION_NAME}-https-secret'
- name: '${APPLICATION_NAME}-eap-secrets'
emptyDir: {}
# Workaround for a bug on overlayfs2
# See https://github.com/openshift/openshift-ansible/issues/2823
- name: '${APPLICATION_NAME}-eap-configuration'
emptyDir: {}
##
## Deploy PostgreSQL
##
# Database backing the SSO server. Single replica behind a Recreate
# strategy — appropriate for a pod bound to one persistent volume,
# since it prevents two postgres pods from mounting the data dir at once.
- kind: DeploymentConfig
  apiVersion: v1
  metadata:
    name: '${APPLICATION_NAME}-postgresql'
    labels:
      application: '${APPLICATION_NAME}'
  spec:
    strategy:
      # Recreate: tear down the old pod before starting the new one.
      type: Recreate
    triggers:
      # Redeploy automatically whenever the tracked image stream tag moves.
      - type: ImageChange
        imageChangeParams:
          automatic: true
          containerNames:
            - '${APPLICATION_NAME}-postgresql'
          from:
            kind: ImageStreamTag
            namespace: '${IMAGE_STREAM_NAMESPACE}'
            name: 'postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}'
      # ...and whenever this DeploymentConfig itself is edited.
      - type: ConfigChange
    replicas: 1
    selector:
      deploymentConfig: '${APPLICATION_NAME}-postgresql'
    template:
      metadata:
        name: '${APPLICATION_NAME}-postgresql'
        labels:
          deploymentConfig: '${APPLICATION_NAME}-postgresql'
          application: '${APPLICATION_NAME}'
      spec:
        # Give postgres up to 60s to shut down cleanly on pod deletion.
        terminationGracePeriodSeconds: 60
        containers:
          - name: '${APPLICATION_NAME}-postgresql'
            # Placeholder name; the actual image is resolved by the
            # ImageChange trigger above.
            image: postgresql
            imagePullPolicy: Always
            ports:
              - name: postgresql
                containerPort: 5432
                protocol: TCP
            volumeMounts:
              # PGDATA lives on the persistent volume claimed below.
              - mountPath: /var/lib/pgsql/data
                name: '${APPLICATION_NAME}-postgresql-pvol'
            env:
              - name: POSTGRESQL_USER
                value: '${DB_USERNAME}'
              - name: POSTGRESQL_PASSWORD
                value: '${DB_PASSWORD}'
              - name: POSTGRESQL_DATABASE
                value: '${DB_DATABASE}'
              - name: POSTGRESQL_MAX_CONNECTIONS
                value: '${POSTGRESQL_MAX_CONNECTIONS}'
              # Deliberately reuses the MAX_CONNECTIONS parameter —
              # presumably to allow one XA prepared transaction per
              # connection (TODO confirm against the postgresql image docs).
              - name: POSTGRESQL_MAX_PREPARED_TRANSACTIONS
                value: '${POSTGRESQL_MAX_CONNECTIONS}'
              - name: POSTGRESQL_SHARED_BUFFERS
                value: '${POSTGRESQL_SHARED_BUFFERS}'
        volumes:
          - name: '${APPLICATION_NAME}-postgresql-pvol'
            persistentVolumeClaim:
              claimName: '${APPLICATION_NAME}-database'

105
roles/openshift-templates/tasks/main.yml

@ -1,105 +0,0 @@
---
# Installs and refreshes the Templates/ImageStreams published in the shared
# "openshift" namespace: the JBoss middleware catalog, a custom SSO
# template, the upstream Jenkins persistent template and the Minio template.
# All oc commands are made idempotent via failed_when/changed_when guards.
- name: Create a temporary directory
  tempfile:
    state: directory
  register: tempfile
- name: Copy the Custom SSO template
  copy:
    src: 'sso71-allinone.yaml'
    dest: '{{ tempfile.path }}/sso71-allinone.yaml'
- name: Clone the jboss-openshift/application-templates GIT repository
  git:
    repo: '{{ itix_application_templates_repo_url }}'
    dest: '{{ tempfile.path }}/application-templates'
    version: '{{ itix_application_templates_repo_tag|default(''master'') }}'
- name: Get an archive of the OpenShift GIT repository
  get_url:
    url: 'https://github.com/openshift/origin/archive/{{ itix_openshift_origin_repo_tag|default(''master'') }}.tar.gz'
    dest: '{{ tempfile.path }}/openshift-origin.tar.gz'
- name: Get the Minio template
  get_url:
    url: 'https://raw.githubusercontent.com/nmasse-itix/OpenShift-Docker-Images/master/minio/minio.yaml'
    dest: '{{ tempfile.path }}/minio.yaml'
- name: Extract the OpenShift GIT archive
  unarchive:
    remote_src: yes
    src: '{{ tempfile.path }}/openshift-origin.tar.gz'
    dest: '{{ tempfile.path }}'
# The extracted directory is named origin-<tag>; symlink it to a stable
# path so the object list below does not depend on the tag.
- name: Symlink the OpenShift GIT repo
  file:
    src: '{{ tempfile.path }}/origin-{{ itix_openshift_origin_repo_tag|default(''master'') }}'
    dest: '{{ tempfile.path }}/openshift-origin'
    state: link
# Full list of files to load into the "openshift" namespace.
- set_fact:
    objects_to_import:
      - '{{ tempfile.path }}/application-templates/jboss-image-streams.json'
      - '{{ tempfile.path }}/application-templates/openjdk/openjdk18-web-basic-s2i.json'
      - '{{ tempfile.path }}/openshift-origin/examples/jenkins/jenkins-persistent-template.json'
      - '{{ tempfile.path }}/sso71-allinone.yaml'
      - '{{ tempfile.path }}/minio.yaml'
# "create" then "replace": create succeeds for new objects (AlreadyExists
# is tolerated), replace updates existing ones (NotFound is tolerated).
- name: Install new ImageStreams/Templates in the "openshift" namespace
  command: oc create -n openshift -f "{{ item }}"
  register: oc
  failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr
  changed_when: oc.rc == 0
  with_items: '{{ objects_to_import }}'
- name: Update existing ImageStreams/Templates in the "openshift" namespace
  command: oc replace -n openshift -f "{{ item }}"
  register: oc
  failed_when: oc.rc > 0 and 'Error from server (NotFound):' not in oc.stderr
  changed_when: oc.rc == 0
  with_items: '{{ objects_to_import }}'
- name: Get a list of currently installed templates
  command: oc get templates -n openshift -o name
  register: oc_get_templates
# Prune the *-ephemeral variants — only persistent templates are kept.
- name: Delete ephemeral templates
  command: oc delete {{ item }} -n openshift
  when: item|regex_search('-ephemeral$')
  with_items: '{{ oc_get_templates.stdout_lines }}'
- name: Get a list of currently installed image streams
  command: oc get is -n openshift -o name
  register: oc_get_is
# Remove middleware image streams this lab does not use.
- name: Delete unwanted image streams
  command: oc delete {{ item }} -n openshift
  when: item|regex_search('(datagrid|datavirt|decisionserver|eap64|processserver|tomcat7)')
  with_items: '{{ oc_get_is.stdout_lines }}'
- name: Get a list of the remaining image streams
  command: oc get is -n openshift -o name
  register: oc_get_is
# Re-import every remaining stream and schedule periodic refreshes.
- name: Update each image stream
  command: oc import-image {{ item }} --confirm --scheduled --all -n openshift
  with_items: '{{ oc_get_is.stdout_lines }}'
# Only create the RHEL base image streams when they are not already
# present in the list fetched above.
- name: Import additional Red Hat image streams (initial import)
  command: oc import-image -n openshift {{ item.key }} --from {{ item.value }} --confirm --scheduled
  with_dict:
    rhel7-atomic: registry.access.redhat.com/rhel7-atomic:latest
    rhel7: registry.access.redhat.com/rhel7:latest
  when: '(''imagestreams/'' ~ item.key) not in oc_get_is.stdout_lines'
- name: Import additional Red Hat image streams (additional tags)
  command: oc tag -n openshift {{ item.value }} {{ item.key }} --scheduled
  with_dict:
    'rhel7-atomic:7.4': registry.access.redhat.com/rhel7-atomic:7.4
    'rhel7:7.4': registry.access.redhat.com/rhel7:7.4
- name: Delete the temporary directory
  file:
    path: '{{ tempfile.path }}'
    state: absent

8
roles/sso/defaults/main.yml

@ -1,8 +0,0 @@
---
# Default variables for the "sso" role.
itix_sso_template: sso71-allinone  # OpenShift template passed to oc new-app
itix_sso_project: sso  # project (namespace) the SSO app is deployed into
itix_sso_realm: itix  # realm name — not referenced in the visible tasks; TODO confirm use
itix_sso_application_name: sso  # APPLICATION_NAME template parameter
itix_sso_retries: 30  # polling attempts while waiting for the pods
itix_sso_delay: 5  # seconds between polling attempts

64
roles/sso/tasks/main.yml

@ -1,64 +0,0 @@
---
# Deploys Red Hat SSO from the custom template into its own project,
# waits for the pods to come up, then prints the admin credentials
# extracted from the DeploymentConfig environment.
- name: Get a list of existing projects
  command: oc get projects -o name
  register: oc_get_projects
  changed_when: false
# Only create the project when it is missing from the list above.
- name: Create a new project for SSO
  command: oc new-project "{{ itix_sso_project }}"
  when: '"projects/" ~ itix_sso_project not in oc_get_projects.stdout_lines'
- name: Query existing deploymentconfigs
  command: oc get dc -n "{{ itix_sso_project }}" -o name -l "application={{ itix_sso_application_name }}"
  register: oc_get_dc
  changed_when: false
# deploy_needed is true when no DC with the application name exists yet;
# it gates the new-app call and the pause below (idempotent re-runs).
- name: Deploy app if needed
  set_fact:
    deploy_needed: "{{ 'deploymentconfigs/' ~ itix_sso_application_name not in oc_get_dc.stdout_lines }}"
- name: Process the OpenShift Template and create the OpenShift objects
  command: oc new-app -n {{ itix_sso_project }} {{ itix_sso_template }} -p "SSO_HOSTNAME={{ itix_sso_hostname }}" -p "APPLICATION_NAME={{ itix_sso_application_name }}"
  when: deploy_needed
# Small grace period so the objects exist before the wait loop polls them.
- name: Wait for OpenShift to create all objects
  pause:
    seconds: '{{ itix_sso_delay }}'
  when: deploy_needed
# Shared polling loop (dynamic include): waits until both pods are ready.
- include: common/wait_for.yml
  static: no
  vars:
    pod_to_wait:
      - sso
      - sso-postgresql
    delay: "{{ itix_sso_delay }}"
    retries: "{{ itix_sso_retries }}"
    project: "{{ itix_sso_project }}"
  tags: status
# Read the generated admin credentials back out of the DC's env section.
- name: Get Admin Username
  command: oc get dc {{ itix_sso_application_name }} -n "{{ itix_sso_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="SSO_ADMIN_USERNAME")].value}'
  register: username
  changed_when: false
  tags: status
- name: Get Admin Password
  command: oc get dc {{ itix_sso_application_name }} -n "{{ itix_sso_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="SSO_ADMIN_PASSWORD")].value}'
  register: password
  changed_when: false
  tags: status
- name: Get Route URL
  command: oc get route {{ itix_sso_application_name }} -n "{{ itix_sso_project }}" -o 'jsonpath={.spec.host}'
  register: route
  changed_when: false
  tags: status
# Expose the route hostname as a fact for later plays/roles.
- set_fact:
    sso_route_name: '{{ route.stdout }}'
- name: SSO is ready !
  debug: msg="Login on https://{{ sso_route_name }}/auth/admin with username = '{{ username.stdout }}' and password = '{{ password.stdout }}'"
  tags: status
Loading…
Cancel
Save