This commit is contained in:
Eric Meehan 2024-07-19 22:06:03 -04:00
parent abd86e10e6
commit 76f0489016
54 changed files with 998 additions and 438 deletions

View File

@ -2,5 +2,42 @@
- name: Deploy eom.dev one service at a time
hosts: alpha-control-plane
become: true
pre_tasks:
- name: Create issuer for letsencrypt staging
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
email: eric@eom.dev
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-staging-issuer-account-key
solvers:
- http01:
ingress:
ingressClassName: nginx
- name: Create issuer for letsencrypt production
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
email: eric@eom.dev
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-production-issuer-account-key
solvers:
- http01:
ingress:
ingressClassName: nginx
roles:
- role: ericomeehan.eom.dev
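# Verification sketch (not part of the play): once cert-manager has processed
# the issuers, both should report a registered ACME account:
#   kubectl get clusterissuer letsencrypt-staging letsencrypt-production
#   kubectl describe clusterissuer letsencrypt-production | grep -A 3 Conditions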

View File

@ -2,9 +2,4 @@
hosts: alpha-control-plane
become: true
roles:
- role: ericomeehan.gondwanamc.com
vars:
target_namespace: testing
nodePorts:
gondwanamc-24454: 30000
gondwanamc-25565: 30001
- role: ericomeehan.gondwanamc

View File

@ -8,8 +8,8 @@ all:
control_plane:
hosts:
alpha-control-plane:
ansible_host: 192.168.1.121
ansible_host: 192.168.1.132
workers:
hosts:
alpha-worker-0:
ansible_host: 192.168.1.122
ansible_host: 192.168.1.130

View File

@ -1,104 +1,15 @@
---
# tasks file for ericomeehan.cluster-gateway
- name: Create persistent volume for gateway
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolume
metadata:
name: gateway-pv
spec:
capacity:
storage: {{ gateway_storage }}
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /mnt/data/pv.gateway.eom.dev
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- alpha-worker-0
- name: Download latest Debian netinstall ISO
get_url:
url: "{{ debian_image_url }}"
dest: /data/debian-netinst.iso
- name: Create persistent volume claim for gateway
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim.gateway.eom.dev
namespace: {{ namespace }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ gateway_storage }}
storageClassName: standard
volumeName: pv.gateway.eom.dev
- name: Create virtual machine disk image
command: qemu-img create -f qcow2 /data/store-0/cluster-gateway.qcow2 4G
- name: Create a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment.gateway.eom.dev
namespace: {{ namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
template:
metadata:
labels:
app: eom.dev
spec:
containers:
- name: gateway
image: gateway
volumeMounts:
- name: pv.gateway.eom.dev
mountPath: /usr/src/gateway/files
ports:
- containerPort: 3000
env:
- name: REDMINE_DB_MYSQL
value: "mariadb"
- name: REDMINE_DB_PORT
value: "8086"
- name: REDMINE_DB_DATABASE
value: "gateway"
- name: REDMINE_DB_USERNAME
value: "gateway"
- name: REDMINE_DB_PASSWORD
value: "{{ gateway_db_password }}"
volumes:
- name: pv.gateway.eom.dev
persistentVolumeClaim:
claimName: pv-claim.gateway.eom.dev
- name: Install Debian on the VM
command: virt-install --name cluster-gateway --ram 1024 --vcpus 1 --os-type linux --os-variant debian12 --disk /data/store-0/cluster-gateway.qcow2 --network bridge:virbr0 --graphics none --console pty,target_type=serial --extra-args 'console=ttyS0,115200n8 serial' --cdrom /data/debian-netinst.iso
- name: Expose deployment as a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: service.gateway.eom.dev
namespace: {{ namespace }}
spec:
selector:
app: eom.dev
ports:
- port: 3000
protocol: TCP
name: gateway-port-3000
type: ClusterIP
- name: Start the cluster gateway
command: virsh autostart cluster-gateway
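# Follow-up sketch, assuming libvirt defaults: virsh autostart only flags the
# domain to start on host boot, so boot it now and attach to the serial console
# to walk through the netinstall:
#   virsh start cluster-gateway
#   virsh console cluster-gateway   # detach with Ctrl+]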

View File

@ -1,11 +1,15 @@
---
# defaults file for ericomeehan.org-suite
reverse_proxy_production_storage: 2Gi
mariadb_production_storage: 64Gi
gitea_production_storage: 64Gi
mediawiki_production_storage: 256Gi
nextcloud_production_storage: 256Gi
redmine_production_storage: 64Gi
target_namespace: eom
gitea_storage: 64Gi
grafana_storage: 8Gi
influxdb_storage: 8Gi
mariadb_storage: 64Gi
mediawiki_storage: 256Gi
nextcloud_storage: 256Gi
redmine_storage: 64Gi
redis_storage: 32Gi
# TODO: use ansible vault
mariadb_root_password: 123abc
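# Sketch for the TODO above: encrypt the secret with ansible-vault, e.g.
#   ansible-vault encrypt_string '123abc' --name 'mariadb_root_password'
# and paste the emitted block in place of the plaintext value:
# mariadb_root_password: !vault |
#   $ANSIBLE_VAULT;1.1;AES256
#   <ciphertext>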

View File

@ -141,7 +141,7 @@ SSLEngine on
# Some ECC cipher suites (http://www.ietf.org/rfc/rfc4492.txt)
# require an ECC certificate which can also be configured in
# parallel.
SSLCertificateFile "/usr/local/apache2/conf/server.crt"
SSLCertificateFile "/usr/local/apache2/conf/ssl/tls.crt"
#SSLCertificateFile "/usr/local/apache2/conf/server-dsa.crt"
#SSLCertificateFile "/usr/local/apache2/conf/server-ecc.crt"
@ -151,7 +151,7 @@ SSLCertificateFile "/usr/local/apache2/conf/server.crt"
# you've both a RSA and a DSA private key you can configure
# both in parallel (to also allow the use of DSA ciphers, etc.)
# ECC keys, when in use, can also be configured in parallel
SSLCertificateKeyFile "/usr/local/apache2/conf/server.key"
SSLCertificateKeyFile "/usr/local/apache2/conf/ssl/tls.key"
#SSLCertificateKeyFile "/usr/local/apache2/conf/server-dsa.key"
#SSLCertificateKeyFile "/usr/local/apache2/conf/server-ecc.key"

View File

@ -562,8 +562,8 @@ SSLRandomSeed connect builtin
Allow from all
</Proxy>
ProxyPass / http://services.gitea.eom.dev.production.svc.cluster.local/
ProxyPassReverse / http://services.gitea.eom.dev.production.svc.cluster.local/
ProxyPass / http://services.gitea.eom.production.svc.cluster.local/
ProxyPassReverse / http://services.gitea.eom.production.svc.cluster.local/
</VirtualHost>
<VirtualHost *:443>
@ -579,8 +579,8 @@ SSLRandomSeed connect builtin
Allow from all
</Proxy>
ProxyPass / http://services.mediawiki.eom.dev.production.svc.cluster.local/
ProxyPassReverse / http://services.mediawiki.eom.dev.production.svc.cluster.local/
ProxyPass / http://services.mediawiki.eom.production.svc.cluster.local/
ProxyPassReverse / http://services.mediawiki.eom.production.svc.cluster.local/
</VirtualHost>
<VirtualHost *:443>
@ -596,8 +596,8 @@ SSLRandomSeed connect builtin
Allow from all
</Proxy>
ProxyPass / http://services.nextcloud.eom.dev.production.svc.cluster.local/
ProxyPassReverse / http://services.nextcloud.eom.dev.production.svc.cluster.local/
ProxyPass / http://services.nextcloud.eom.production.svc.cluster.local/
ProxyPassReverse / http://services.nextcloud.eom.production.svc.cluster.local/
</VirtualHost>
<VirtualHost *:443>
@ -613,6 +613,6 @@ SSLRandomSeed connect builtin
Allow from all
</Proxy>
ProxyPass / http://services.redmine.eom.dev.production.svc.cluster.local/
ProxyPassReverse / http://services.redmine.eom.dev.production.svc.cluster.local/
ProxyPass / http://services.redmine.eom.production.svc.cluster.local/
ProxyPassReverse / http://services.redmine.eom.production.svc.cluster.local/
</VirtualHost>

View File

@ -8,16 +8,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-gitea-eom-dev
name: gitea
spec:
capacity:
storage: "{{ gitea_production_storage }}"
storage: "{{ gitea_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-gitea-eom-dev-production
path: "/data/store-0/eom/gitea"
nodeAffinity:
required:
nodeSelectorTerms:
@ -34,16 +34,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim-gitea-eom-dev-production
namespace: production
name: gitea
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ gitea_production_storage }}"
storage: "{{ gitea_storage }}"
storageClassName: standard
volumeName: pv-gitea-eom-dev-production
volumeName: gitea
- name: Create a deployment
k8s:
@ -51,23 +51,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-gitea-eom-dev-production
namespace: production
name: gitea
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: gitea
template:
metadata:
labels:
app: eom.dev
app: gitea
spec:
containers:
- name: gitea
image: gitea/gitea
volumeMounts:
- name: pv-gitea-eom-dev-production
- name: gitea
mountPath: /var/lib/gitea
ports:
- containerPort: 2222
@ -76,7 +76,7 @@
- name: GITEA__database__DB_TYPE
value: "mysql"
- name: GITEA__database__HOST
value: "service.mariadb.eom.dev.production.svc.cluster.local:3306"
value: "mariadb.eom.svc.cluster.local:3306"
- name: GITEA__database__NAME
value: "gitea"
- name: GITEA__database__USER
@ -84,9 +84,9 @@
- name: GITEA__database__PASSWD
value: "{{ gitea_mariadb_password }}"
volumes:
- name: pv-gitea-eom-dev-production
- name: gitea
persistentVolumeClaim:
claimName: pv-claim.redis.eom.dev
claimName: gitea
- name: Expose deployment as a service
k8s:
@ -94,11 +94,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-gitea-eom-dev-production
namespace: production
name: gitea
namespace: eom
spec:
selector:
app: eom.dev
app: gitea
ports:
- port: 22
targetPort: 2222

View File

@ -7,16 +7,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-grafana-eom-dev-production
name: grafana
spec:
capacity:
storage: {{ grafana_production_storage }}
storage: "{{ grafana_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/vault-0/pv-grafana-eom-dev-production
path: "/data/store-0/eom/grafana"
nodeAffinity:
required:
nodeSelectorTerms:
@ -33,16 +33,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim-grafana-eom-dev-production
namespace: production
name: grafana
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ grafana_production_storage }}
storage: "{{ grafana_storage }}"
storageClassName: standard
volumeName: pv-grafana-eom-dev-production
volumeName: grafana
- name: Create a deployment
k8s:
@ -50,30 +50,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-grafana-eom-dev-production
namespace: production
name: grafana
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: grafana
template:
metadata:
labels:
app: eom.dev
app: grafana
spec:
securityContext:
runAsUser: 472
fsGroup: 472
runAsGroup: 472
containers:
- name: grafana
image: grafana/grafana
volumeMounts:
- name: pv-grafana-eom-dev-production
- name: grafana
mountPath: /var/lib/grafana
ports:
- containerPort: 30000
- containerPort: 3000
volumes:
- name: pv-grafana-eom-dev-production
- name: grafana
persistentVolumeClaim:
claimName: pv-claim-grafana-eom-dev-production
claimName: grafana
- name: Expose deployment as a service
k8s:
@ -81,11 +85,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-grafana-eom-dev-production
namespace: production
name: grafana
namespace: eom
spec:
selector:
app: eom.dev
app: grafana
ports:
- port: 80
targetPort: 3000

View File

@ -7,16 +7,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-influxdb-eom-dev
name: influxdb
spec:
capacity:
storage: {{ influxdb_production_storage }}
storage: "{{ influxdb_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/vault-0/pv-influxdb-eom-dev
path: "/data/vault-0/eom/influxdb"
nodeAffinity:
required:
nodeSelectorTerms:
@ -33,16 +33,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim-influxdb-eom-dev
namespace: production
name: influxdb
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ influxdb_production_storage }}
storage: "{{ influxdb_storage }}"
storageClassName: standard
volumeName: pv-influxdb-eom-dev
volumeName: influxdb
- name: Create a deployment
k8s:
@ -50,42 +50,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-influxdb-eom-dev
namespace: production
name: influxdb
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: influxdb
template:
metadata:
labels:
app: eom.dev
app: influxdb
spec:
containers:
- name: influxdb
image: influxdb
volumeMounts:
- name: pv-influxdb-eom-dev
- name: influxdb
mountPath: /var/lib/influxdb2
ports:
containerPort: 8086
env:
# TODO: are these defined?
- name: DOCKER_INFLUXDB_INIT_MODE
value: "setup"
- name: DOCKER_INFLUXDB_INIT_USERNAME
value: "{{ influxdb_init_username }}"
- name: DOCKER_INFLUXDB_INIT_PASSWORD
value: "{{ influxdb_init_password }}"
- name: DOCKER_INFLUXDB_INIT_ORG
value: "{{ influxdb_init_org }}"
- name: DOCKER_INFLUXDB_INIT_BUCKET
value: "{{ influxdb_init_bucket }}"
- containerPort: 8086
volumes:
- name: pv-influxdb-eom-dev
- name: influxdb
persistentVolumeClaim:
claimName: pv-claim-influxdb-eom-dev
claimName: influxdb
- name: Expose deployment as a service
k8s:
@ -93,11 +81,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-influxdb-eom-dev
namespace: production
name: influxdb
namespace: eom
spec:
selector:
app: eom.dev
app: influxdb
ports:
- port: 8086
protocol: TCP

View File

@ -5,8 +5,8 @@
state: present
api_version: v1
kind: ConfigMap
name: cm-mariadb-eom-dev-production
namespace: production
name: mariadb
namespace: eom
definition:
data:
schema.sql: "{{ lookup('template', 'schema.sql.j2') }}"
@ -18,16 +18,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-mariadb-eom-dev-production
name: mariadb
spec:
capacity:
storage: "{{ mariadb_production_storage }}"
storage: "{{ mariadb_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-mariadb-eom-dev-production
path: "/data/store-0/eom/mariadb"
nodeAffinity:
required:
nodeSelectorTerms:
@ -44,16 +44,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-mariadb-eom-dev-production
namespace: production
name: mariadb
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ mariadb_production_storage }}"
storage: "{{ mariadb_storage }}"
storageClassName: standard
volumeName: pv-mariadb-eom-dev-production
volumeName: mariadb
- name: Create a deployment
k8s:
@ -61,38 +61,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-mariadb-eom-dev-production
namespace: production
name: mariadb
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: mariadb
template:
metadata:
labels:
app: eom.dev
app: mariadb
spec:
containers:
- name: mariadb
image: mariadb
volumeMounts:
- name: pv-mariadb-eom-dev-production
- name: mariadb
mountPath: /var/lib/mysql
- name: schema-sql
mountPath: /docker-entrypoint-initdb.d
ports:
- containerPort: 8086
- containerPort: 3306
env:
- name: MARIADB_ROOT_PASSWORD
value: "{{ mariadb_root_password }}"
volumes:
- name: schema-sql
configMap:
name: cm-mariadb-eom-dev-production
- name: pv-mariadb-eom-dev-production
name: mariadb
- name: mariadb
persistentVolumeClaim:
claimName: pvc-mariadb-eom-dev-production
claimName: mariadb
- name: Expose deployment as a service
k8s:
@ -100,14 +100,13 @@
apiVersion: v1
kind: Service
metadata:
name: service-mariadb-eom-dev-production
namespace: production
name: mariadb
namespace: eom
spec:
selector:
app: eom.dev
app: mariadb
ports:
- port: 3306
targetPort: 8086
protocol: TCP
name: mariadb-port-8086
name: mariadb-port-3306
type: ClusterIP

View File

@ -8,16 +8,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-mediawiki-eom-dev
name: mediawiki
spec:
capacity:
storage: "{{ mediawiki_production_storage }}"
storage: "{{ mediawiki_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-mediawiki-eom-dev
path: "/data/store-0/eom/mediawiki"
nodeAffinity:
required:
nodeSelectorTerms:
@ -36,16 +36,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-mediawiki-eom-dev
namespace: production
name: mediawiki
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ mediawiki_production_storage }}"
storage: "{{ mediawiki_storage }}"
storageClassName: standard
volumeName: pv-mediawiki-eom-dev
volumeName: mediawiki
- name: Create a deployment
k8s:
@ -53,30 +53,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-mediawiki-eom-dev
namespace: production
name: mediawiki
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: mediawiki
template:
metadata:
labels:
app: eom.dev
app: mediawiki
spec:
containers:
- name: mediawiki
image: mediawiki
volumeMounts:
- name: pv-mediawiki-eom-dev
- name: mediawiki
mountPath: /var/www/html/images
ports:
- containerPort: 80
volumes:
- name: pv-mediawiki-eom-dev
- name: mediawiki
persistentVolumeClaim:
claimName: pvc-mediawiki-eom-dev
claimName: mediawiki
- name: Expose deployment as a service
k8s:
@ -84,11 +84,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-mediawiki-eom-dev
namespace: production
name: mediawiki
namespace: eom
spec:
selector:
app: eom.dev
app: mediawiki
ports:
- port: 80
protocol: TCP

View File

@ -7,16 +7,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: nextcloud-pv
name: nextcloud
spec:
capacity:
storage: "{{ nextcloud_production_storage }}"
storage: "{{ nextcloud_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-nextcloud-eom-dev
path: "/data/store-0/eom/nextcloud"
nodeAffinity:
required:
nodeSelectorTerms:
@ -33,16 +33,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim-nextcloud-eom-dev
namespace: production
name: nextcloud
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ nextcloud_production_storage }}"
storage: "{{ nextcloud_storage }}"
storageClassName: standard
volumeName: openldap-db-pv
volumeName: nextcloud
- name: Create a deployment
k8s:
@ -50,29 +50,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-nextcloud-eom-dev
namespace: production
name: nextcloud
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: nextcloud
template:
metadata:
labels:
app: eom.dev
app: nextcloud
spec:
containers:
- name: nextcloud
image: nextcloud
volumeMounts:
- name: pv-nextcloud-eom-dev
- name: nextcloud
mountPath: /var/www/html
ports:
- containerPort: 80
env:
- name: MYSQL_HOST
value: "service.mariadb.eom.dev.production.svc.cluster.local"
value: "mariadb.eom.svc.cluster.local"
- name: MYSQL_DATABASE
value: "nextcloud"
- name: MYSQL_USER
@ -80,9 +80,9 @@
- name: MYSQL_PASSWORD
value: "{{ nextcloud_mariadb_password }}"
volumes:
- name: pv-nextcloud-eom-dev
- name: nextcloud
persistentVolumeClaim:
claimName: pv-claim-nextcloud-eom-dev
claimName: nextcloud
- name: Expose deployment as a service
k8s:
@ -90,11 +90,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-nextcloud-eom-dev
namespace: production
name: nextcloud
namespace: eom
spec:
selector:
app: eom.dev
app: nextcloud
ports:
- port: 80
protocol: TCP

View File

@ -1,94 +0,0 @@
---
# tasks file for deploy-postfix.yml
- name: Create persistent volume for postfix
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolume
metadata:
name: postfix-pv
spec:
capacity:
storage: {{ postfix_storage }}
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/vault-0/pv.postfix.eom.dev
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- alpha-worker-0
- name: Create persistent volume claim for postfix
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pv-claim.postfix.eom.dev
namespace: {{ namespace }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ postfix_storage }}
storageClassName: standard
volumeName: pv.postfix.eom.dev
- name: Create a deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment.postfix.eom.dev
namespace: {{ namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
template:
metadata:
labels:
app: eom.dev
spec:
containers:
- name: postfix
image: catatnight/postfix
volumeMounts:
- name: pv.postfix.eom.dev
# TODO: Where is postfix data stored?
mountPath: /usr/src/postfix/files
ports:
- containerPort: 587
volumes:
- name: pv.postfix.eom.dev
persistentVolumeClaim:
claimName: pv-claim.postfix.eom.dev
- name: Expose deployment as a service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: service.postfix.eom.dev
namespace: {{ namespace }}
spec:
selector:
app: eom.dev
ports:
- port: 587
protocol: TCP
name: postfix-port-587
type: ClusterIP

View File

@ -1,19 +1,39 @@
---
# tasks file for deploy-reverse-proxy.yml
- name: Create Cert-Manager Certificate
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: proxy
namespace: eom
spec:
secretName: proxy
issuerRef:
name: letsencrypt
kind: Issuer
group: cert-manager.io
commonName: eom.dev
dnsNames:
- gitea.eom.dev
- mediawiki.eom.dev
- nextcloud.eom.dev
- redmine.eom.dev
- name: Create ConfigMap for httpd
k8s:
state: present
api_version: v1
kind: ConfigMap
name: cm-reverse-proxy-eom-dev-production
namespace: production
name: proxy
namespace: eom
definition:
data:
httpd.conf: "{{ lookup('file', 'httpd.conf') }}"
httpd-ssl.conf: "{{ lookup('file', 'httpd-ssl.conf') }}"
mime.types: "{{ lookup('file', 'mime.types') }}"
server.crt: "{{ lookup('file', 'server.crt') }}"
server.key: "{{ lookup('file', 'server.key') }}"
- name: Create a deployment
k8s:
@ -21,30 +41,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-reverse-proxy-eom-dev-production
namespace: production
name: proxy
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: proxy
template:
metadata:
labels:
app: eom.dev
app: proxy
spec:
containers:
- name: reverse-proxy
- name: proxy
image: httpd
volumeMounts:
- name: httpd-conf
mountPath: /usr/local/apache2/conf
- name: letsencrypt
mountPath: /usr/local/apache2/conf/ssl
ports:
- containerPort: 443
volumes:
- name: httpd-conf
configMap:
name: cm-reverse-proxy-eom-dev-production
name: proxy
- name: letsencrypt
secret:
secretName: proxy
- name: Expose deployment as a service
k8s:
@ -52,11 +77,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-reverse-proxy-eom-dev-production
namespace: production
name: proxy
namespace: eom
spec:
selector:
app: eom.dev
app: proxy
ports:
- port: 443
protocol: TCP

View File

@ -7,16 +7,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-redis-eom-dev
name: redis
spec:
capacity:
storage: "{{ redis_production_storage }}"
storage: "{{ redis_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-redis-eom-dev
path: "/data/store-0/eom/redis"
nodeAffinity:
required:
nodeSelectorTerms:
@ -33,16 +33,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-redis-eom-dev
namespace: production
name: redis
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ redis_production_storage }}"
storage: "{{ redis_storage }}"
storageClassName: standard
volumeName: pv-redis-eom-dev
volumeName: redis
- name: Create a deployment
k8s:
@ -50,30 +50,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-redis-eom-dev
namespace: production
name: redis
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: redis
template:
metadata:
labels:
app: eom.dev
app: redis
spec:
containers:
- name: redis
image: redis
volumeMounts:
- name: pv-redis-eom-dev
- name: redis
mountPath: /data
ports:
- containerPort: 6379
volumes:
- name: redis.pv.eom.dev
- name: redis
persistentVolumeClaim:
claimName: pvc-redis-eom-dev
claimName: redis
- name: Expose deployment as a service
k8s:
@ -81,11 +81,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-redis-eom-dev
namespace: production
name: redis
namespace: eom
spec:
selector:
app: eom.dev
app: redis
ports:
- port: 6379
protocol: TCP

View File

@ -7,16 +7,16 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-redmine-eom-dev
name: redmine
spec:
capacity:
storage: "{{ redmine_production_storage }}"
storage: "{{ redmine_storage }}"
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: standard
hostPath:
path: /data/store-0/pv-redmine-eom-dev
path: "/data/store-0/eom/redmine"
nodeAffinity:
required:
nodeSelectorTerms:
@ -33,16 +33,16 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-redmine-eom-dev
namespace: production
name: redmine
namespace: eom
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "{{ redmine_production_storage }}"
storage: "{{ redmine_storage }}"
storageClassName: standard
volumeName: pv-redmine-eom-dev
volumeName: redmine
- name: Create a deployment
k8s:
@ -50,29 +50,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment-redmine-eom-dev
namespace: production
name: redmine
namespace: eom
spec:
replicas: 1
selector:
matchLabels:
app: eom.dev
app: redmine
template:
metadata:
labels:
app: eom.dev
app: redmine
spec:
containers:
- name: redmine
image: redmine
volumeMounts:
- name: pv-redmine-eom-dev
- name: redmine
mountPath: /usr/src/redmine/files
ports:
- containerPort: 3000
env:
- name: REDMINE_DB_MYSQL
value: "service.mariadb.eom.dev.production.svc.cluster.local"
value: "mariadb.eom.svc.cluster.local"
- name: REDMINE_DB_PORT
value: "3306"
- name: REDMINE_DB_DATABASE
@ -82,9 +82,9 @@
- name: REDMINE_DB_PASSWORD
value: "{{ redmine_mariadb_password }}"
volumes:
- name: pv-redmine-eom-dev
- name: redmine
persistentVolumeClaim:
claimName: pvc-redmine-eom-dev
claimName: redmine
- name: Expose deployment as a service
k8s:
@ -92,11 +92,11 @@
apiVersion: v1
kind: Service
metadata:
name: service-redmine-eom-dev
namespace: production
name: redmine
namespace: eom
spec:
selector:
app: eom.dev
app: redmine
ports:
- port: 80
targetPort: 3000

View File

@ -1,7 +1,14 @@
---
# tasks file for ericomeehan.org-suite
- name: Deploy reverse-proxy
include_tasks: deploy-reverse-proxy.yml
- name: Create eom namespace
k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
metadata:
name: eom
- name: Deploy mariadb
include_tasks: deploy-mariadb.yml
- name: Deploy gitea
@ -12,3 +19,80 @@
include_tasks: deploy-nextcloud.yml
- name: Deploy redmine
include_tasks: deploy-redmine.yml
- name: Deploy influxdb
include_tasks: deploy-influxdb.yml
- name: Deploy grafana
include_tasks: deploy-grafana.yml
- name: Create ingress resource
k8s:
state: present
definition:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
name: eom-ingress
namespace: eom
spec:
ingressClassName: nginx
rules:
- host: gitea.eom.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: gitea
port:
number: 80
- host: grafana.eom.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: grafana
port:
number: 80
- host: mediawiki.eom.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: mediawiki
port:
number: 80
- host: nextcloud.eom.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: nextcloud
port:
number: 80
- host: redmine.eom.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: redmine
port:
number: 80
tls:
- hosts:
- gitea.eom.dev
- grafana.eom.dev
- influxdb.eom.dev
- mediawiki.eom.dev
- nextcloud.eom.dev
- redmine.eom.dev
secretName: eom-certs
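# Verification sketch (not part of the role): the cluster-issuer annotation plus
# the tls block should produce a Certificate named after the secret; watch it
# become Ready before relying on the ingress:
#   kubectl get certificate eom-certs -n eom
#   kubectl describe certificate eom-certs -n eom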

View File

@ -1,10 +1,10 @@
CREATE USER 'gitea'@'%' IDENTIFIED BY '{{ gitea_mariadb_password }}';
CREATE DATABASE IF NOT EXISTS gitea
CREATE DATABASE IF NOT EXISTS gitea;
GRANT ALL PRIVILEGES on gitea.* to 'gitea'@'%';
FLUSH privileges;
CREATE USER 'mediawiki'@'%' IDENTIFIED BY '{{ mediawiki_mariadb_password }}';
CREATE DATABASE IF NOT EXISTS mediawiki
CREATE DATABASE IF NOT EXISTS mediawiki;
GRANT ALL PRIVILEGES on mediawiki.* to 'mediawiki'@'%';
FLUSH privileges;
@ -14,6 +14,6 @@ GRANT ALL PRIVILEGES on nextcloud.* to 'nextcloud'@'%';
FLUSH privileges;
CREATE USER 'redmine'@'%' IDENTIFIED BY '{{ redmine_mariadb_password }}';
CREATE DATABASE IF NOT EXISTS redmine
CREATE DATABASE IF NOT EXISTS redmine;
GRANT ALL PRIVILEGES on redmine.* to 'redmine'@'%';
FLUSH privileges;
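-- Verification sketch (not part of the template): after the init scripts run,
-- confirm the databases and grants from inside the mariadb container:
--   SHOW DATABASES;
--   SHOW GRANTS FOR 'gitea'@'%';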

View File

@ -3,6 +3,7 @@
- name: Install additional user packages
apt:
name:
- curl
- git
- gimp
- gpsd
@ -11,6 +12,7 @@
- openscad
- passwordsafe
- tmux
- w3m
state: present
- name: Append tmux text to zshrc
@ -40,3 +42,4 @@
src: init.lua
dest: /home/eric/.config/nvim/init.lua
# TODO: ansible-galaxy collection install community.kubernetes

View File

@ -1,9 +0,0 @@
---
# tasks file for ericomeehan.gondwanamc
- name: Deploy to testing
include_tasks: deploy-testing.yml
when: target_namespace == "testing" or target_namespace == "both"
- name: Deploy to production
include_tasks: deploy-production.yml
when: target_namespace == "production" or target_namespace == "both"

View File

@ -0,0 +1,109 @@
---
# tasks file for ericomeehan.gondwanamc
# TODO: Create configmaps for configurations
- name: Create gondwanamc namespace
k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
metadata:
name: gondwanamc
- name: Create persistent volume claim for gondwanamc volume
k8s:
state: present
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gondwanamc
namespace: gondwanamc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
storageClassName: alpha-0-store-0
volumeName: gondwanamc
- name: Manually copy world data to pvc
pause:
prompt: Press enter once world data has been manually copied
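# Illustrative sketch of the manual step above, assuming the alpha-0-store-0
# class is backed by a hostPath on the worker (the destination path is an
# assumption, not taken from this repo):
#   rsync -av ./Gondwana/ alpha-worker-0:/data/store-0/gondwanamc/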
- name: Create config map for server.properties
k8s:
state: present
api_version: v1
kind: ConfigMap
name: properties
namespace: gondwanamc
definition:
data:
server.properties: "{{ lookup('file', 'server.properties') }}"
whitelist.json: "{{ lookup('file', 'whitelist.json') }}"
- name: Create a Deployment
k8s:
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: gondwanamc
namespace: gondwanamc
spec:
replicas: 1
selector:
matchLabels:
app: gondwanamc
template:
metadata:
labels:
app: gondwanamc
spec:
containers:
- name: minecraft
image: itzg/minecraft-server
volumeMounts:
- name: properties
mountPath: /data
- name: gondwanamc
mountPath: /data/Gondwana
ports:
- containerPort: 25565
- containerPort: 24454
env:
- name: EULA
value: "TRUE"
- name: TYPE
value: "FABRIC"
- name: MODS
value: "https://download.geysermc.org/v2/projects/geyser/versions/latest/builds/latest/downloads/fabric,https://cdn.modrinth.com/data/bWrNNfkb/versions/D4KXqjtC/Floodgate-Fabric-2.2.3-SNAPSHOT%2Bbuild.28.jar,https://www.curseforge.com/api/v1/mods/306612/files/5510851/download,https://www.curseforge.com/api/v1/mods/416089/files/5500955/download"
volumes:
- name: gondwanamc
persistentVolumeClaim:
claimName: gondwanamc
- name: properties
configMap:
name: properties
- name: Expose Deployment as a Service
k8s:
definition:
apiVersion: v1
kind: Service
metadata:
name: gondwanamc
namespace: gondwanamc
spec:
selector:
app: gondwanamc
ports:
- port: 24454
protocol: TCP
name: gondwanamc-port-24454
- port: 25565
protocol: TCP
name: gondwanamc-port-25565
type: NodePort
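# Note (sketch, not part of the tasks): with type NodePort and no explicit
# nodePort values, Kubernetes assigns ports from the 30000-32767 range; list
# the assigned ports with:
#   kubectl get svc gondwanamc -n gondwanamc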

View File

@ -0,0 +1,38 @@
---
# tasks file for nvidia-proprietary
- name: Add contrib & non-free repository
replace:
dest: /etc/apt/sources.list
regexp: '^(deb(?!.* contrib).*)'
replace: '\1 contrib non-free'
- name: Update apt
become: yes
apt:
update_cache: yes
- name: Install Linux headers
apt:
name: linux-headers-{{ ansible_kernel }}
state: present
- name: Install Nvidia and CUDA drivers
apt:
state: present
name:
- nvidia-driver
- firmware-misc-nonfree
- nvidia-cuda-dev
- nvidia-cuda-toolkit
register: install_driver
- name: Install tesla drivers
apt:
state: present
name:
- nvidia-tesla-470-driver
when: nvidia_driver_tesla == true
- name: Reboot after driver install
reboot:
when: install_driver.changed and not nvidia_driver_skip_reboot

View File

@ -6,32 +6,55 @@
regexp: '^(deb(?!.* contrib).*)'
replace: '\1 contrib non-free'
- name: Update apt
become: yes
apt:
update_cache: yes
- name: Install Linux headers
apt:
name: linux-headers-{{ ansible_kernel }}
state: present
- name: Install Nvidia and CUDA drivers
apt:
state: present
name:
- nvidia-driver
- firmware-misc-nonfree
- nvidia-cuda-dev
- nvidia-cuda-toolkit
register: install_driver
- name: Download Nvidia driver local repo
get_url:
url: https://us.download.nvidia.com/tesla/550.90.07/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
dest: /tmp/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
- name: Install tesla drivers
- name: Install Nvidia driver local repo
apt:
deb: /tmp/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
state: present
- name: Add Nvidia driver local repo keyring
copy:
remote_src: true
src: /var/nvidia-driver-local-repo-debian12-550.90.07/nvidia-driver-local-3FEEC8FF-keyring.gpg
dest: /usr/share/keyrings/nvidia-driver-local-3FEEC8FF-keyring.gpg
- name: Download CUDA repo
get_url:
url: https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
dest: /tmp/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
- name: Install CUDA repo
apt:
deb: /tmp/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
state: present
- name: Add CUDA repo keyring
copy:
remote_src: true
src: /var/cuda-repo-debian12-12-4-local/cuda-C5AA6424-keyring.gpg
dest: /usr/share/keyrings/cuda-C5AA6424-keyring.gpg
- name: Update package list
apt:
update_cache: yes
- name: Install Nvidia driver and CUDA toolkit
apt:
name:
- nvidia-tesla-470-driver
when: nvidia_driver_tesla == true
- firmware-misc-nonfree
- cuda-toolkit-12-4
- nvidia-driver=550.90.07-1
state: present
register: install_driver
- name: Reboot after driver install
reboot:
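# Post-install sketch (not part of the tasks): after the reboot, confirm the
# pinned driver and CUDA toolkit are active:
#   nvidia-smi
#   nvcc --version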

View File

@ -0,0 +1,3 @@
skip_list:
- 'yaml'
- 'role-name'

View File

@ -0,0 +1,4 @@
# These are supported funding model platforms
---
github: geerlingguy
patreon: geerlingguy

View File

@ -0,0 +1,57 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 90
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 30
# Only issues or pull requests with all of these labels are checked for staleness. Defaults to `[]` (disabled)
onlyLabels: []
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- bug
- pinned
- security
- planned
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false
# Label to use when marking as stale
staleLabel: stale
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
pulls:
markComment: |-
This pull request has been marked 'stale' due to lack of recent activity. If there is no further activity, the PR will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark pull requests as stale.
unmarkComment: >-
This pull request is no longer marked for closure.
closeComment: >-
This pull request has been closed due to inactivity. If you feel this is in error, please reopen the pull request or file a new PR with the relevant details.
issues:
markComment: |-
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
unmarkComment: >-
This issue is no longer marked for closure.
closeComment: >-
This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.

View File

@ -0,0 +1,67 @@
---
name: CI
'on':
pull_request:
push:
branches:
- master
schedule:
- cron: "30 4 * * 2"
defaults:
run:
working-directory: 'geerlingguy.helm'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v2
with:
path: 'geerlingguy.helm'
- name: Set up Python 3.
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install yamllint
- name: Lint code.
run: |
yamllint .
molecule:
name: Molecule
runs-on: ubuntu-latest
strategy:
matrix:
distro:
- centos8
- ubuntu2004
- debian10
steps:
- name: Check out the codebase.
uses: actions/checkout@v2
with:
path: 'geerlingguy.helm'
- name: Set up Python 3.
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install ansible molecule[docker] docker
- name: Run Molecule tests.
run: molecule test
env:
PY_COLORS: '1'
ANSIBLE_FORCE_COLOR: '1'
MOLECULE_DISTRO: ${{ matrix.distro }}

View File

@ -0,0 +1,38 @@
---
# This workflow requires a GALAXY_API_KEY secret present in the GitHub
# repository or organization.
#
# See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
# See: https://github.com/ansible/galaxy/issues/46
name: Release
'on':
push:
tags:
- '*'
defaults:
run:
working-directory: 'geerlingguy.helm'
jobs:
release:
name: Release
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v2
with:
path: 'geerlingguy.helm'
- name: Set up Python 3.
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install Ansible.
run: pip3 install ansible-base
- name: Trigger a new import on Galaxy.
run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)

roles/geerlingguy.helm/.gitignore
View File

@ -0,0 +1,5 @@
*.retry
*/__pycache__
*.pyc
.cache

View File

@ -0,0 +1,10 @@
---
extends: default
rules:
line-length:
max: 200
level: warning
ignore: |
.github/stale.yml

View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2020 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,45 @@
# Ansible Role: Helm
[![CI](https://github.com/geerlingguy/ansible-role-helm/workflows/CI/badge.svg?event=push)](https://github.com/geerlingguy/ansible-role-helm/actions?query=workflow%3ACI)
This role installs the [Helm](https://helm.sh) binary on any supported host.
## Requirements
N/A
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
helm_version: 'v3.2.1'
helm_platform: linux
helm_arch: amd64
Controls for the version of Helm to be installed. See [available Helm releases](https://github.com/helm/helm/releases/). You can upgrade or downgrade versions by changing the `helm_version`.
helm_repo_path: "https://get.helm.sh"
The path to the main Helm repo. Unless you need to override this for special reasons (e.g. running on servers without public Internet access), you should leave it as the default.
helm_bin_path: /usr/local/bin/helm
The location where the Helm binary will be installed.
## Dependencies
None.
## Example Playbook
- hosts: all
roles:
- role: geerlingguy.helm
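A variant pinning a different release (the version string here is illustrative, not a recommendation):
- hosts: all
  roles:
    - role: geerlingguy.helm
      helm_version: 'v3.9.0'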
## License
MIT / BSD
## Author Information
This role was created in 2020 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@ -0,0 +1,9 @@
---
# See available releases: https://github.com/helm/helm/releases/
helm_version: 'v3.2.1'
helm_platform: linux
helm_arch: amd64
helm_repo_path: "https://get.helm.sh"
helm_bin_path: /usr/local/bin/helm

View File

@ -0,0 +1,2 @@
install_date: 'Fri 19 Jul 2024 09:40:58 PM '
version: 1.0.1

View File

@ -0,0 +1,47 @@
---
dependencies: []
galaxy_info:
author: geerlingguy
role_name: helm
description: Helm for Kubernetes.
company: Midwestern Mac, LLC
license: license (MIT)
min_ansible_version: 2.7
platforms:
- name: EL
versions:
- all
- name: GenericUNIX
versions:
- all
- name: Fedora
versions:
- all
- name: opensuse
versions:
- all
- name: GenericBSD
versions:
- all
- name: FreeBSD
versions:
- all
- name: Ubuntu
versions:
- all
- name: SLES
versions:
- all
- name: GenericLinux
versions:
- all
- name: Debian
versions:
- all
galaxy_tags:
- kubernetes
- k8s
- cloud
- containers
- helm
- deployment

View File

@ -0,0 +1,13 @@
---
- name: Converge
hosts: all
become: true
pre_tasks:
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=600
when: ansible_os_family == 'Debian'
changed_when: false
roles:
- role: geerlingguy.helm

View File

@ -0,0 +1,17 @@
---
dependency:
name: galaxy
driver:
name: docker
platforms:
- name: instance
image: "geerlingguy/docker-${MOLECULE_DISTRO:-centos8}-ansible:latest"
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
privileged: true
pre_build_image: true
provisioner:
name: ansible
playbooks:
converge: ${MOLECULE_PLAYBOOK:-converge.yml}

View File

@ -0,0 +1,18 @@
---
- name: Verify
hosts: all
gather_facts: false
vars_files:
- ../../defaults/main.yml
tasks:
- name: Check Helm version.
command: "{{ helm_bin_path }} version"
changed_when: false
register: helm_verify_version
- name: Verify Helm's version is {{ helm_version }}.
assert:
that:
- helm_version in helm_verify_version.stdout

View File

@ -0,0 +1,31 @@
---
- name: Check if Helm binary exists.
stat:
path: "{{ helm_bin_path }}"
register: helm_check
- name: Check Helm version.
command: "{{ helm_bin_path }} version"
failed_when: false
changed_when: false
register: helm_existing_version
- name: Download helm.
unarchive:
src: "{{ helm_repo_path }}/helm-{{ helm_version }}-{{ helm_platform }}-{{ helm_arch }}.tar.gz"
dest: /tmp
remote_src: true
mode: 0755
register: helm_download
when: >
not helm_check.stat.exists
or helm_version not in helm_existing_version.stdout
- name: Copy helm binary into place.
copy:
src: "/tmp/{{ helm_platform }}-{{ helm_arch }}/helm"
dest: "{{ helm_bin_path }}"
mode: 0755
remote_src: true
become: true
when: helm_download is changed

View File

@ -35,15 +35,13 @@
state: present
- name: Enable prometheus node exporter
service:
name: node_exporter
name: prometheus-node-exporter
state: started
enabld: true
enabled: true
roles:
- role: ericomeehan.ericomeehan
# TODO: prepare raid array during os installation
# TODO: install helm on control plane
- name: Prepare cluster environments
- name: Initialize cluster nodes
hosts: clusters
become: true
pre_tasks:
@ -69,28 +67,87 @@
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- role: geerlingguy.kubernetes
- role: geerlingguy.helm
when: kubernetes_role == 'control_plane'
- role: ericomeehan.nvidia_driver_debian
when: nvidia_driver_needed == true
- name: Prepare cluster environment
hosts: control_plane
become: true
tasks:
- name: Create production namespace
- name: Apply deploy.yaml from ingress-nginx release
k8s:
src: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.10.1/deploy/static/provider/baremetal/deploy.yaml
apply: yes
- name: Wait 10 seconds for ingress-nginx to initialize
wait_for:
timeout: 10
- name: Get the ingress-nginx-controller service ports
k8s_info:
kind: Service
name: ingress-nginx-controller
namespace: ingress-nginx
register: service_details
- name: Print ingress-nginx ports
debug:
var: service_details.resources[0].spec.ports
- name: Manually update port forwarding rules
pause:
prompt: Press enter once port forwarding rules are updated
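# Illustrative forwarding rule for the manual step above (router syntax and the
# WAN side are assumptions): send TCP 80/443 to the printed nodePorts on a
# cluster node, e.g.
#   WAN:80  -> 192.168.1.132:<http nodePort>
#   WAN:443 -> 192.168.1.132:<https nodePort>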
- name: Apply cert-manager.yaml from cert-manager release
k8s:
src: https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
apply: yes
- name: Wait 10 seconds for cert-manager to initialize
wait_for:
timeout: 10
- name: Create issuer for letsencrypt staging
k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: production
when: kubernetes_role == "control_plane"
- name: Create testing namespace
name: letsencrypt-staging
spec:
acme:
email: eric@eom.dev
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
ingressClassName: nginx
- name: Create issuer for letsencrypt production
k8s:
state: present
definition:
apiVersion: v1
kind: Namespace
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: testing
when: kubernetes_role == "control_plane"
name: letsencrypt-production
spec:
acme:
email: eric@eom.dev
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-production
solvers:
- http01:
ingress:
ingressClassName: nginx
- name: Wait 10 seconds for letsencrypt to initialize
wait_for:
timeout: 10
- name: Deploy services
hosts: alpha-control-plane
become: true
roles:
- role: ericomeehan.eom.dev
- role: ericomeehan.gondwanamc

View File

@ -1,3 +1,3 @@
nodePorts:
gondwanamc-24454: 32454
gondwanamc-25565: 32565
gondwanamc-24454: 30000
gondwanamc-25565: 30001