Compare commits

...

24 Commits

Author SHA1 Message Date
043f4e78bf Added TES3MP 2025-12-24 13:20:58 -05:00
987a6cada4 Update submodules, adding MinIO role 2025-11-08 12:13:12 -05:00
d94e595f21 WikiDeck and eom 2025-10-29 09:23:21 -04:00
4a86fc43a0 Updating roles 2025-10-25 10:14:19 -04:00
9015a48417 PowerEdge R720 QEMU & Kubernetes Setup (#23)
* Added 5 VMs to Alpha cluster.
* Created personal dev node
2025-07-31 21:41:19 +00:00
3e8c8a3566 Submodule for Mastodon 2025-07-20 07:51:34 -04:00
8a96979dd8 Luanti and Mastodon submodules 2025-07-01 09:48:16 -04:00
6260c6ca27 Submodules 2025-06-26 12:58:18 -04:00
a9f7f25b7f Updates 2025-06-26 12:56:40 -04:00
6b6454d907 Matrix Stack
Added ansible-role-matrix-stack submodule.
2025-05-25 22:13:21 -04:00
2ff0bc1502 Nvidia Tesla T4 2025-03-08 11:59:41 -05:00
bf7954c5ae Added ansible-role-libvirt-guest submodule 2025-02-25 22:44:36 -05:00
aa2400cf13 Update gitmodules 2025-02-22 16:22:44 -05:00
b0063cc367 Raspberry Pi 2025-01-13 16:03:20 -05:00
67bcc80664 v1.0.6 2024-11-25 22:15:23 -05:00
cff19d90ad v1.0.5 2024-11-23 21:51:47 -05:00
d844bd3933 v1.0.4 2024-11-16 10:06:31 -05:00
db94484a35 v1.0.3 2024-11-14 10:40:40 -05:00
5ceba22dc8 v1.0.2 2024-11-14 10:31:30 -05:00
1c0f927278 v1.0.1 2024-11-11 15:59:09 -05:00
6e7ee42c1e v1.0.0 2024-11-11 15:46:28 -05:00
3c1ec13720 v0.1.0 2024-10-29 10:48:50 -04:00
160ffca2ee v0.0.20 2024-10-25 10:25:04 -04:00
2f3f4a8d2c v0.0.19 2024-10-10 11:37:23 -04:00
145 changed files with 2790 additions and 368 deletions

View File

@@ -0,0 +1,22 @@
name: deployLibvirtGuests
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Ansible
run: |
apt update -y
apt install python3-pip -y
python3 -m pip install ansible
- name: Run playbook
uses: dawidd6/action-ansible-playbook@v2
with:
playbook: libvirtGuests.yaml
key: ${{secrets.SSH_PRIVATE_KEY}}
vault_password: ${{secrets.VAULT_PASSWORD}}
options: |
--inventory inventories/eom.dev.yaml

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
vault
*.tar.gz
*.qcow2

51
.gitmodules vendored
View File

@@ -1,18 +1,57 @@
[submodule "roles/ericomeehan.eom"]
path = roles/ericomeehan.eom
url = https://git.eom.dev/ansible-role-eom
url = git@gitea.eom.dev:DevOps/ansible-role-eom.git
[submodule "roles/ericomeehan.debian"]
path = roles/ericomeehan.debian
url = https://git.eom.dev/ansible-role-debian
url = git@gitea.eom.dev:DevOps/ansible-role-debian.git
[submodule "roles/ericomeehan.gondwana"]
path = roles/ericomeehan.gondwana
url = https://git.eom.dev/ansible-role-gondwana
url = git@gitea.eom.dev:Gaming/ansible-role-gondwana.git
[submodule "roles/ericomeehan.ericomeehan"]
path = roles/ericomeehan.ericomeehan
url = https://git.eom.dev/ansible-role-ericomeehan
url = git@gitea.eom.dev:eric/ansible-role-ericomeehan.git
[submodule "roles/ericomeehan.nvidia_driver"]
path = roles/ericomeehan.nvidia_driver
url = https://git.eom.dev/ansible-role-nvidia-driver
url = git@gitea.eom.dev:DevOps/ansible-role-nvidia-driver.git
[submodule "roles/ericomeehan.kraken-bot"]
path = roles/ericomeehan.kraken-bot
url = https://git.eom.dev/ansible-role-kraken-bot
url = git@gitea.eom.dev:Finance/ansible-role-kraken-bot.git
[submodule "roles/ericomeehan.libvirt-guest"]
path = roles/ericomeehan.libvirt-guest
url = git@gitea.eom.dev:DevOps/ansible-role-libvirt-guest.git
[submodule "roles/ericomeehan.matrix-stack"]
path = roles/ericomeehan.matrix-stack
url = git@gitea.eom.dev:DevOps/ansible-role-matrix-stack.git
[submodule "roles/ericomeehan.luanti"]
path = roles/ericomeehan.luanti
url = git@gitea.eom.dev:Gaming/ansible-role-luanti.git
[submodule "roles/ericomeehan.mastodon"]
path = roles/ericomeehan.mastodon
url = git@gitea.eom.dev:DevOps/ansible-role-mastodon.git
[submodule "roles/ericomeehan.vintage-story"]
path = roles/ericomeehan.vintage-story
url = git@gitea.eom.dev:Gaming/ansible-role-vintage-story.git
[submodule "roles/ericomeehan.wikideck"]
path = roles/ericomeehan.wikideck
url = git@gitea.eom.dev:WikiDeck/ansible-role-wikideck.git
[submodule "roles/ericomeehan.localai"]
path = roles/ericomeehan.localai
url = git@gitea.eom.dev:DevOps/ansible-role-localai.git
[submodule "roles/ericomeehan.localagi"]
path = roles/ericomeehan.localagi
url = git@gitea.eom.dev:DevOps/ansible-role-localagi.git
[submodule "roles/ericomeehan.localrecall"]
path = roles/ericomeehan.localrecall
url = git@gitea.eom.dev:DevOps/ansible-role-localrecall.git
[submodule "roles/ericomeehan.minio"]
path = roles/ericomeehan.minio
url = git@gitea.eom.dev:DevOps/ansible-role-minio.git
[submodule "roles/ericomeehan.jamulus"]
path = roles/ericomeehan.jamulus
url = git@gitea.eom.dev:DevOps/ansible-role-jamulus.git
[submodule "roles/cleary.tidalcycles"]
path = roles/cleary.tidalcycles
url = https://github.com/cleary/ansible-tidalcycles.git
[submodule "roles/ericomeehan.tes3mp"]
path = roles/ericomeehan.tes3mp
url = git@gitea.eom.dev:Gaming/ansible-role-tes3mp.git

BIN
.localai.yaml.swp Normal file

Binary file not shown.

9
alpha-cluster.yaml Normal file
View File

@@ -0,0 +1,9 @@
---
# Playbook for alpha-cluster
- name: Deploy services to Alpha Cluster
hosts: alpha-control-plane
become: true
vars_files:
- secrets.yaml
roles:
- role: ericomeehan.gondwanamc

View File

@@ -1,13 +1,38 @@
---
# Master playbook for eom.dev
- name: Initialize systems
hosts: clusters
- name: Initialize servers
hosts: servers
become: true
roles:
- role: ericomeehan.debian
vars:
is_new_host: true
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
vars:
nvidia_driver_debian_install_tesla_driver: true
when:
- nvidia_driver_needed == true
- role: ericomeehan.qemu-network
- name: Wait for virtual machines to boot
- name: Copy SSH ID to virtual machines
- name: Initialize virtual machines
hosts: vms
become: true
roles:
- role: ericomeehan.debian
vars:
is_new_host: true
- role: ericomeehan.ericomeehan
- name: Initialize cluster nodes
- name: Initialize load balancers
hosts: load_balancers
become: true
- name: Initialize Kubernetes clusters
hosts: clusters
become: true
pre_tasks:
@@ -37,11 +62,8 @@
- role: geerlingguy.containerd
- role: geerlingguy.kubernetes
- role: geerlingguy.helm
when: kubernetes_role == 'control_plane'
- role: ericomeehan.nvidia_driver
vars:
- nvidia_driver_debian_install_tesla_driver: true
when: nvidia_driver_needed == true
when:
- kubernetes_role == 'control_plane'
- name: Deploy services
hosts: alpha-control-plane
@@ -50,8 +72,4 @@
become: true
roles:
- role: ericomeehan.eom
vars:
target_namespace: prod
- role: ericomeehan.gondwana
vars:
target_namespace: prod

View File

@@ -1,8 +1,5 @@
---
# Playbook for deploying a Kubernetes cluster
- name: Create network bridge
- name: Create virtual machines
- name: Prepare cluster environments
hosts: cluster_nodes
become: true

341
discourse_deploy.yaml Normal file
View File

@@ -0,0 +1,341 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
meta.helm.sh/release-name: discourse
meta.helm.sh/release-namespace: discourse
creationTimestamp: "2025-11-11T21:42:30Z"
generation: 1
labels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: discourse
app.kubernetes.io/version: 3.5.0
helm.sh/chart: discourse-17.0.1
name: discourse
namespace: discourse
resourceVersion: "126024847"
uid: 3ab876dd-fba3-4b05-b5b7-4d98e455fc77
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
annotations:
checksum/config: e5461cf0a1e43f8902c5301b3406945d6a42bf2817fd69f91864bd1690a64b6b
checksum/secrets-database: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
checksum/secrets-discourse: 400095440da345a753f05bd9af8e09a98b4aba5b2c80294e9d2b4956a080ef86
checksum/secrets-redis: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
creationTimestamp: null
labels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: discourse
app.kubernetes.io/version: 3.5.0
helm.sh/chart: discourse-17.0.1
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/instance: discourse
app.kubernetes.io/name: discourse
topologyKey: kubernetes.io/hostname
weight: 1
automountServiceAccountToken: false
containers:
- args:
- -c
- |
rm -r /opt/bitnami/discourse/plugins/chat
chown -R discourse:root /opt/bitnami/discourse/plugins
/opt/bitnami/scripts/discourse/entrypoint.sh /opt/bitnami/scripts/discourse/run.sh
command:
- /bin/bash
env:
- name: BITNAMI_DEBUG
value: "false"
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
key: discourse-password
name: discourse-discourse
- name: DISCOURSE_PORT_NUMBER
value: "8080"
- name: DISCOURSE_EXTERNAL_HTTP_PORT_NUMBER
value: "80"
- name: DISCOURSE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: discourse-postgresql
- name: POSTGRESQL_CLIENT_CREATE_DATABASE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: discourse-postgresql
- name: POSTGRESQL_CLIENT_POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: postgres-password
name: discourse-postgresql
- name: DISCOURSE_REDIS_PASSWORD
valueFrom:
secretKeyRef:
key: redis-password
name: discourse-redis
- name: DISCOURSE_SMTP_PASSWORD
valueFrom:
secretKeyRef:
key: smtp-password
name: discourse-discourse
- name: DISCOURSE_DATA_TO_PERSIST
value: public/backups public/uploads
envFrom:
- configMapRef:
name: discourse
image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 6
initialDelaySeconds: 500
periodSeconds: 10
successThreshold: 1
tcpSocket:
port: http
timeoutSeconds: 5
name: discourse
ports:
- containerPort: 8080
name: http
protocol: TCP
readinessProbe:
failureThreshold: 6
httpGet:
path: /srv/status
port: http
scheme: HTTP
initialDelaySeconds: 180
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /bitnami/discourse
name: discourse-data
subPath: discourse
- mountPath: /opt/bitnami/discourse/plugins
name: empty-dir
subPath: app-plugins-dir
- args:
- /opt/bitnami/scripts/discourse-sidekiq/run.sh
command:
- /opt/bitnami/scripts/discourse/entrypoint.sh
env:
- name: BITNAMI_DEBUG
value: "false"
- name: DISCOURSE_PASSWORD
valueFrom:
secretKeyRef:
key: discourse-password
name: discourse-discourse
- name: DISCOURSE_POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: discourse-postgresql
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
key: redis-password
name: discourse-redis
- name: DISCOURSE_SMTP_PASSWORD
valueFrom:
secretKeyRef:
key: smtp-password
name: discourse-discourse
- name: DISCOURSE_DATA_TO_PERSIST
value: public/backups public/uploads
envFrom:
- configMapRef:
name: discourse
image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- /bin/sh
- -c
- pgrep -f ^sidekiq
failureThreshold: 6
initialDelaySeconds: 500
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: sidekiq
readinessProbe:
exec:
command:
- /bin/sh
- -c
- pgrep -f ^sidekiq
failureThreshold: 6
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
resources:
limits:
cpu: 750m
ephemeral-storage: 2Gi
memory: 768Mi
requests:
cpu: 500m
ephemeral-storage: 50Mi
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /bitnami/discourse
name: discourse-data
subPath: discourse
- mountPath: /opt/bitnami/discourse/plugins
name: empty-dir
subPath: app-plugins-dir
dnsPolicy: ClusterFirst
initContainers:
- args:
- -c
- |
pushd "/opt/bitnami/discourse" >/dev/null || exit 1
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-adplugin
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-subscriptions
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-activity-pub
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-openid-connect
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/jonmbake/discourse-ldap-auth
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-math
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-post-voting
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-prometheus
RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-reactions
RAILS_ENV=production LOAD_PLUGINS=0 bundle exec rake plugin:pull_compatible_all
popd >/dev/null || exit 1
cp -nr --preserve=mode /opt/bitnami/discourse/plugins/* /plugins
command:
- /bin/bash
image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
imagePullPolicy: IfNotPresent
name: install-plugins
resources: {}
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- CHOWN
- SYS_CHROOT
- FOWNER
- SETGID
- SETUID
- DAC_OVERRIDE
drop:
- ALL
privileged: false
readOnlyRootFilesystem: false
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
seLinuxOptions: {}
seccompProfile:
type: RuntimeDefault
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /plugins
name: empty-dir
subPath: app-plugins-dir
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 0
fsGroupChangePolicy: Always
serviceAccount: discourse
serviceAccountName: discourse
terminationGracePeriodSeconds: 30
volumes:
- emptyDir: {}
name: empty-dir
- name: discourse-data
persistentVolumeClaim:
claimName: discourse
status:
conditions:
- lastTransitionTime: "2025-11-11T21:42:30Z"
lastUpdateTime: "2025-11-11T21:42:30Z"
message: Deployment does not have minimum availability.
reason: MinimumReplicasUnavailable
status: "False"
type: Available
- lastTransitionTime: "2025-11-11T21:42:30Z"
lastUpdateTime: "2025-11-11T21:42:30Z"
message: ReplicaSet "discourse-75db4ff77d" is progressing.
reason: ReplicaSetUpdated
status: "True"
type: Progressing
observedGeneration: 1
replicas: 1
unavailableReplicas: 1
updatedReplicas: 1

71
eom.dev.yaml Normal file
View File

@@ -0,0 +1,71 @@
---
# Master playbook for eom.dev
- name: Initialize servers
hosts: servers
become: true
roles:
- role: ericomeehan.debian
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
when: nvidia_driver_needed == true
- role: ericomeehan.qemu_network
- name: Initialize clusters
hosts: clusters
become: true
pre_tasks:
- name: Update sysctl configuration to enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Update sysctl configuration to enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- role: geerlingguy.kubernetes
- role: geerlingguy.helm
when: kubernetes_role == 'control_plane'
- name: Initialize gateway
hosts: gateway
become: true
roles:
- role: ericomeehan.gateway
- name: Initialize vpn
hosts: vpn
become: true
roles:
- role: ericomeehan.vpn
- name: Initialize nfs
hosts: nfs
become: true
roles:
- role: ericomeehan.nfs
- name: Deploy services
hosts: control-plane
vars_files:
- vars/secrets.yaml
become: true
roles:
- role: ericomeehan.eom
- role: ericomeehan.gondwana
- role: ericomeehan.kraken-bot

View File

@@ -1,10 +1,8 @@
---
- name: Deploy eom.dev one service at a time
hosts: alpha-control-plane
hosts: imac
become: true
vars:
target_namespace: prod
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.eom
- role: ericomeehan.ericomeehan

13
files/01_debian_cloud.cfg Normal file
View File

@@ -0,0 +1,13 @@
apt:
generate_mirrorlists: true
system_info:
default_user:
name: debian
lock_passwd: True
gecos: Debian
groups: [adm, audio, cdrom, dialout, dip, floppy, plugdev, sudo, video]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash

71
files/05_logging.cfg Normal file
View File

@@ -0,0 +1,71 @@
## This yaml formatted config file handles setting
## logger information. The values that are necessary to be set
## are seen at the bottom. The top '_log' are only used to remove
## redundancy in a syslog and fallback-to-file case.
##
## The 'log_cfgs' entry defines a list of logger configs
## Each entry in the list is tried, and the first one that
## works is used. If a log_cfg list entry is an array, it will
## be joined with '\n'.
_log:
- &log_base |
[loggers]
keys=root,cloudinit
[handlers]
keys=consoleHandler,cloudLogHandler
[formatters]
keys=simpleFormatter,arg0Formatter
[logger_root]
level=DEBUG
handlers=consoleHandler,cloudLogHandler
[logger_cloudinit]
level=DEBUG
qualname=cloudinit
handlers=
propagate=1
[handler_consoleHandler]
class=StreamHandler
level=WARNING
formatter=arg0Formatter
args=(sys.stderr,)
[formatter_arg0Formatter]
format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s
[formatter_simpleFormatter]
format=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s
- &log_file |
[handler_cloudLogHandler]
class=FileHandler
level=DEBUG
formatter=arg0Formatter
args=('/var/log/cloud-init.log', 'a', 'UTF-8')
- &log_syslog |
[handler_cloudLogHandler]
class=handlers.SysLogHandler
level=DEBUG
formatter=simpleFormatter
args=("/dev/log", handlers.SysLogHandler.LOG_USER)
log_cfgs:
# Array entries in this list will be joined into a string
# that defines the configuration.
#
# If you want logs to go to syslog, uncomment the following line.
# - [ *log_base, *log_syslog ]
#
# The default behavior is to just log to a file.
# This mechanism does not depend on a system service to operate.
- [ *log_base, *log_file ]
# A file path can also be used.
# - /etc/log.conf
# This tells cloud-init to redirect its stdout and stderr to
# 'tee -a /var/log/cloud-init-output.log' so the user can see output
# there without needing to look on the console.
output: {all: '| tee -a /var/log/cloud-init-output.log'}

View File

@@ -1,3 +1,4 @@
---
# Global defaults
docker_needed: false
nvidia_driver_needed: false

5
group_vars/alpha.yaml Normal file
View File

@@ -0,0 +1,5 @@
cluster_name: alpha
metallb_addresses:
- 192.168.1.160-192.168.1.191
metallb_asn: 6501
letsencrypt_url: https://acme-v02.api.letsencrypt.org/directory

5
group_vars/beta.yaml Normal file
View File

@@ -0,0 +1,5 @@
cluster_name: beta
metallb_addresses:
- 192.168.1.192-192.168.1.223
metallb_asn: 6502
letsencrypt_url: https://acme-staging-v02.api.letsencrypt.org/directory

View File

@@ -1,3 +1,7 @@
---
# Group vars for clusters
containerd_config_cgroup_driver_systemd: true
kubernetes_version: '1.31'
kubernetes_version_rhel_package: '1.31.1'
kubernetes_allow_pods_on_control_plane: false
kubernetes_join_command_extra_opts: "--ignore-preflight-errors=Port-10250"

View File

@@ -1,6 +1,7 @@
---
# Group vars for Kubernetes control plane nodes
kubernetes_role: control_plane
helm_version: 'v3.16.3'
open_ports:
- interface: any
protocol: tcp
@@ -20,3 +21,6 @@ open_ports:
- interface: any
protocol: tcp
port: 10259
memory:
unit: MiB
value: 8192

5
group_vars/gamma.yaml Normal file
View File

@@ -0,0 +1,5 @@
cluster_name: gamma
metallb_addresses:
- 192.168.1.224-192.168.1.255
metallb_asn: 6503
letsencrypt_url: https://acme-staging-v02.api.letsencrypt.org/directory

View File

@@ -0,0 +1,6 @@
---
# Group vars for hypervisors
open_ports:
- interface: any
protocol: tcp
port: 22

108
group_vars/vms.yaml Normal file
View File

@@ -0,0 +1,108 @@
---
# Group vars for virtual machines
type: kvm
name: my_vm_name
instance_id: "iid-{{ name }}"
packages:
- nfs-common
- openssh-server
- prometheus-node-exporter
- qemu-guest-agent
memory:
unit: MiB
value: 4096
vcpu:
placement: static
value: 1
os:
type:
arch: x86_64
machine: pc-q35-5.2
value: hvm
boot:
dev: hd
cpu:
mode: host-model
check: none
emulator: /usr/bin/qemu-system-x86_64
disks:
- type: file
device: disk
driver:
name: qemu
type: qcow2
source:
file: /var/lib/libvirt/images/{{ name }}.qcow2
target:
dev: vda
bus: virtio
- type: file
device: cdrom
driver:
name: qemu
type: raw
source:
file: /var/lib/libvirt/images/{{ name }}.iso
target:
dev: sda
bus: sata
interfaces:
- type: network
source:
network: wan
model:
type: virtio
channels:
- type: unix
target:
type: virtio
name: org.qemu.guest_agent.0
address:
type: virtio-serial
controller: 0
bus: 0
port: 1
inputs:
- type: tablet
bus: usb
address:
type: usb
bus: 0
port: 1
- type: mouse
bus: ps2
- type: keyboard
bus: ps2
graphics:
type: spice
autoport: 'yes'
listen:
type: address
image:
compression: 'off'
video:
model:
type: qxl
ram: 65536
vram: 65536
vgamem: 16384
heads: 1
primary: 'yes'
memballoon:
model: virtio
rng:
model: virtio
backend:
model: random
value: /dev/urandom
devices:
emulator: "{{ emulator }}"
disks: "{{ disks }}"
filesystems: "{{ filesystems }}"
interfaces: "{{ interfaces }}"
channels: "{{ channels }}"
inputs: "{{ inputs }}"
graphics: "{{ graphics }}"
video: "{{ video }}"
memballoon: "{{ memballoon }}"
rng: "{{ rng }}"

View File

@@ -14,3 +14,6 @@ open_ports:
- interface: any
protocol: tcp
port: 30000-32767
memory:
unit: MiB
value: 16384

View File

@@ -1,2 +1,6 @@
# Vars file for workstations
open_ports: ""
docker_needed: true
open_ports:
- interface: any
protocol: tcp
port: 22

43
helm.yaml Normal file
View File

@@ -0,0 +1,43 @@
---
# Playbook for deploying helm charts
- name: Deploy WordPress using Helm
hosts: alpha-control-plane
become: true
vars:
wordpress_chart_version: "9.0.1"
wordpress_release_name: "wordpress"
wordpress_database_password: "your_database_password"
wordpress_service_type: "ClusterIP"
wordpress_service_port: 80
wordpress_persistence_enabled: true
wordpress_persistence_storageClass: "standard"
wordpress_persistence_size: "20Gi"
tasks:
- name: Add Helm repository for WordPress
helm_repo:
name: bitnami
state: present
url: https://charts.bitnami.com/bitnami
- name: Install WordPress Helm chart
helm_chart:
name: "{{ wordpress_release_name }}"
chart: "wordpress"
repo: "bitnami"
version: "{{ wordpress_chart_version }}"
values:
global:
imageRegistry: "docker.io"
service:
type: "{{ wordpress_service_type }}"
port: "{{ wordpress_service_port }}"
wordpressUsername: "your_wordpress_username"
wordpressPassword: "your_wordpress_password"
wordpressEmail: "your_wordpress_email"
wordpressDatabase:
password: "{{ wordpress_database_password }}"
persistence:
enabled: "{{ wordpress_persistence_enabled }}"
storageClass: "{{ wordpress_persistence_storageClass }}"
size: "{{ wordpress_persistence_size }}"

View File

@@ -0,0 +1 @@
name: alpha-control-plane

View File

@@ -1,2 +0,0 @@
---
# Host vars for alpha-control-plane

View File

@@ -0,0 +1,2 @@
name: alpha-worker-0
nvidia_driver_needed: true

View File

@@ -1,4 +0,0 @@
---
# Host vars for alpha-worker-0
nvidia_driver_needed: true
nvidia_driver_tesla: true

View File

@@ -0,0 +1 @@
name: alpha-worker-1

View File

@@ -0,0 +1 @@
name: alpha-worker-10

View File

@@ -0,0 +1 @@
name: alpha-worker-11

View File

@@ -0,0 +1 @@
name: alpha-worker-12

View File

@@ -0,0 +1,4 @@
name: alpha-worker-13
vcpu:
placement: static
value: 2

View File

@@ -0,0 +1,4 @@
name: alpha-worker-14
vcpu:
placement: static
value: 2

View File

@@ -0,0 +1,4 @@
name: alpha-worker-15
vcpu:
placement: static
value: 2

View File

@@ -0,0 +1,4 @@
name: alpha-worker-16
vcpu:
placement: static
value: 2

View File

@@ -0,0 +1,4 @@
name: alpha-worker-17
vcpu:
placement: static
value: 2

View File

@@ -0,0 +1 @@
name: alpha-worker-2

View File

@@ -0,0 +1 @@
name: alpha-worker-3

View File

@@ -0,0 +1 @@
name: alpha-worker-4

View File

@@ -0,0 +1 @@
name: alpha-worker-5

View File

@@ -0,0 +1 @@
name: alpha-worker-6

View File

@@ -0,0 +1 @@
name: alpha-worker-7

View File

@@ -0,0 +1 @@
name: alpha-worker-8

View File

@@ -0,0 +1 @@
name: alpha-worker-9

0
host_vars/alpha.yaml Normal file
View File

View File

@@ -0,0 +1 @@
name: beta-control-plane

View File

@@ -0,0 +1 @@
name: beta-worker-0

View File

@@ -0,0 +1 @@
name: beta-worker-1

View File

@@ -0,0 +1 @@
name: beta-worker-2

View File

@@ -0,0 +1 @@
name: beta-worker-3

1
host_vars/evm.yaml Normal file
View File

@@ -0,0 +1 @@
name: evm

View File

@@ -0,0 +1 @@
name: gamma-control-plane

View File

@@ -1,16 +1,3 @@
# Host vars for inspiron-3670
docker_needed: false
nvidia_driver_needed: true
packages:
- curl
- davfs2
- gimp
- git
- gphoto2
- latexml
- neovim
- passwordsafe
- texlive-full
- thunderbird
- tmux
- torbrowser-launcher
- w3m

View File

@@ -1,3 +1,3 @@
# Host vars for latitude-7230
ansible_connection: local
open_ports: ""

View File

@@ -0,0 +1,35 @@
name: network-file-system
nfs_exports: ["/data *(rw,sync,no_root_squash)"]
mounts:
- [ vdb, /data ]
disks:
- type: file
device: disk
driver:
name: qemu
type: qcow2
source:
file: /var/lib/libvirt/images/{{ name }}.qcow2
target:
dev: vda
bus: virtio
- type: block
device: disk
driver:
name: qemu
type: raw
source:
dev: /dev/poweredge-t640-vg1/store-0
target:
dev: vdb
bus: virtio
- type: file
device: cdrom
driver:
name: qemu
type: raw
source:
file: /var/lib/libvirt/images/{{ name }}.iso
target:
dev: sda
bus: sata

View File

@@ -0,0 +1,15 @@
# Host vars for poweredge-r350
libvirt_networks:
- name: wan
forward:
mode: bridge
bridge:
name: wan
dev: eno8303
- name: lan
forward:
mode: bridge
bridge:
name: lan
dev: eno8403
libvirt_guests: ""

View File

@@ -0,0 +1,17 @@
---
# Host vars for poweredge-r720
libvirt_networks:
- name: wan
forward:
mode: bridge
bridge:
name: wan
dev: eno1
libvirt_guests:
- alpha-worker-13
- alpha-worker-14
- alpha-worker-15
- alpha-worker-16
- alpha-worker-17
- evm
nfs_exports: ["/data *(rw,sync,no_root_squash)"]

View File

@@ -0,0 +1,32 @@
---
# Host vars for poweredge-t640
libvirt_networks:
- name: wan
forward:
mode: bridge
bridge:
name: wan
dev: eno1np0
- name: lan
forward:
mode: bridge
bridge:
name: lan
dev: eno2np1
libvirt_guests:
- alpha-control-plane
- alpha-worker-0
- alpha-worker-1
- alpha-worker-2
- alpha-worker-3
- alpha-worker-4
- alpha-worker-5
- alpha-worker-6
- alpha-worker-7
- alpha-worker-8
- alpha-worker-9
- alpha-worker-10
- alpha-worker-11
- alpha-worker-12
nfs_exports: ["/data *(rw,sync,no_root_squash)"]
nvidia_driver_needed: true

View File

@@ -1,17 +0,0 @@
---
all:
children:
workstations:
hosts:
latitude-7230:
ansible-host: 192.168.1.123
inspiron-3670:
ansible-host: 192.168.1.210
imac:
ansible-host: 192.168.1.139
servers:
children:
poweredge-r350:
ansible-host: 192.168.1.137
poweredge-t640:
ansible-host: 192.168.1.138

66
inventories/eom.dev.yml Normal file
View File

@@ -0,0 +1,66 @@
---
all:
children:
workstations:
hosts:
latitude-7230:
latitude-7424:
inspiron-3670:
hypervisors:
hosts:
poweredge-r350:
poweredge-r720:
poweredge-t640:
vms:
children:
user:
hosts:
evm:
clusters:
children:
control_planes:
hosts:
alpha-control-plane:
workers:
hosts:
alpha-worker-0:
alpha-worker-1:
alpha-worker-2:
alpha-worker-3:
alpha-worker-4:
alpha-worker-5:
alpha-worker-6:
alpha-worker-7:
alpha-worker-8:
alpha-worker-9:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:
alpha-worker-13:
alpha-worker-14:
alpha-worker-15:
alpha-worker-16:
alpha-worker-17:
alpha:
hosts:
alpha-control-plane:
alpha-worker-0:
alpha-worker-1:
alpha-worker-2:
alpha-worker-3:
alpha-worker-4:
alpha-worker-5:
alpha-worker-6:
alpha-worker-7:
alpha-worker-8:
alpha-worker-9:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:
alpha-worker-13:
alpha-worker-14:
alpha-worker-15:
alpha-worker-16:
alpha-worker-17:

10
jamulus.yaml Normal file
View File

@@ -0,0 +1,10 @@
- name: Execute test
hosts: alpha-control-plane
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.jamulus
vars:
directoryAddress: "anygenre2.jamulus.io:22124"
serverInfo: "jamulus.eom.dev;Raleigh;US"

7
k8s_cp.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
# Playbook for eom.dev
- name: Alpha Cluster
hosts: alpha-control-plane
become: true
roles:
- role: geerlingguy.kubernetes

60
kubernetes.yaml Normal file
View File

@@ -0,0 +1,60 @@
---
# Playbook for Kubernetes
- name: Initialize Kubernetes clusters
hosts: clusters
become: true
pre_tasks:
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- name: Alpha Cluster
hosts: alpha
become: true
roles:
- role: geerlingguy.kubernetes
- name: Beta Cluster
hosts: beta
become: true
roles:
- role: geerlingguy.kubernetes
- name: Gamma Cluster
hosts: alpha
become: true
roles:
- role: geerlingguy.kubernetes
- name: Install Helm
hosts: control_planes
become: true
roles:
- role: geerlingguy.helm

10
libvirtGuests.yaml Normal file
View File

@@ -0,0 +1,10 @@
---
# playbook for libvirtGuests.yaml
- name: Libvirt guests
hosts: poweredge-r720
become: true
roles:
- role: ericomeehan.libvirt-guest
vars:
libvirt_networks: {}
libvirt_guests: {}

16
localagi.yaml Normal file
View File

@@ -0,0 +1,16 @@
- name: Execute test
hosts: alpha-control-plane
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.localagi
vars:
localagi_pvc_storage: 2Ti
localagi_model: llama3-8b-instruct
localagi_multimodal_model: minicpm-v-2_6
localagi_image_model: sd-1.5-ggml
localagi_localrag_url: https://localrecall.eom.dev/
localagi_llm_api_url: https://localai.eom.dev/
localagi_llm_api_key: "{{ localai_api_keys[0] }}"
localagi_timeout: "300s"

12
localai.yaml Normal file
View File

@@ -0,0 +1,12 @@
- name: Execute test
hosts: alpha-control-plane
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.localai
vars:
localai_disable_webui: "true"
localai_watchdog_idle: "true"
localai_watchdog_idle_timeout: "1m"
localai_watchdog_busy: "true"

13
localrecall.yaml Normal file
View File

@@ -0,0 +1,13 @@
- name: Execute test
hosts: alpha-control-plane
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.localrecall
vars:
localrecall_collection_db_pvc_size: 2Ti
localrecall_file_assets_pvc_size: 2Ti
localrecall_openai_api_key: "{{ localai_api_keys[1] }}"
localrecall_openai_base_url: https://localai.eom.dev/v1
localrecall_embedding_model: bert-embeddings

288
main.yaml Normal file
View File

@@ -0,0 +1,288 @@
---
# Playbook for eom.dev
# Baseline configuration for physical workstations and hypervisors.
- name: Initialize workstations
  hosts: workstations
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.debian
    - role: ericomeehan.ericomeehan
    - role: ericomeehan.nvidia_driver
      # Fix: avoid the literal comparison `== true` — it is false for string
      # values like "true" coming from INI inventories or --extra-vars.
      # `| bool` coerces both booleans and truthy strings correctly.
      when: nvidia_driver_needed | bool
    - role: geerlingguy.docker
      when: docker_needed | bool
- name: Initialize hypervisors
  hosts: hypervisors
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.debian
    - role: ericomeehan.ericomeehan
    - role: ericomeehan.nvidia_driver
      when: nvidia_driver_needed | bool
# NFS exports, hypervisor virtualization setup, and VM baseline.
- name: Initialize Network File Systems
  hosts: poweredge-t640
  become: true
  roles:
    - role: geerlingguy.nfs
  tasks:
    # One export directory per cluster plus a personal share.
    - name: Create NFS directories
      file:
        path: "{{ item }}"
        state: directory
      loop:
        - /data/alpha
        - /data/beta
        - /data/gamma
        - /data/eric
- name: Setup virtualization
  hosts: hypervisors
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.libvirt_guests
      vars:
        doSetup: true
# The operator must accept the new guests' SSH host keys before Ansible can
# reach them; pause until that is done.
- name: Wait for manual tasks
  hosts: localhost
  tasks:
    - name: Trust SSH identities
      pause:
        prompt: "Press Enter to continue..."
- name: Initialize virtual machines
  hosts: vms
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan
    - role: ericomeehan.nvidia_driver
      # Fix: `| bool` instead of `== true` (string "true" would otherwise
      # evaluate false) — consistent with the other plays in this file.
      when: nvidia_driver_needed | bool
# Kernel and package prerequisites shared by all Kubernetes nodes.
- name: Initialize Kubernetes clusters
  hosts: clusters
  become: true
  pre_tasks:
    # kubeadm/CNI require IP forwarding and bridge netfilter.
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    # Apply the sysctl entries written above without a reboot.
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Enable br_netfilter kernel module
      command: modprobe br_netfilter
    # Ensure the module is loaded again on every boot.
    - name: Add the module to a configuration file for persistence
      lineinfile:
        path: /etc/modules-load.d/modules.conf
        line: "br_netfilter"
    # Required by the kubernetes.core.* Ansible modules used in later plays.
    - name: Install kubernetes library
      apt:
        name: python3-kubernetes
        state: present
  roles:
    - role: geerlingguy.containerd
# One geerlingguy.kubernetes play per cluster host group; per-cluster
# configuration is expected to come from each group's vars.
- name: Alpha Cluster
  hosts: alpha
  become: true
  roles:
    - role: geerlingguy.kubernetes
- name: Beta Cluster
  hosts: beta
  become: true
  roles:
    - role: geerlingguy.kubernetes
- name: Gamma Cluster
  hosts: gamma
  become: true
  roles:
    - role: geerlingguy.kubernetes
# Helm CLI on every control plane, used by the resource-deployment play below.
- name: Install Helm
  hosts: control_planes
  become: true
  roles:
    - role: geerlingguy.helm
# Installs the shared platform services on every cluster: NFS storage class,
# MetalLB (BGP mode), ingress-nginx, cert-manager + ACME issuer, GPU operator.
- name: Deploy base Kubernetes resources
  hosts: control_planes
  become: true
  tasks:
    - name: Add NFS Provisioner repository
      kubernetes.core.helm_repository:
        name: nfs-subdir-external-provisioner
        repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
    - name: Add MetalLB repository
      kubernetes.core.helm_repository:
        name: metallb
        repo_url: https://metallb.github.io/metallb
    - name: Add Ingress Nginx repository
      kubernetes.core.helm_repository:
        name: ingress-nginx
        repo_url: https://kubernetes.github.io/ingress-nginx
    - name: Add cert-manager repository
      kubernetes.core.helm_repository:
        name: jetstack
        repo_url: https://charts.jetstack.io
    - name: Add bitnami repository
      kubernetes.core.helm_repository:
        name: bitnami
        repo_url: https://charts.bitnami.com/bitnami
    - name: Add nvidia repository
      kubernetes.core.helm_repository:
        name: nvidia
        repo_url: https://helm.ngc.nvidia.com/nvidia
    # Refresh local chart indexes so the chart_refs below resolve.
    - name: Update Helm repos
      command: helm repo update
    - name: Deploy NFS Provisioner
      kubernetes.core.helm:
        name: nfs-subdir-external-provisioner
        chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
        release_namespace: nfs-provisioner
        create_namespace: true
        values:
          nfs:
            server: poweredge-t640
            # Each cluster mounts its own export created in the NFS play.
            path: "/data/{{ cluster_name }}"
          storageClass:
            defaultClass: true
    - name: Deploy MetalLB
      kubernetes.core.helm:
        name: metallb
        chart_ref: metallb/metallb
        release_namespace: metallb
        create_namespace: true
    # MetalLB's webhooks must be up before its CRs can be applied.
    - name: Wait for MetalLB to initialize
      wait_for:
        timeout: 120
    - name: Deploy BGP Peer
      # Fix: use the FQCN (kubernetes.core.k8s) for consistency with every
      # other kubernetes.core.* task in this play.
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: metallb.io/v1beta2
          kind: BGPPeer
          metadata:
            name: bgp-peer
            namespace: metallb
          spec:
            # NOTE(review): templating yields a string here while myASN is an
            # integer field — confirm the API server accepts it; and peerASN
            # 6500 is outside the private ASN range (64512-65534) — verify.
            myASN: "{{ metallb_asn }}"
            peerASN: 6500
            peerAddress: 192.168.1.1
    - name: Deploy address pool
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: metallb.io/v1beta1
          kind: IPAddressPool
          metadata:
            name: pool0
            namespace: metallb
          spec:
            addresses: "{{ metallb_addresses }}"
    - name: Deploy BGP Advertisement
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: metallb.io/v1beta1
          kind: BGPAdvertisement
          metadata:
            name: bgp-advertisement
            namespace: metallb
          spec:
            ipAddressPools:
              - pool0
    - name: Deploy Ingress Nginx
      kubernetes.core.helm:
        name: ingress-nginx
        chart_ref: ingress-nginx/ingress-nginx
        release_namespace: ingress-nginx
        create_namespace: true
        values:
          controller:
            annotations:
              acme.cert-manager.io/http01-edit-in-place: 'true'
            extraArgs:
              update-status: "false"
    - name: Deploy cert-manager
      kubernetes.core.helm:
        name: cert-manager
        chart_ref: jetstack/cert-manager
        release_namespace: cert-manager
        create_namespace: true
        values:
          crds:
            enabled: true
    - name: Deploy Cluster Issuer
      # NOTE(review): named "ca-issuer" but configured as an ACME
      # (Let's Encrypt) issuer — consider renaming for clarity.
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: cert-manager.io/v1
          kind: ClusterIssuer
          metadata:
            name: ca-issuer
          spec:
            acme:
              server: "{{ letsencrypt_url }}"
              email: eric@eom.dev
              privateKeySecretRef:
                name: letsencrypt-issuer-key
              solvers:
                - http01:
                    ingress:
                      ingressClassName: nginx
    - name: Deploy nvidia gpu operator
      kubernetes.core.helm:
        name: gpu-operator
        chart_ref: nvidia/gpu-operator
        release_namespace: gpu-operator
        create_namespace: true
        values:
          # Driver and container toolkit are managed on the hosts themselves
          # (ericomeehan.nvidia_driver role), so the operator must not
          # install them.
          driver:
            enabled: false
          toolkit:
            enabled: false
# NOTE(review): despite the name, this play only pauses — the actual HTTP(S)
# port-forwarding to the ingress controllers is a manual step performed by
# the operator while the playbook waits.
- name: Port forward HTTP(S) to Ingress Controllers
  hosts: localhost
  tasks:
    - name: Wait for manual tasks
      pause:
        prompt: "Press Enter to continue..."

9
matrix-stack.yaml Normal file
View File

@@ -0,0 +1,9 @@
# Deploys the Matrix stack for eom.dev to the alpha cluster control plane.
- name: Execute test
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.matrix-stack
      vars:
        server_name: eom.dev

7
minio.yaml Normal file
View File

@@ -0,0 +1,7 @@
# Deploys MinIO to the alpha cluster control plane; all configuration comes
# from the role defaults and secrets.yaml.
- name: Execute test
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.minio

5
nfs.yaml Normal file
View File

@@ -0,0 +1,5 @@
# Standalone NFS server configuration for the network-file-system group.
- name: Configure NFS
  hosts: network-file-system
  become: true
  roles:
    - role: geerlingguy.nfs

23
nvidia-device-plugin.yaml Normal file
View File

@@ -0,0 +1,23 @@
# Deploys the NVIDIA GPU operator to every cluster control plane.
- name: Deploy nvdp
  hosts: control_planes
  become: true
  tasks:
    - name: Add nvdp repository
      kubernetes.core.helm_repository:
        name: nvidia
        repo_url: https://helm.ngc.nvidia.com/nvidia
    # Refresh local chart indexes so the chart_ref below resolves.
    - name: Update Helm repos
      command: helm repo update
    - name: Deploy nvidia gpu operator
      kubernetes.core.helm:
        name: gpu-operator
        chart_ref: nvidia/gpu-operator
        release_namespace: gpu-operator
        create_namespace: true
        values:
          # Driver and toolkit are disabled — presumably managed on the hosts
          # via ericomeehan.nvidia_driver (see poweredge-t640.yaml); confirm.
          driver:
            enabled: false
          toolkit:
            enabled: false

110
poweredge-r350.yaml Normal file
View File

@@ -0,0 +1,110 @@
---
# Playbook for poweredge-r350
# This is being used to test vm deployments
# Provisions libvirt networks and cloud-init-based Debian guests.
- name: Deploy virtual machines
  hosts: poweredge-r350
  become: true
  vars_files:
    - ../secrets.yaml
  pre_tasks:
    - name: Install packages for virtualization
      apt:
        # Fix: canonical boolean `true` instead of YAML 1.1 truthy `yes`.
        update_cache: true
        name:
          - bridge-utils
          - genisoimage
          - qemu-utils
          - qemu-system-x86
          - libvirt-daemon-system
          - python3-libvirt
          - python3-lxml
        state: present
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Define libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: define
        xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
      loop: "{{ libvirt_networks }}"
    - name: Create libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: create
      loop: "{{ libvirt_networks }}"
    - name: Autostart libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        autostart: true
      loop: "{{ libvirt_networks }}"
    # NOTE(review): this downloads the same base image once per guest with
    # force: true; consider downloading once and copying, as
    # poweredge-r720.yaml does.
    - name: Download base image for guests
      get_url:
        url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
        dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
        force: true
      loop: "{{ libvirt_guests }}"
    - name: Create cloud-config directory for guests
      file:
        path: "/var/lib/libvirt/configs/{{ item }}"
        state: directory
      loop: "{{ libvirt_guests }}"
    # Fix: the two template tasks below previously shared one name
    # ("Copy cloud-config templates for guests"); renamed to match the
    # distinct names used in poweredge-r720.yaml.
    - name: Copy cloud-config user-data template for guests
      template:
        src: eom_cloud.cfg.j2
        dest: "/var/lib/libvirt/configs/{{ domain.name }}/user-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Copy cloud-config meta-data template for guests
      template:
        src: meta-data.j2
        dest: "/var/lib/libvirt/configs/{{ domain.name }}/meta-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    # Build the cloud-init NoCloud seed ISO (volume id must be "cidata").
    - name: Generate iso for guests
      command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /var/lib/libvirt/configs/{{ item }}/user-data /var/lib/libvirt/configs/{{ item }}/meta-data"
      loop: "{{ libvirt_guests }}"
    - name: Copy logging config for guests
      copy:
        src: 05_logging.cfg
        dest: "/var/lib/libvirt/configs/{{ item }}/05_logging.cfg"
      loop: "{{ libvirt_guests }}"
    - name: Define libvirt virtual machine
      community.libvirt.virt:
        command: define
        xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Create libvirt virtual machine
      community.libvirt.virt:
        name: "{{ item }}"
        command: create
      loop: "{{ libvirt_guests }}"

253
poweredge-r720.yaml Normal file
View File

@@ -0,0 +1,253 @@
---
# Playbook for eom.dev
# Baseline + NFS exports for the poweredge-r720 hypervisor.
- name: Initialize hypervisors
  hosts: poweredge-r720
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.debian
    - role: ericomeehan.ericomeehan
- name: Initialize Network File Systems
  hosts: poweredge-r720
  become: true
  roles:
    - role: geerlingguy.nfs
  tasks:
    # One export directory per cluster.
    - name: Create NFS directories
      file:
        path: "{{ item }}"
        state: directory
      loop:
        - /data/alpha
        - /data/beta
        - /data/gamma
# Provisions libvirt networks and cloud-init-based Debian guests on the r720.
- name: Deploy virtual machines
  hosts: poweredge-r720
  become: true
  vars_files:
    - ../secrets.yaml
  pre_tasks:
    - name: Install packages for virtualization
      apt:
        # Fix: canonical boolean `true` instead of YAML 1.1 truthy `yes`.
        update_cache: true
        name:
          - bridge-utils
          - genisoimage
          - qemu-utils
          - qemu-system-x86
          - libvirt-daemon-system
          - python3-libvirt
          - python3-lxml
        state: present
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Copy interfaces template
      template:
        src: interfaces.j2
        dest: /etc/network/interfaces
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Reload network service
      service:
        name: networking
        state: restarted
    - name: Define libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: define
        xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
      loop: "{{ libvirt_networks }}"
    - name: Create libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: create
      loop: "{{ libvirt_networks }}"
    - name: Autostart libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        autostart: true
      loop: "{{ libvirt_networks }}"
    # Download the base image once, then clone it per guest.
    - name: Download base image
      get_url:
        url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
        dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        force: true
    - name: Copy base image
      copy:
        src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        remote_src: true
        dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
        force: true
      loop: "{{ libvirt_guests }}"
    - name: Resize images
      command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
      loop: "{{ libvirt_guests }}"
    - name: Create cloud-config directory
      file:
        path: "/tmp/{{ item }}"
        state: directory
      loop: "{{ libvirt_guests }}"
    - name: Copy cloud-config user-data template
      template:
        src: user-data.j2
        dest: "/tmp/{{ domain.name }}/user-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Copy cloud-config meta-data template
      template:
        src: meta-data.j2
        dest: "/tmp/{{ domain.name }}/meta-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    # Build the cloud-init NoCloud seed ISO (volume id must be "cidata").
    - name: Generate iso
      command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
      loop: "{{ libvirt_guests }}"
    - name: Define libvirt virtual machine
      community.libvirt.virt:
        command: define
        xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Create libvirt virtual machine
      community.libvirt.virt:
        name: "{{ item }}"
        command: create
      loop: "{{ libvirt_guests }}"
    - name: Autostart libvirt virtual machines
      community.libvirt.virt:
        name: "{{ item }}"
        autostart: true
      loop: "{{ libvirt_guests }}"
    - name: Wait for guest initialization
      wait_for:
        timeout: 300
    # Reboot so the guest filesystem grows into the resized disk.
    - name: Reset libvirt virtual machines for filesystem resize
      command: "virsh reset {{ item }}"
      loop: "{{ libvirt_guests }}"
# Operator must accept the new guests' SSH host keys before continuing.
- name: Wait for manual tasks
  hosts: localhost
  tasks:
    - name: Trust SSH identities
      pause:
        prompt: "Press Enter to continue..."
- name: Initialize virtual machines
  hosts:
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
    - evm
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan
# Kernel and package prerequisites for the new alpha worker nodes.
- name: Initialize Kubernetes clusters
  hosts:
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
  become: true
  pre_tasks:
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Enable br_netfilter kernel module
      command: modprobe br_netfilter
    - name: Add the module to a configuration file for persistence
      lineinfile:
        path: /etc/modules-load.d/modules.conf
        line: "br_netfilter"
    # Required by the kubernetes.core.* Ansible modules used below.
    - name: Install kubernetes library
      apt:
        name: python3-kubernetes
        state: present
  roles:
    - role: geerlingguy.containerd
# Join the new workers to the existing alpha cluster.
- name: Alpha Cluster
  hosts:
    - alpha-control-plane
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
  become: true
  roles:
    - role: geerlingguy.kubernetes
# Secondary (non-default) storage class backed by the r720's NFS export.
- name: Deploy base Kubernetes resources
  hosts: alpha-control-plane
  become: true
  tasks:
    - name: Deploy NFS Provisioner
      kubernetes.core.helm:
        name: r720-nfs-subdir-external-provisioner
        chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
        release_namespace: r720-nfs-provisioner
        create_namespace: true
        values:
          nfs:
            server: poweredge-r720
            path: "/data/alpha"
          storageClass:
            defaultClass: false
            name: r720-nfs-client

5
poweredge-t640.yaml Normal file
View File

@@ -0,0 +1,5 @@
# Installs the NVIDIA driver on the poweredge-t640 host.
- name: Install nvidia
  hosts: poweredge-t640
  become: true
  roles:
    - role: ericomeehan.nvidia_driver

12
qemu-base.yaml Normal file
View File

@@ -0,0 +1,12 @@
---
# Create base image for QEMU
# NOTE(review): this file is an outline, not a runnable playbook — the
# top-level items have no `hosts:`/`tasks:` keys, so ansible-playbook will
# reject it. Kept as a TODO sketch of the intended image-build steps.
- name: Install dependencies
- name: Download Debian installation image
- name: Create virtual disk for image
- name: Boot installation media with preseed
- name: Wait for installation completion
- name: Initialize base image
  roles:
    - role: ericomeehan.debian
    - role: ericomeehan.ericomeehan
- name: Shut down base image

20
qemu.yaml Normal file
View File

@@ -0,0 +1,20 @@
---
- name: Test QEMU
hosts: localhost
become: true
tasks:
- name: Deploy test vm
community.libvirt.virt:
name: test
state: running
persistent: true
memory: 4096
vcpus: 1
os_type: linux
disks:
- name: test
size: 8
format: qcow2
cdrom:
- url: https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.7.0-amd64-netinst.iso

9
raspberrypi.yaml Normal file
View File

@@ -0,0 +1,9 @@
---
# Playbook for raspberry pi
# Applies the personal baseline role to the raspberrypi host.
- name: Initialize rpi
  hosts: raspberrypi
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan

154
reassign.yaml Normal file
View File

@@ -0,0 +1,154 @@
# Re-provisions alpha-worker-8..12 as libvirt guests on the t640 and joins
# them to the alpha cluster.
- name: Deploy virtual machines
  hosts: poweredge-t640
  become: true
  vars_files:
    - ../secrets.yaml
  vars:
    libvirt_guests:
      - alpha-worker-8
      - alpha-worker-9
      - alpha-worker-10
      - alpha-worker-11
      - alpha-worker-12
  tasks:
    # Download the base image once, then clone and grow it per guest.
    - name: Download base image
      get_url:
        url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
        dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        force: true
    - name: Copy base image
      copy:
        src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        remote_src: true
        dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
        force: true
      loop: "{{ libvirt_guests }}"
    - name: Resize images
      command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
      loop: "{{ libvirt_guests }}"
    - name: Create cloud-config directory
      file:
        path: "/tmp/{{ item }}"
        state: directory
      loop: "{{ libvirt_guests }}"
    - name: Copy cloud-config user-data template
      template:
        src: user-data.j2
        dest: "/tmp/{{ domain.name }}/user-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Copy cloud-config meta-data template
      template:
        src: meta-data.j2
        dest: "/tmp/{{ domain.name }}/meta-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    # Build the cloud-init NoCloud seed ISO (volume id must be "cidata").
    - name: Generate iso
      command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
      loop: "{{ libvirt_guests }}"
    - name: Define libvirt virtual machine
      community.libvirt.virt:
        command: define
        xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Create libvirt virtual machine
      community.libvirt.virt:
        name: "{{ item }}"
        command: create
      loop: "{{ libvirt_guests }}"
    - name: Autostart libvirt virtual machines
      community.libvirt.virt:
        name: "{{ item }}"
        autostart: true
      loop: "{{ libvirt_guests }}"
    - name: Wait for guest initialization
      wait_for:
        timeout: 300
    # Reboot so the guest filesystem grows into the resized disk.
    - name: Reset libvirt virtual machines for filesystem resize
      command: "virsh reset {{ item }}"
      loop: "{{ libvirt_guests }}"
# Operator must accept the new guests' SSH host keys before continuing.
- name: Wait for manual tasks
  hosts: localhost
  tasks:
    - name: Trust SSH identities
      pause:
        prompt: "Press Enter to continue..."
- name: Initialize virtual machines
  hosts:
    - alpha-worker-8
    - alpha-worker-9
    - alpha-worker-10
    - alpha-worker-11
    - alpha-worker-12
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan
# Kernel and package prerequisites for the re-provisioned workers.
- name: Initialize Kubernetes clusters
  hosts:
    - alpha-worker-8
    - alpha-worker-9
    - alpha-worker-10
    - alpha-worker-11
    - alpha-worker-12
  become: true
  pre_tasks:
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Enable br_netfilter kernel module
      command: modprobe br_netfilter
    - name: Add the module to a configuration file for persistence
      lineinfile:
        path: /etc/modules-load.d/modules.conf
        line: "br_netfilter"
    # Required by the kubernetes.core.* Ansible modules.
    - name: Install kubernetes library
      apt:
        name: python3-kubernetes
        state: present
  roles:
    - role: geerlingguy.containerd
- name: Alpha Cluster
  hosts:
    - alpha
  become: true
  roles:
    - role: geerlingguy.kubernetes

View File

@@ -1,38 +0,0 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@@ -1,5 +0,0 @@
---
# defaults file for ericomeehan.nvidia_driver_debian
nvidia_driver_skip_reboot: false
nvidia_driver_source: nvidia
nvidia_driver_tesla: false

View File

@@ -1,2 +0,0 @@
---
# handlers file for ericomeehan.nvidia_driver_debian

View File

@@ -1,34 +0,0 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@@ -1,9 +0,0 @@
---
# tasks file for ericomeehan.nvidia_driver_debian
- name: Install Nvidia proprietary drivers
include_tasks: nvidia-proprietary.yml
when: nvidia_driver_source == "nvidia"
- name: Install nouveau and mesa drivers
include_tasks: nouveau-mesa.yml
when: nvidia_driver_source == "nouveau"

Some files were not shown because too many files have changed in this diff Show More