Compare commits

..

14 Commits

Author SHA1 Message Date
043f4e78bf Added TES3MP 2025-12-24 13:20:58 -05:00
987a6cada4 Update submodules, adding MinIO role 2025-11-08 12:13:12 -05:00
d94e595f21 WikiDeck and eom 2025-10-29 09:23:21 -04:00
4a86fc43a0 Updating roles 2025-10-25 10:14:19 -04:00
9015a48417 PowerEdge R720 QEMU & Kubernetes Setup (#23)
* Added 5 VMs to Alpha cluster.
* Created personal dev node
2025-07-31 21:41:19 +00:00
3e8c8a3566 Submodule for Mastodon 2025-07-20 07:51:34 -04:00
8a96979dd8 Luanti and Mastodon submodules 2025-07-01 09:48:16 -04:00
6260c6ca27 Submodules 2025-06-26 12:58:18 -04:00
a9f7f25b7f Updates 2025-06-26 12:56:40 -04:00
6b6454d907 Matrix Stack
Added ansible-role-matrix-stack submodule.
2025-05-25 22:13:21 -04:00
2ff0bc1502 Nvidia Tesla T4 2025-03-08 11:59:41 -05:00
bf7954c5ae Added ansible-role-libvirt-guest submodule 2025-02-25 22:44:36 -05:00
aa2400cf13 Update gitmodules 2025-02-22 16:22:44 -05:00
b0063cc367 Raspberry Pi 2025-01-13 16:03:20 -05:00
47 changed files with 920 additions and 152 deletions

View File

@@ -0,0 +1,22 @@
---
# Gitea/GitHub Actions workflow: install Ansible and run the libvirt-guests
# playbook against the eom.dev inventory.
# NOTE(review): no `on:` trigger is visible in this capture — without one the
# workflow never runs; confirm the trigger exists in the real file.
# NOTE(review): `apt` is run without sudo — fine if the runner executes as
# root (Gitea act_runner container default), fails on GitHub-hosted runners.
name: deployLibvirtGuests
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Ansible
        run: |
          apt update -y
          apt install python3-pip -y
          python3 -m pip install ansible
      - name: Run playbook
        uses: dawidd6/action-ansible-playbook@v2
        with:
          playbook: libvirtGuests.yaml
          key: ${{ secrets.SSH_PRIVATE_KEY }}
          vault_password: ${{ secrets.VAULT_PASSWORD }}
          options: |
            --inventory inventories/eom.dev.yaml

54
.gitmodules vendored
View File

@@ -1,21 +1,57 @@
[submodule "roles/ericomeehan.eom"]
	path = roles/ericomeehan.eom
	url = git@gitea.eom.dev:DevOps/ansible-role-eom.git
[submodule "roles/ericomeehan.debian"]
	path = roles/ericomeehan.debian
	url = git@gitea.eom.dev:DevOps/ansible-role-debian.git
[submodule "roles/ericomeehan.gondwana"]
	path = roles/ericomeehan.gondwana
	url = git@gitea.eom.dev:Gaming/ansible-role-gondwana.git
[submodule "roles/ericomeehan.ericomeehan"]
	path = roles/ericomeehan.ericomeehan
	url = git@gitea.eom.dev:eric/ansible-role-ericomeehan.git
[submodule "roles/ericomeehan.nvidia_driver"]
	path = roles/ericomeehan.nvidia_driver
	url = git@gitea.eom.dev:DevOps/ansible-role-nvidia-driver.git
[submodule "roles/ericomeehan.kraken-bot"]
	path = roles/ericomeehan.kraken-bot
	url = git@gitea.eom.dev:Finance/ansible-role-kraken-bot.git
[submodule "roles/ericomeehan.libvirt-guest"]
	path = roles/ericomeehan.libvirt-guest
	url = git@gitea.eom.dev:DevOps/ansible-role-libvirt-guest.git
[submodule "roles/ericomeehan.matrix-stack"]
	path = roles/ericomeehan.matrix-stack
	url = git@gitea.eom.dev:DevOps/ansible-role-matrix-stack.git
[submodule "roles/ericomeehan.luanti"]
	path = roles/ericomeehan.luanti
	url = git@gitea.eom.dev:Gaming/ansible-role-luanti.git
[submodule "roles/ericomeehan.mastodon"]
	path = roles/ericomeehan.mastodon
	url = git@gitea.eom.dev:DevOps/ansible-role-mastodon.git
[submodule "roles/ericomeehan.vintage-story"]
	path = roles/ericomeehan.vintage-story
	url = git@gitea.eom.dev:Gaming/ansible-role-vintage-story.git
[submodule "roles/ericomeehan.wikideck"]
	path = roles/ericomeehan.wikideck
	url = git@gitea.eom.dev:WikiDeck/ansible-role-wikideck.git
[submodule "roles/ericomeehan.localai"]
	path = roles/ericomeehan.localai
	url = git@gitea.eom.dev:DevOps/ansible-role-localai.git
[submodule "roles/ericomeehan.localagi"]
	path = roles/ericomeehan.localagi
	url = git@gitea.eom.dev:DevOps/ansible-role-localagi.git
[submodule "roles/ericomeehan.localrecall"]
	path = roles/ericomeehan.localrecall
	url = git@gitea.eom.dev:DevOps/ansible-role-localrecall.git
[submodule "roles/ericomeehan.minio"]
	path = roles/ericomeehan.minio
	url = git@gitea.eom.dev:DevOps/ansible-role-minio.git
[submodule "roles/ericomeehan.jamulus"]
	path = roles/ericomeehan.jamulus
	url = git@gitea.eom.dev:DevOps/ansible-role-jamulus.git
[submodule "roles/cleary.tidalcycles"]
	path = roles/cleary.tidalcycles
	url = https://github.com/cleary/ansible-tidalcycles.git
[submodule "roles/ericomeehan.tes3mp"]
	path = roles/ericomeehan.tes3mp
	url = git@gitea.eom.dev:Gaming/ansible-role-tes3mp.git

BIN
.localai.yaml.swp Normal file

Binary file not shown.

341
discourse_deploy.yaml Normal file
View File

@@ -0,0 +1,341 @@
---
# Kubernetes Deployment for Discourse (Bitnami chart discourse-17.0.1).
# NOTE(review): this manifest was captured from a live cluster; server-managed
# fields (status, resourceVersion, uid, generation, creationTimestamp and the
# deployment.kubernetes.io/revision annotation) have been stripped so it can
# be re-applied with `kubectl apply` without conflicts.
# NOTE(review): the chart labels claim app version 3.5.0 while the image tag
# pins 3.4.7-debian-12-r0 from the bitnamilegacy registry — confirm intended.
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    meta.helm.sh/release-name: discourse
    meta.helm.sh/release-namespace: discourse
  labels:
    app.kubernetes.io/instance: discourse
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: discourse
    app.kubernetes.io/version: 3.5.0
    helm.sh/chart: discourse-17.0.1
  name: discourse
  namespace: discourse
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app.kubernetes.io/instance: discourse
      app.kubernetes.io/name: discourse
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      annotations:
        checksum/config: e5461cf0a1e43f8902c5301b3406945d6a42bf2817fd69f91864bd1690a64b6b
        checksum/secrets-database: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
        checksum/secrets-discourse: 400095440da345a753f05bd9af8e09a98b4aba5b2c80294e9d2b4956a080ef86
        checksum/secrets-redis: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
      labels:
        app.kubernetes.io/instance: discourse
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: discourse
        app.kubernetes.io/version: 3.5.0
        helm.sh/chart: discourse-17.0.1
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: discourse
                    app.kubernetes.io/name: discourse
                topologyKey: kubernetes.io/hostname
              weight: 1
      automountServiceAccountToken: false
      containers:
        # Main Discourse web container: removes the bundled chat plugin, then
        # hands off to the Bitnami entrypoint.
        - args:
            - -c
            - |
              rm -r /opt/bitnami/discourse/plugins/chat
              chown -R discourse:root /opt/bitnami/discourse/plugins
              /opt/bitnami/scripts/discourse/entrypoint.sh /opt/bitnami/scripts/discourse/run.sh
          command:
            - /bin/bash
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: DISCOURSE_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: discourse-password
                  name: discourse-discourse
            - name: DISCOURSE_PORT_NUMBER
              value: "8080"
            - name: DISCOURSE_EXTERNAL_HTTP_PORT_NUMBER
              value: "80"
            - name: DISCOURSE_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: discourse-postgresql
            - name: POSTGRESQL_CLIENT_CREATE_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: discourse-postgresql
            - name: POSTGRESQL_CLIENT_POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: postgres-password
                  name: discourse-postgresql
            - name: DISCOURSE_REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: redis-password
                  name: discourse-redis
            - name: DISCOURSE_SMTP_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: smtp-password
                  name: discourse-discourse
            - name: DISCOURSE_DATA_TO_PERSIST
              value: public/backups public/uploads
          envFrom:
            - configMapRef:
                name: discourse
          image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 500
            periodSeconds: 10
            successThreshold: 1
            tcpSocket:
              port: http
            timeoutSeconds: 5
          name: discourse
          ports:
            - containerPort: 8080
              name: http
              protocol: TCP
          readinessProbe:
            failureThreshold: 6
            httpGet:
              path: /srv/status
              port: http
              scheme: HTTP
            initialDelaySeconds: 180
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - CHOWN
                - SYS_CHROOT
                - FOWNER
                - SETGID
                - SETUID
                - DAC_OVERRIDE
              drop:
                - ALL
            privileged: false
            readOnlyRootFilesystem: false
            runAsGroup: 0
            runAsNonRoot: false
            runAsUser: 0
            seLinuxOptions: {}
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /bitnami/discourse
              name: discourse-data
              subPath: discourse
            - mountPath: /opt/bitnami/discourse/plugins
              name: empty-dir
              subPath: app-plugins-dir
        # Sidekiq background-job worker: same image, shares the data volume
        # and plugins dir with the web container.
        - args:
            - /opt/bitnami/scripts/discourse-sidekiq/run.sh
          command:
            - /opt/bitnami/scripts/discourse/entrypoint.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: DISCOURSE_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: discourse-password
                  name: discourse-discourse
            - name: DISCOURSE_POSTGRESQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: password
                  name: discourse-postgresql
            - name: REDIS_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: redis-password
                  name: discourse-redis
            - name: DISCOURSE_SMTP_PASSWORD
              valueFrom:
                secretKeyRef:
                  key: smtp-password
                  name: discourse-discourse
            - name: DISCOURSE_DATA_TO_PERSIST
              value: public/backups public/uploads
          envFrom:
            - configMapRef:
                name: discourse
          image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
          imagePullPolicy: IfNotPresent
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pgrep -f ^sidekiq
            failureThreshold: 6
            initialDelaySeconds: 500
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          name: sidekiq
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - pgrep -f ^sidekiq
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 750m
              ephemeral-storage: 2Gi
              memory: 768Mi
            requests:
              cpu: 500m
              ephemeral-storage: 50Mi
              memory: 512Mi
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - CHOWN
                - SYS_CHROOT
                - FOWNER
                - SETGID
                - SETUID
                - DAC_OVERRIDE
              drop:
                - ALL
            privileged: false
            readOnlyRootFilesystem: false
            runAsGroup: 0
            runAsNonRoot: false
            runAsUser: 0
            seLinuxOptions: {}
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /bitnami/discourse
              name: discourse-data
              subPath: discourse
            - mountPath: /opt/bitnami/discourse/plugins
              name: empty-dir
              subPath: app-plugins-dir
      dnsPolicy: ClusterFirst
      initContainers:
        # One-shot plugin installer: installs third-party plugins, then copies
        # them into the shared empty-dir for the runtime containers.
        - args:
            - -c
            - |
              pushd "/opt/bitnami/discourse" >/dev/null || exit 1
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-adplugin
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-subscriptions
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-activity-pub
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-openid-connect
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/jonmbake/discourse-ldap-auth
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-math
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-post-voting
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-prometheus
              RAILS_ENV=production bundle exec rake plugin:install repo=https://github.com/discourse/discourse-reactions
              RAILS_ENV=production LOAD_PLUGINS=0 bundle exec rake plugin:pull_compatible_all
              popd >/dev/null || exit 1
              cp -nr --preserve=mode /opt/bitnami/discourse/plugins/* /plugins
          command:
            - /bin/bash
          image: docker.io/bitnamilegacy/discourse:3.4.7-debian-12-r0
          imagePullPolicy: IfNotPresent
          name: install-plugins
          resources: {}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              add:
                - CHOWN
                - SYS_CHROOT
                - FOWNER
                - SETGID
                - SETUID
                - DAC_OVERRIDE
              drop:
                - ALL
            privileged: false
            readOnlyRootFilesystem: false
            runAsGroup: 0
            runAsNonRoot: false
            runAsUser: 0
            seLinuxOptions: {}
            seccompProfile:
              type: RuntimeDefault
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /plugins
              name: empty-dir
              subPath: app-plugins-dir
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 0
        fsGroupChangePolicy: Always
      serviceAccount: discourse
      serviceAccountName: discourse
      terminationGracePeriodSeconds: 30
      volumes:
        - emptyDir: {}
          name: empty-dir
        - name: discourse-data
          persistentVolumeClaim:
            claimName: discourse

View File

@@ -1 +1,2 @@
# Host vars for alpha-worker-0: GPU node — nvidia_driver_needed gates the
# ericomeehan.nvidia_driver role in main.yaml (`when: nvidia_driver_needed == true`).
name: alpha-worker-0
nvidia_driver_needed: true

View File

@@ -0,0 +1,4 @@
---
# Host vars for alpha-worker-13 (libvirt guest on poweredge-r720).
name: alpha-worker-13
# vcpu nesting restored: in the flat capture `placement`/`value` had fallen
# out of the `vcpu:` mapping, leaving vcpu null. Consumed via hostvars in the
# libvirt-vm.xml.j2 template.
vcpu:
  placement: static
  value: 2

View File

@@ -0,0 +1,4 @@
---
# Host vars for alpha-worker-14 (libvirt guest on poweredge-r720).
name: alpha-worker-14
# vcpu nesting restored — flat capture left `placement`/`value` at top level.
vcpu:
  placement: static
  value: 2

View File

@@ -0,0 +1,4 @@
---
# Host vars for alpha-worker-15 (libvirt guest on poweredge-r720).
name: alpha-worker-15
# vcpu nesting restored — flat capture left `placement`/`value` at top level.
vcpu:
  placement: static
  value: 2

View File

@@ -0,0 +1,4 @@
---
# Host vars for alpha-worker-16 (libvirt guest on poweredge-r720).
name: alpha-worker-16
# vcpu nesting restored — flat capture left `placement`/`value` at top level.
vcpu:
  placement: static
  value: 2

View File

@@ -0,0 +1,4 @@
---
# Host vars for alpha-worker-17 (libvirt guest on poweredge-r720).
name: alpha-worker-17
# vcpu nesting restored — flat capture left `placement`/`value` at top level.
vcpu:
  placement: static
  value: 2

1
host_vars/evm.yaml Normal file
View File

@@ -0,0 +1 @@
name: evm

View File

@@ -0,0 +1,17 @@
---
# Host vars for poweredge-r720
# NOTE(review): nesting of the libvirt_networks entry was lost in this
# capture; restored to match the libvirt <network> schema (bridged forward
# mode over host bridge "wan" on eno1). Confirm against
# libvirt-network.xml.j2 before relying on this layout.
libvirt_networks:
  - name: wan
    forward:
      mode: bridge
    bridge:
      name: wan
    dev: eno1
# Guests this hypervisor should define/start (hostvars of each supply the
# per-VM settings such as vcpu).
libvirt_guests:
  - alpha-worker-13
  - alpha-worker-14
  - alpha-worker-15
  - alpha-worker-16
  - alpha-worker-17
  - evm
nfs_exports: ["/data *(rw,sync,no_root_squash)"]

View File

@@ -4,16 +4,20 @@ all:
workstations:
hosts:
latitude-7230:
latitude-7424:
inspiron-3670:
imac:
hypervisors:
hosts:
poweredge-r350:
poweredge-r720:
poweredge-t640:
vms:
children:
user:
hosts:
evm:
clusters:
children:
control_planes:
@@ -34,6 +38,11 @@ all:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:
alpha-worker-13:
alpha-worker-14:
alpha-worker-15:
alpha-worker-16:
alpha-worker-17:
alpha:
hosts:
alpha-control-plane:
@@ -50,3 +59,8 @@ all:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:
alpha-worker-13:
alpha-worker-14:
alpha-worker-15:
alpha-worker-16:
alpha-worker-17:

10
jamulus.yaml Normal file
View File

@@ -0,0 +1,10 @@
---
# Playbook: deploy the Jamulus audio server role to the alpha cluster.
- name: Deploy Jamulus  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.jamulus
      vars:
        # Register with the public "Any Genre 2" directory server.
        directoryAddress: "anygenre2.jamulus.io:22124"
        serverInfo: "jamulus.eom.dev;Raleigh;US"

7
k8s_cp.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
# Playbook for eom.dev
# Run geerlingguy.kubernetes against the alpha control plane only.
- name: Alpha Cluster
  hosts: alpha-control-plane
  become: true
  roles:
    - role: geerlingguy.kubernetes

10
libvirtGuests.yaml Normal file
View File

@@ -0,0 +1,10 @@
---
# playbook for libvirtGuests.yaml
- name: Libvirt guests
  hosts: poweredge-r720
  become: true
  roles:
    # Fixed role name: the submodule checks out to
    # roles/ericomeehan.libvirt-guest (see .gitmodules);
    # `ericomeehan.libvirtguest` does not exist.
    - role: ericomeehan.libvirt-guest
      # NOTE(review): the original passed `libvirt_networks: {}` and
      # `libvirt_guests: {}` as role params. Role params take precedence over
      # host_vars, which would force both to empty and make the role a no-op.
      # Dropped so the values in host_vars/poweredge-r720.yaml apply —
      # confirm this matches the intended behavior.

16
localagi.yaml Normal file
View File

@@ -0,0 +1,16 @@
---
# Playbook: deploy LocalAGI, wired to the LocalAI and LocalRecall services.
- name: Deploy LocalAGI  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.localagi
      vars:
        localagi_pvc_storage: 2Ti
        localagi_model: llama3-8b-instruct
        localagi_multimodal_model: minicpm-v-2_6
        localagi_image_model: sd-1.5-ggml
        localagi_localrag_url: https://localrecall.eom.dev/
        localagi_llm_api_url: https://localai.eom.dev/
        # First key from the localai_api_keys list in secrets.yaml.
        localagi_llm_api_key: "{{ localai_api_keys[0] }}"
        localagi_timeout: "300s"

12
localai.yaml Normal file
View File

@@ -0,0 +1,12 @@
---
# Playbook: deploy LocalAI (web UI disabled; watchdog enabled for idle and
# busy backends). Values intentionally quoted strings, as the role consumed
# them in the original.
- name: Deploy LocalAI  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.localai
      vars:
        localai_disable_webui: "true"
        localai_watchdog_idle: "true"
        localai_watchdog_idle_timeout: "1m"
        localai_watchdog_busy: "true"

13
localrecall.yaml Normal file
View File

@@ -0,0 +1,13 @@
---
# Playbook: deploy LocalRecall, backed by the LocalAI embedding endpoint.
- name: Deploy LocalRecall  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.localrecall
      vars:
        localrecall_collection_db_pvc_size: 2Ti
        localrecall_file_assets_pvc_size: 2Ti
        # Second key from the localai_api_keys list in secrets.yaml.
        localrecall_openai_api_key: "{{ localai_api_keys[1] }}"
        localrecall_openai_base_url: https://localai.eom.dev/v1
        localrecall_embedding_model: bert-embeddings

159
main.yaml
View File

@@ -40,147 +40,15 @@
- /data/gamma
- /data/eric
- name: Prepare virtualization environments
hosts: hypervisors
become: true
tasks:
- name: Install packages for virtualization
apt:
update_cache: yes
name:
- bridge-utils
- genisoimage
- qemu-utils
- qemu-system-x86
- libvirt-daemon-system
- prometheus-libvirt-exporter
- python3-libvirt
- python3-lxml
state: present
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Copy interfaces template
template:
src: interfaces.j2
dest: /etc/network/interfaces
- name: Reload sysctl configuration
command: sysctl --system
- name: Reload network service
service:
name: networking
state: restarted
- name: Deploy virtual machines
- name: Setup virtualization
hosts: hypervisors
become: true
vars_files:
- ../secrets.yaml
tasks:
- name: Define libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
loop: "{{ libvirt_networks }}"
- name: Create libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: create
loop: "{{ libvirt_networks }}"
- name: Autostart libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
autostart: true
loop: "{{ libvirt_networks }}"
- name: Download base image
get_url:
url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
force: true
- name: Copy base image
copy:
src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
remote_src: true
dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
force: true
loop: "{{ libvirt_guests }}"
- name: Resize images
command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
loop: "{{ libvirt_guests }}"
- name: Create cloud-config directory
file:
path: "/tmp/{{ item }}"
state: directory
loop: "{{ libvirt_guests }}"
- name: Copy cloud-config user-data template
template:
src: user-data.j2
dest: "/tmp/{{ domain.name }}/user-data"
force: true
loop: "{{ libvirt_guests }}"
roles:
- role: ericomeehan.libvirt_guests
vars:
domain: "{{ hostvars[item] }}"
- name: Copy cloud-config meta-data template
template:
src: meta-data.j2
dest: "/tmp/{{ domain.name }}/meta-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Generate iso
command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
loop: "{{ libvirt_guests }}"
- name: Define libvirt virtual machine
community.libvirt.virt:
command: define
xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Create libvirt virtual machine
community.libvirt.virt:
name: "{{ item }}"
command: create
loop: "{{ libvirt_guests }}"
- name: Autostart libvirt virtual machines
community.libvirt.virt:
name: "{{ item }}"
autostart: true
loop: "{{ libvirt_guests }}"
- name: Wait for guest initialization
wait_for:
timeout: 300
- name: Reset libvirt virtual machines for filesystem resize
command: "virsh reset {{ item }}"
loop: "{{ libvirt_guests }}"
doSetup: true
- name: Wait for manual tasks
hosts: localhost
@@ -196,6 +64,8 @@
- ../secrets.yaml
roles:
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
when: nvidia_driver_needed == true
- name: Initialize Kubernetes clusters
hosts: clusters
@@ -285,6 +155,11 @@
name: bitnami
repo_url: https://charts.bitnami.com/bitnami
- name: Add nvidia repository
kubernetes.core.helm_repository:
name: nvidia
repo_url: https://helm.ngc.nvidia.com/nvidia
- name: Update Helm repos
command: helm repo update
@@ -393,6 +268,18 @@
ingress:
ingressClassName: nginx
- name: Deploy nvidia gpu operator
kubernetes.core.helm:
name: gpu-operator
chart_ref: nvidia/gpu-operator
release_namespace: gpu-operator
create_namespace: true
values:
driver:
enabled: false
toolkit:
enabled: false
- name: Port forward HTTP(S) to Ingress Controllers
hosts: localhost
tasks:

9
matrix-stack.yaml Normal file
View File

@@ -0,0 +1,9 @@
---
# Playbook: deploy the Matrix homeserver stack for eom.dev.
- name: Deploy Matrix stack  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.matrix-stack
      vars:
        server_name: eom.dev

7
minio.yaml Normal file
View File

@@ -0,0 +1,7 @@
---
# Playbook: deploy MinIO object storage to the alpha cluster.
- name: Deploy MinIO  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.minio

23
nvidia-device-plugin.yaml Normal file
View File

@@ -0,0 +1,23 @@
---
# Playbook: install the NVIDIA GPU operator on each cluster control plane.
# NOTE(review): the file is named nvidia-device-plugin.yaml but deploys
# nvidia/gpu-operator (which manages the device plugin itself); driver and
# toolkit are disabled because they are installed on the hosts.
- name: Deploy nvdp
  hosts: control_planes
  become: true
  tasks:
    - name: Add nvdp repository
      kubernetes.core.helm_repository:
        name: nvidia
        repo_url: https://helm.ngc.nvidia.com/nvidia
    - name: Update Helm repos
      command: helm repo update
    - name: Deploy nvidia gpu operator
      kubernetes.core.helm:
        name: gpu-operator
        chart_ref: nvidia/gpu-operator
        release_namespace: gpu-operator
        create_namespace: true
        values:
          driver:
            enabled: false
          toolkit:
            enabled: false

253
poweredge-r720.yaml Normal file
View File

@@ -0,0 +1,253 @@
---
# Playbook for eom.dev
# End-to-end bring-up of the PowerEdge R720: base OS, NFS, libvirt guests,
# then Kubernetes join of the new alpha workers.
- name: Initialize hypervisors
  hosts: poweredge-r720
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.debian
    - role: ericomeehan.ericomeehan
- name: Initialize Network File Systems
  hosts: poweredge-r720
  become: true
  roles:
    - role: geerlingguy.nfs
  # Tasks run after roles within a play, so the export dirs are created after
  # the NFS server role has run.
  tasks:
    - name: Create NFS directories
      file:
        path: "{{ item }}"
        state: directory
      loop:
        - /data/alpha
        - /data/beta
        - /data/gamma
- name: Deploy virtual machines
  hosts: poweredge-r720
  become: true
  vars_files:
    - ../secrets.yaml
  # NOTE(review): every task of this play sits under pre_tasks with no roles
  # listed — functionally identical to tasks here, but confirm a roles:
  # section was not lost in the capture.
  pre_tasks:
    - name: Install packages for virtualization
      apt:
        update_cache: yes
        name:
          - bridge-utils
          - genisoimage
          - qemu-utils
          - qemu-system-x86
          - libvirt-daemon-system
          - python3-libvirt
          - python3-lxml
        state: present
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Copy interfaces template
      template:
        src: interfaces.j2
        dest: /etc/network/interfaces
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Reload network service
      service:
        name: networking
        state: restarted
    - name: Define libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: define
        xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
      loop: "{{ libvirt_networks }}"
    - name: Create libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        command: create
      loop: "{{ libvirt_networks }}"
    - name: Autostart libvirt networks
      community.libvirt.virt_net:
        name: "{{ item.name }}"
        autostart: true
      loop: "{{ libvirt_networks }}"
    - name: Download base image
      get_url:
        url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
        dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        force: true
    - name: Copy base image
      copy:
        src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
        remote_src: true
        dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
        force: true
      loop: "{{ libvirt_guests }}"
    - name: Resize images
      command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
      loop: "{{ libvirt_guests }}"
    - name: Create cloud-config directory
      file:
        path: "/tmp/{{ item }}"
        state: directory
      loop: "{{ libvirt_guests }}"
    - name: Copy cloud-config user-data template
      template:
        src: user-data.j2
        dest: "/tmp/{{ domain.name }}/user-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        # Per-guest settings come from the guest's own host_vars.
        domain: "{{ hostvars[item] }}"
    - name: Copy cloud-config meta-data template
      template:
        src: meta-data.j2
        dest: "/tmp/{{ domain.name }}/meta-data"
        force: true
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Generate iso
      command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
      loop: "{{ libvirt_guests }}"
    - name: Define libvirt virtual machine
      community.libvirt.virt:
        command: define
        xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
      loop: "{{ libvirt_guests }}"
      vars:
        domain: "{{ hostvars[item] }}"
    - name: Create libvirt virtual machine
      community.libvirt.virt:
        name: "{{ item }}"
        command: create
      loop: "{{ libvirt_guests }}"
    - name: Autostart libvirt virtual machines
      community.libvirt.virt:
        name: "{{ item }}"
        autostart: true
      loop: "{{ libvirt_guests }}"
    - name: Wait for guest initialization
      wait_for:
        timeout: 300
    - name: Reset libvirt virtual machines for filesystem resize
      command: "virsh reset {{ item }}"
      loop: "{{ libvirt_guests }}"
- name: Wait for manual tasks
  hosts: localhost
  tasks:
    - name: Trust SSH identities
      pause:
        prompt: "Press Enter to continue..."
- name: Initialize virtual machines
  hosts:
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
    - evm
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan
- name: Initialize Kubernetes clusters
  hosts:
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
  become: true
  pre_tasks:
    - name: Enable IPv4 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv4.ip_forward = 1'
        state: present
    - name: Enable IPv6 packet forwarding
      lineinfile:
        path: /etc/sysctl.conf
        line: 'net.ipv6.conf.all.forwarding = 1'
        state: present
    - name: Reload sysctl configuration
      command: sysctl --system
    - name: Enable br_netfilter kernel module
      command: modprobe br_netfilter
    - name: Add the module to a configuration file for persistence
      lineinfile:
        path: /etc/modules-load.d/modules.conf
        line: "br_netfilter"
    - name: Install kubernetes library
      apt:
        name: python3-kubernetes
        state: present
  roles:
    - role: geerlingguy.containerd
- name: Alpha Cluster
  hosts:
    - alpha-control-plane
    - alpha-worker-13
    - alpha-worker-14
    - alpha-worker-15
    - alpha-worker-16
    - alpha-worker-17
  become: true
  roles:
    - role: geerlingguy.kubernetes
- name: Deploy base Kubernetes resources
  hosts: alpha-control-plane
  become: true
  tasks:
    - name: Deploy NFS Provisioner
      kubernetes.core.helm:
        name: r720-nfs-subdir-external-provisioner
        chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
        release_namespace: r720-nfs-provisioner
        create_namespace: true
        values:
          nfs:
            server: poweredge-r720
            path: "/data/alpha"
          storageClass:
            defaultClass: false
            name: r720-nfs-client

9
raspberrypi.yaml Normal file
View File

@@ -0,0 +1,9 @@
---
# Playbook for raspberry pi
- name: Initialize rpi
  hosts: raspberrypi
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.ericomeehan

10
tes3mp.yaml Normal file
View File

@@ -0,0 +1,10 @@
---
# Playbook: deploy the TES3MP (Morrowind multiplayer) server role.
- name: Deploy TES3MP  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.tes3mp
      vars:
        TES3MP_SERVER_GENERAL_HOSTNAME: "TES3MP on eom.dev"
        # Pulled from secrets.yaml.
        TES3MP_SERVER_GENERAL_PASSWORD: "{{ tes3mp_server_general_password }}"

8
tower-plus.yaml Normal file
View File

@@ -0,0 +1,8 @@
---
# Playbook for workstations
# Runs locally (connection: local) to install the NVIDIA driver role on the
# workstation executing ansible-playbook.
- name: Initialize workstations
  hosts: localhost
  connection: local
  become: true
  roles:
    - role: ericomeehan.nvidia_driver

29
vintage-story.yaml Normal file
View File

@@ -0,0 +1,29 @@
---
# Playbook: deploy the Vintage Story game server role.
- name: Deploy Vintage Story  # was "Execute test" — misleading for a deploy play
  hosts: alpha-control-plane
  become: true
  vars_files:
    - ../secrets.yaml
  roles:
    - role: ericomeehan.vintage-story
      vars:
        ServerName: "Vintage Story Server on eom.dev"
        ServerUrl: vintage-story.eom.dev
        ServerDescription: "A Vintage Story survival server hosted by eom.dev.<br>Join our community on the public <a href='https://discourse.eom.dev/c/gaming/vintage-story/63'>Discourse</a> forum and <a href='https://matrix.to/#/#vintage-story:eom.dev/'>Matrix</a> chat.<br>Also join us for live streams from <a href='https://owncast.eom.dev/'>Owncast</a> on eom.dev."
        WelcomeMessage: "Welcome to Vintage Story on eom.dev, {0}. May you survive well and prosper.\\nPlease join our external discussions on Discourse and Matrix.\\nhttps://discourse.eom.dev/\\nhttps://matrix.to/#/#vintage-story:eom.dev/"
        AdvertiseServer: true
        Password: "{{ vintage_story_server_password }}"
        WorldConfig:
          Seed: null
          SaveFileLocation: /gamedata/vs/Saves/season_i.vcdbs
          WorldName: Season I
          AllowCreativeMode: true
          PlayStyle: surviveandbuild
          PlayStyleLangCode: surviveandbuild-bands
          WorldType: standard
          WorldConfiguration: null
          MapSizeY: null
          CreatedByPlayerName: TopHatRick
          DisabledMods: null
          RepairMode: false
          OnlyWhitelisted: true
        # NOTE(review): placed at role-vars level, not inside WorldConfig —
        # the flat capture was ambiguous; confirm against the role's template.
        StartupCommands: "/whitelist add 3wm/diYjPGuydHNvOd6H0fDy \\n /op TopHatRick"