Compare commits

...

10 Commits

Author SHA1 Message Date
67bcc80664 v1.0.6 2024-11-25 22:15:23 -05:00
cff19d90ad v1.0.5 2024-11-23 21:51:47 -05:00
d844bd3933 v1.0.4 2024-11-16 10:06:31 -05:00
db94484a35 v1.0.3 2024-11-14 10:40:40 -05:00
5ceba22dc8 v1.0.2 2024-11-14 10:31:30 -05:00
1c0f927278 v1.0.1 2024-11-11 15:59:09 -05:00
6e7ee42c1e v1.0.0 2024-11-11 15:46:28 -05:00
3c1ec13720 v0.1.0 2024-10-29 10:48:50 -04:00
160ffca2ee v0.0.20 2024-10-25 10:25:04 -04:00
2f3f4a8d2c v0.0.19 2024-10-10 11:37:23 -04:00
107 changed files with 2021 additions and 367 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
vault
*.tar.gz
+*.qcow2

15
.gitmodules vendored
View File

@ -1,18 +1,21 @@
[submodule "roles/ericomeehan.eom"] [submodule "roles/ericomeehan.eom"]
path = roles/ericomeehan.eom path = roles/ericomeehan.eom
url = https://git.eom.dev/ansible-role-eom url = git@git.eom.dev:DevOps/ansible-role-eom.git
[submodule "roles/ericomeehan.debian"] [submodule "roles/ericomeehan.debian"]
path = roles/ericomeehan.debian path = roles/ericomeehan.debian
url = https://git.eom.dev/ansible-role-debian url = git@git.eom.dev:DevOps/ansible-role-debian.git
[submodule "roles/ericomeehan.gondwana"] [submodule "roles/ericomeehan.gondwana"]
path = roles/ericomeehan.gondwana path = roles/ericomeehan.gondwana
url = https://git.eom.dev/ansible-role-gondwana url = git@git.eom.dev:eom/ansible-role-gondwana.git
[submodule "roles/ericomeehan.ericomeehan"] [submodule "roles/ericomeehan.ericomeehan"]
path = roles/ericomeehan.ericomeehan path = roles/ericomeehan.ericomeehan
url = https://git.eom.dev/ansible-role-ericomeehan url = git@git.eom.dev:eric/ansible-role-ericomeehan.git
[submodule "roles/ericomeehan.nvidia_driver"] [submodule "roles/ericomeehan.nvidia_driver"]
path = roles/ericomeehan.nvidia_driver path = roles/ericomeehan.nvidia_driver
url = https://git.eom.dev/ansible-role-nvidia-driver url = git@git.eom.dev:DevOps/ansible-role-nvidia-driver.git
[submodule "roles/ericomeehan.kraken-bot"] [submodule "roles/ericomeehan.kraken-bot"]
path = roles/ericomeehan.kraken-bot path = roles/ericomeehan.kraken-bot
url = https://git.eom.dev/ansible-role-kraken-bot url = git@git.eom.dev:kraken/ansible-role-kraken-bot.git
[submodule "roles/ericomeehan.qemu-network"]
path = roles/ericomeehan.qemu-network
url = https://git.eom.dev/ansible-role-qemu-network

9
alpha-cluster.yaml Normal file
View File

@ -0,0 +1,9 @@
---
# Playbook for alpha-cluster
- name: Deploy services to Alpha Cluster
hosts: alpha-control-plane
become: true
vars_files:
secrets.yaml
roles:
- role: ericomeehan.gondwanamc

View File

@ -1,13 +1,38 @@
---
# Master playbook for eom.dev
-- name: Initialize systems
-hosts: clusters
+- name: Initialize servers
+hosts: servers
become: true
roles:
- role: ericomeehan.debian
+vars:
+is_new_host: true
+- role: ericomeehan.ericomeehan
+- role: ericomeehan.nvidia_driver
+vars:
+nvidia_driver_debian_install_tesla_driver: true
+when:
+- nvidia_driver_needed == true
+- role: ericomeehan.qemu-network
+- name: Wait for virtual machines to boot
+- name: Copy SSH ID to virtual machines
+- name: Initialize virtual machines
+hosts: vms
+become: true
+roles:
+- role: ericomeehan.debian
+vars:
+is_new_host: true
- role: ericomeehan.ericomeehan
-- name: Initialize cluster nodes
+- name: Initialize load balancers
+hosts: load_balancers
+become: true
+- name: Initialize Kubernetes clusters
hosts: clusters
become: true
pre_tasks:
@ -37,11 +62,8 @@
- role: geerlingguy.containerd
- role: geerlingguy.kubernetes
- role: geerlingguy.helm
-when: kubernetes_role == 'control_plane'
-- role: ericomeehan.nvidia_driver
-vars:
-- nvidia_driver_debian_install_tesla_driver: true
-when: nvidia_driver_needed == true
+when:
+- kubernetes_role == 'control-plane'
- name: Deploy services
hosts: alpha-control-plane
@ -50,8 +72,4 @@
become: true
roles:
- role: ericomeehan.eom
-vars:
-target_namespace: prod
- role: ericomeehan.gondwana
-vars:
-target_namespace: prod

View File

@ -1,8 +1,5 @@
---
# Playbook for deploying a Kubernetes cluster
-- name: Create network bridge
-- name: Create virtual machines
- name: Prepare cluster environments
hosts: cluster_nodes
become: true

71
eom.dev.yaml Normal file
View File

@ -0,0 +1,71 @@
---
# Master playbook for eom.dev
- name: Initialize servers
hosts: servers
become: true
roles:
- role: ericomeehan.debian
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
when: nvidia_driver_needed == true
- role: ericomeehan.qemu_network
- name: Initialize clusters
hosts: clusters
become: true
pre_tasks:
- name: Update sysctl configuration to enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Update sysctl configuration to enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- role: geerlingguy.kubernetes
- role: geerlingguy.helm
when: kubernetes_role == 'control_plane'
- name: Initialize gateway
hosts: gateway
become: true
roles:
- role: ericomeehan.gateway
- name: Initialize vpn
hosts: vpn
become: true
roles:
- role: ericomeehan.vpn
- name: Initialize nfs
hosts: nfs
become: true
roles:
- role: ericomeehan.nfs
- name: Deploy services
hosts: control-plane
vars_files:
- vars/secrets.yaml
become: true
roles:
- role: ericomeehan.eom
- role: ericomeehan.gondwana
- role: ericomeehan.kraken-bot

View File

@ -1,10 +1,8 @@
---
- name: Deploy eom.dev one service at a time
-hosts: alpha-control-plane
+hosts: imac
become: true
-vars:
-target_namespace: prod
vars_files:
- ../secrets.yaml
roles:
-- role: ericomeehan.eom
+- role: ericomeehan.ericomeehan

13
files/01_debian_cloud.cfg Normal file
View File

@ -0,0 +1,13 @@
apt:
generate_mirrorlists: true
system_info:
default_user:
name: debian
sudo: ALL=(ALL) NOPASSWD:ALL
shell: /bin/bash
lock_passwd: True
gecos: Debian
groups: [adm, audio, cdrom, dialout, dip, floppy, plugdev, sudo, video]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash

71
files/05_logging.cfg Normal file
View File

@ -0,0 +1,71 @@
## This yaml formated config file handles setting
## logger information. The values that are necessary to be set
## are seen at the bottom. The top '_log' are only used to remove
## redundency in a syslog and fallback-to-file case.
##
## The 'log_cfgs' entry defines a list of logger configs
## Each entry in the list is tried, and the first one that
## works is used. If a log_cfg list entry is an array, it will
## be joined with '\n'.
_log:
- &log_base |
[loggers]
keys=root,cloudinit
[handlers]
keys=consoleHandler,cloudLogHandler
[formatters]
keys=simpleFormatter,arg0Formatter
[logger_root]
level=DEBUG
handlers=consoleHandler,cloudLogHandler
[logger_cloudinit]
level=DEBUG
qualname=cloudinit
handlers=
propagate=1
[handler_consoleHandler]
class=StreamHandler
level=WARNING
formatter=arg0Formatter
args=(sys.stderr,)
[formatter_arg0Formatter]
format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s
[formatter_simpleFormatter]
format=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s
- &log_file |
[handler_cloudLogHandler]
class=FileHandler
level=DEBUG
formatter=arg0Formatter
args=('/var/log/cloud-init.log', 'a', 'UTF-8')
- &log_syslog |
[handler_cloudLogHandler]
class=handlers.SysLogHandler
level=DEBUG
formatter=simpleFormatter
args=("/dev/log", handlers.SysLogHandler.LOG_USER)
log_cfgs:
# Array entries in this list will be joined into a string
# that defines the configuration.
#
# If you want logs to go to syslog, uncomment the following line.
# - [ *log_base, *log_syslog ]
#
# The default behavior is to just log to a file.
# This mechanism that does not depend on a system service to operate.
- [ *log_base, *log_file ]
# A file path can also be used.
# - /etc/log.conf
# This tells cloud-init to redirect its stdout and stderr to
# 'tee -a /var/log/cloud-init-output.log' so the user can see output
# there without needing to look on the console.
output: {all: '| tee -a /var/log/cloud-init-output.log'}

View File

@ -1,3 +1,4 @@
---
# Global defaults
+docker_needed: false
nvidia_driver_needed: false

5
group_vars/alpha.yaml Normal file
View File

@ -0,0 +1,5 @@
cluster_name: alpha
metallb_addresses:
- 192.168.1.160-192.168.1.191
metallb_asn: 6501
letsencrypt_url: https://acme-v02.api.letsencrypt.org/directory
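The MetalLB values above are consumed by the tasks added in main.yaml later in this diff (BGPPeer, IPAddressPool, BGPAdvertisement). Hand-substituting the alpha values into that IPAddressPool task would yield roughly the following manifest; this is an illustration, not generated output:

apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: pool0
  namespace: metallb
spec:
  addresses:
    - 192.168.1.160-192.168.1.191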

5
group_vars/beta.yaml Normal file
View File

@ -0,0 +1,5 @@
cluster_name: beta
metallb_addresses:
- 192.168.1.192-192.168.1.223
metallb_asn: 6502
letsencrypt_url: https://acme-staging-v02.api.letsencrypt.org/directory

View File

@ -1,3 +1,7 @@
---
# Group vars for clusters
containerd_config_cgroup_driver_systemd: true
+kubernetes_version: '1.31'
+kubernetes_version_rehl_package: '1.31.1'
+kubernetes_allow_pods_on_control_plane: false
+kubernetes_join_command_extra_opts: "--ignore-preflight-errors=Port-10250"

View File

@ -1,6 +1,7 @@
---
# Group vars for Kubernetes control plane nodes
kubernetes_role: control_plane
+helm_version: 'v3.16.3'
open_ports:
- interface: any
protocol: tcp
@ -20,3 +21,6 @@ open_ports:
- interface: any
protocol: tcp
port: 10259
+memory:
+unit: MiB
+value: 8192

5
group_vars/gamma.yaml Normal file
View File

@ -0,0 +1,5 @@
cluster_name: gamma
metallb_addresses:
- 192.168.1.96-224.168.1.255
metallb_asn: 6503
letsencrypt_url: https://acme-staging-v02.api.letsencrypt.org/directory

View File

@ -0,0 +1,6 @@
---
# Group vars for hypervisors
open_ports:
- interface: any
protocol: tcp
port: 22

108
group_vars/vms.yaml Normal file
View File

@ -0,0 +1,108 @@
---
# Group vars for virtual machines
type: kvm
name: my_vm_name
instance_id: "iid-{{ name }}"
packages:
- nfs-common
- openssh-server
- prometheus-node-exporter
- qemu-guest-agent
memory:
unit: MiB
value: 4096
vcpu:
placement: static
value: 1
os:
type:
arch: x86_64
machine: pc-q35-5.2
value: hvm
boot:
dev: hd
cpu:
mode: host-model
check: none
emulator: /usr/bin/qemu-system-x86_64
disks:
- type: file
device: disk
driver:
name: qemu
type: qcow2
source:
file: /var/lib/libvirt/images/{{ name }}.qcow2
target:
dev: vda
bus: virtio
- type: file
device: cdrom
driver:
name: qemu
type: raw
source:
file: /var/lib/libvirt/images/{{ name }}.iso
target:
dev: sda
bus: sata
interfaces:
- type: network
source:
network: wan
model:
type: virtio
channels:
- type: unix
target:
type: virtio
name: org.qemu.guest_agent.0
address:
type: virtio-serial
controller: 0
bus: 0
port: 1
inputs:
- type: tablet
bus: usb
address:
type: usb
bus: 0
port: 1
- type: mouse
bus: ps2
- type: keyboard
bus: ps2
graphics:
type: spice
autoport: 'yes'
listen:
type: address
image:
compression: 'off'
video:
model:
type: qxl
ram: 65536
vram: 65536
vgamem: 16384
heads: 1
primary: yes
memballoon:
model: virtio
rng:
model: virtio
backend:
model: random
value: /dev/urandom
devices:
emulator: "{{ emulator }}"
disks: "{{ disks }}"
filesystems: "{{ filesystems }}"
interfaces: "{{ interfaces }}"
channels: "{{ channels }}"
inputs: "{{ inputs }}"
graphics: "{{ graphics }}"
video: "{{ video }}"
memballoon: "{{ memballoon }}"
rng: "{{ rng }}"

View File

@ -14,3 +14,6 @@ open_ports:
- interface: any
protocol: tcp
port: 30000-32767
+memory:
+unit: MiB
+value: 16384

View File

@ -1,2 +1,6 @@
# Vars file for workstations
-open_ports: ""
+docker_needed: true
+open_ports:
+- interface: any
+protocol: tcp
+port: 22

43
helm.yaml Normal file
View File

@ -0,0 +1,43 @@
---
# Playbook for deploying helm charts
- name: Deploy WordPress using Helm
hosts: alpha-control-plane
become: true
vars:
wordpress_chart_version: "9.0.1"
wordpress_release_name: "wordpress"
wordpress_database_password: "your_database_password"
wordpress_service_type: "ClusterIP"
wordpress_service_port: 80
wordpress_persistence_enabled: true
wordpress_persistence_storageClass: "standard"
wordpress_persistence_size: "20Gi"
tasks:
- name: Add Helm repository for WordPress
helm_repo:
name: bitnami
state: present
url: https://charts.bitnami.com/bitnami
- name: Install WordPress Helm chart
helm_chart:
name: "{{ wordpress_release_name }}"
chart: "wordpress"
repo: "bitnami"
version: "{{ wordpress_chart_version }}"
values:
global:
imageRegistry: "docker.io"
service:
type: "{{ wordpress_service_type }}"
port: "{{ wordpress_service_port }}"
wordpressUsername: "your_wordpress_username"
wordpressPassword: "your_wordpress_password"
wordpressEmail: "your_wordpress_email"
wordpressDatabase:
password: "{{ wordpress_database_password }}"
persistence:
enabled: "{{ wordpress_persistence_enabled }}"
storageClass: "{{ wordpress_persistence_storageClass }}"
size: "{{ wordpress_persistence_size }}"

View File

@ -0,0 +1 @@
name: alpha-control-plane

View File

@ -1,2 +0,0 @@
---
# Host vars for alpha-control-plane

View File

@ -0,0 +1 @@
name: alpha-worker-0

View File

@ -1,4 +0,0 @@
---
# Host vars for alpha-worker-0
nvidia_driver_needed: true
nvidia_driver_tesla: true

View File

@ -0,0 +1 @@
name: alpha-worker-1

View File

@ -0,0 +1 @@
name: alpha-worker-10

View File

@ -0,0 +1 @@
name: alpha-worker-11

View File

@ -0,0 +1 @@
name: alpha-worker-12

View File

@ -0,0 +1 @@
name: alpha-worker-2

View File

@ -0,0 +1 @@
name: alpha-worker-3

View File

@ -0,0 +1 @@
name: alpha-worker-4

View File

@ -0,0 +1 @@
name: alpha-worker-5

View File

@ -0,0 +1 @@
name: alpha-worker-6

View File

@ -0,0 +1 @@
name: alpha-worker-7

View File

@ -0,0 +1 @@
name: alpha-worker-8

View File

@ -0,0 +1 @@
name: alpha-worker-9

0
host_vars/alpha.yaml Normal file
View File

View File

@ -0,0 +1 @@
name: beta-control-plane

View File

@ -0,0 +1 @@
name: beta-worker-0

View File

@ -0,0 +1 @@
name: beta-worker-1

View File

@ -0,0 +1 @@
name: beta-worker-2

View File

@ -0,0 +1 @@
name: beta-worker-3

View File

@ -0,0 +1 @@
name: gamma-control-plane

View File

@ -1,16 +1,3 @@
# Host vars for inspiron-3670
+docker_needed: false
nvidia_driver_needed: true
-packages:
-- curl
-- davfs2
-- gimp
-- git
-- gphoto2
-- latexml
-- neovim
-- passwordsafe
-- texlive-full
-- thunderbird
-- tmux
-- torbrowser-launcher
-- w3m

View File

@ -1,3 +1,3 @@
# Host vars for latitude-7230
ansible_connection: local
-open_ports: ""

View File

@ -0,0 +1,35 @@
name: network-file-system
nfs_exports: ["/data *(rw,sync,no_root_squash)"]
mounts:
- [ vdb, /data ]
disks:
- type: file
device: disk
driver:
name: qemu
type: qcow2
source:
file: /var/lib/libvirt/images/{{ name }}.qcow2
target:
dev: vda
bus: virtio
- type: block
device: disk
driver:
name: qemu
type: raw
source:
dev: /dev/poweredge-t640-vg1/store-0
target:
dev: vdb
bus: virtio
- type: file
device: cdrom
driver:
name: qemu
type: raw
source:
file: /var/lib/libvirt/images/{{ name }}.iso
target:
dev: sda
bus: sata

View File

@ -0,0 +1,15 @@
# Host vars for poweredge-r350
libvirt_networks:
- name: wan
forward:
mode: bridge
bridge:
name: wan
dev: eno8303
- name: lan
forward:
mode: bridge
bridge:
name: lan
dev: eno8403
libvirt_guests: ""

View File

@ -0,0 +1,32 @@
---
# Host vars for poweredge-t640
libvirt_networks:
- name: wan
forward:
mode: bridge
bridge:
name: wan
dev: eno1np0
- name: lan
forward:
mode: bridge
bridge:
name: lan
dev: eno2np1
libvirt_guests:
- alpha-control-plane
- alpha-worker-0
- alpha-worker-1
- alpha-worker-2
- alpha-worker-3
- alpha-worker-4
- alpha-worker-5
- alpha-worker-6
- alpha-worker-7
- alpha-worker-8
- alpha-worker-9
- alpha-worker-10
- alpha-worker-11
- alpha-worker-12
nfs_exports: ["/data *(rw,sync,no_root_squash)"]
nvidia_driver_needed: true

View File

@ -1,17 +0,0 @@
---
all:
children:
workstations:
hosts:
latitude-7230:
ansible-host: 192.168.1.123
inspiron-3670:
ansible-host: 192.168.1.210
imac:
ansible-host: 192.168.1.139
servers:
children:
poweredge-r350:
ansible-host: 192.168.1.137
poweredge-t640:
ansible-host: 192.168.1.138

52
inventories/eom.dev.yml Normal file
View File

@ -0,0 +1,52 @@
---
all:
children:
workstations:
hosts:
latitude-7230:
inspiron-3670:
imac:
hypervisors:
hosts:
poweredge-r350:
poweredge-t640:
vms:
children:
clusters:
children:
control_planes:
hosts:
alpha-control-plane:
workers:
hosts:
alpha-worker-0:
alpha-worker-1:
alpha-worker-2:
alpha-worker-3:
alpha-worker-4:
alpha-worker-5:
alpha-worker-6:
alpha-worker-7:
alpha-worker-8:
alpha-worker-9:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:
alpha:
hosts:
alpha-control-plane:
alpha-worker-0:
alpha-worker-1:
alpha-worker-2:
alpha-worker-3:
alpha-worker-4:
alpha-worker-5:
alpha-worker-6:
alpha-worker-7:
alpha-worker-8:
alpha-worker-9:
alpha-worker-10:
alpha-worker-11:
alpha-worker-12:

60
kubernetes.yaml Normal file
View File

@ -0,0 +1,60 @@
---
# Playbook for Kubernetes
- name: Initialize Kubernetes clusters
hosts: clusters
become: true
pre_tasks:
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- name: Alpha Cluster
hosts: alpha
become: true
roles:
- role: geerlingguy.kubernetes
- name: Beta Cluster
hosts: beta
become: true
roles:
- role: geerlingguy.kubernetes
- name: Gamma Cluster
hosts: alpha
become: true
roles:
- role: geerlingguy.kubernetes
- name: Install Helm
hosts: control_planes
become: true
roles:
- role: geerlingguy.helm

401
main.yaml Normal file
View File

@ -0,0 +1,401 @@
---
# Playbook for eom.dev
- name: Initialize workstations
hosts: workstations
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.debian
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
when: nvidia_driver_needed == true
- role: geerlingguy.docker
when: docker_needed == true
- name: Initialize hypervisors
hosts: hypervisors
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.debian
- role: ericomeehan.ericomeehan
- role: ericomeehan.nvidia_driver
when: nvidia_driver_needed == true
- name: Initialize Network File Systems
hosts: poweredge-t640
become: true
roles:
- role: geerlingguy.nfs
tasks:
- name: Create NFS directories
file:
path: "{{ item }}"
state: directory
loop:
- /data/alpha
- /data/beta
- /data/gamma
- /data/eric
- name: Prepare virtualization environments
hosts: hypervisors
become: true
tasks:
- name: Install packages for virtualization
apt:
update_cache: yes
name:
- bridge-utils
- genisoimage
- qemu-utils
- qemu-system-x86
- libvirt-daemon-system
- prometheus-libvirt-exporter
- python3-libvirt
- python3-lxml
state: present
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Copy interfaces template
template:
src: interfaces.j2
dest: /etc/network/interfaces
- name: Reload sysctl configuration
command: sysctl --system
- name: Reload network service
service:
name: networking
state: restarted
- name: Deploy virtual machines
hosts: hypervisors
become: true
vars_files:
- ../secrets.yaml
tasks:
- name: Define libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
loop: "{{ libvirt_networks }}"
- name: Create libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: create
loop: "{{ libvirt_networks }}"
- name: Autostart libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
autostart: true
loop: "{{ libvirt_networks }}"
- name: Download base image
get_url:
url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
force: true
- name: Copy base image
copy:
src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
remote_src: true
dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
force: true
loop: "{{ libvirt_guests }}"
- name: Resize images
command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
loop: "{{ libvirt_guests }}"
- name: Create cloud-config directory
file:
path: "/tmp/{{ item }}"
state: directory
loop: "{{ libvirt_guests }}"
- name: Copy cloud-config user-data template
template:
src: user-data.j2
dest: "/tmp/{{ domain.name }}/user-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Copy cloud-config meta-data template
template:
src: meta-data.j2
dest: "/tmp/{{ domain.name }}/meta-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Generate iso
command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
loop: "{{ libvirt_guests }}"
- name: Define libvirt virtual machine
community.libvirt.virt:
command: define
xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Create libvirt virtual machine
community.libvirt.virt:
name: "{{ item }}"
command: create
loop: "{{ libvirt_guests }}"
- name: Autostart libvirt virtual machines
community.libvirt.virt:
name: "{{ item }}"
autostart: true
loop: "{{ libvirt_guests }}"
- name: Wait for guest initialization
wait_for:
timeout: 300
- name: Reset libvirt virtual machines for filesystem resize
command: "virsh reset {{ item }}"
loop: "{{ libvirt_guests }}"
- name: Wait for manual tasks
hosts: localhost
tasks:
- name: Trust SSH identities
pause:
prompt: "Press Enter to continue..."
- name: Initialize virtual machines
hosts: vms
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.ericomeehan
- name: Initialize Kubernetes clusters
hosts: clusters
become: true
pre_tasks:
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- name: Alpha Cluster
hosts: alpha
become: true
roles:
- role: geerlingguy.kubernetes
- name: Beta Cluster
hosts: beta
become: true
roles:
- role: geerlingguy.kubernetes
- name: Gamma Cluster
hosts: gamma
become: true
roles:
- role: geerlingguy.kubernetes
- name: Install Helm
hosts: control_planes
become: true
roles:
- role: geerlingguy.helm
- name: Deploy base Kubernetes resources
hosts: control_planes
become: true
tasks:
- name: Add NFS Provisioner repository
kubernetes.core.helm_repository:
name: nfs-subdir-external-provisioner
repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
- name: Add MetalLB repository
kubernetes.core.helm_repository:
name: metallb
repo_url: https://metallb.github.io/metallb
- name: Add Ingress Nginx repository
kubernetes.core.helm_repository:
name: ingress-nginx
repo_url: https://kubernetes.github.io/ingress-nginx
- name: Add cert-manager repository
kubernetes.core.helm_repository:
name: jetstack
repo_url: https://charts.jetstack.io
- name: Add bitnami repository
kubernetes.core.helm_repository:
name: bitnami
repo_url: https://charts.bitnami.com/bitnami
- name: Update Helm repos
command: helm repo update
- name: Deploy NFS Provisioner
kubernetes.core.helm:
name: nfs-subdir-external-provisioner
chart_ref: nfs-subdir-external-provisioner/nfs-subdir-external-provisioner
release_namespace: nfs-provisioner
create_namespace: true
values:
nfs:
server: poweredge-t640
path: "/data/{{ cluster_name }}"
storageClass:
defaultClass: true
- name: Deploy MetalLB
kubernetes.core.helm:
name: metallb
chart_ref: metallb/metallb
release_namespace: metallb
create_namespace: true
- name: Wait for MetalLB to initialize
wait_for:
timeout: 120
- name: Deploy BGP Peer
k8s:
state: present
definition:
apiVersion: metallb.io/v1beta2
kind: BGPPeer
metadata:
name: bgp-peer
namespace: metallb
spec:
myASN: "{{ metallb_asn }}"
peerASN: 6500
peerAddress: 192.168.1.1
- name: Deploy address pool
k8s:
state: present
definition:
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: pool0
namespace: metallb
spec:
addresses: "{{ metallb_addresses }}"
- name: Deploy BGP Advertisement
k8s:
state: present
definition:
apiVersion: metallb.io/v1beta1
kind: BGPAdvertisement
metadata:
name: bgp-advertisement
namespace: metallb
spec:
ipAddressPools:
- pool0
- name: Deploy Ingress Nginx
kubernetes.core.helm:
name: ingress-nginx
chart_ref: ingress-nginx/ingress-nginx
release_namespace: ingress-nginx
create_namespace: true
values:
controller:
annotations:
acme.cert-manager.io/http01-edit-in-place: 'true'
extraArgs:
update-status: "false"
- name: Deploy cert-manager
kubernetes.core.helm:
name: cert-manager
chart_ref: jetstack/cert-manager
release_namespace: cert-manager
create_namespace: true
values:
crds:
enabled: true
- name: Deploy Cluster Issuer
k8s:
state: present
definition:
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: ca-issuer
spec:
acme:
server: "{{ letsencrypt_url }}"
email: eric@eom.dev
privateKeySecretRef:
name: letsencrypt-issuer-key
solvers:
- http01:
ingress:
ingressClassName: nginx
- name: Port forward HTTP(S) to Ingress Controllers
hosts: localhost
tasks:
- name: Wait for manual tasks
pause:
prompt: "Press Enter to continue..."

5
nfs.yaml Normal file
View File

@ -0,0 +1,5 @@
- name: Configure NFS
hosts: network-file-system
become: true
roles:
- role: geerlingguy.nfs

110
poweredge-r350.yaml Normal file
View File

@ -0,0 +1,110 @@
---
# Playbook for poweredge-r350
# This is being used to test vm deployments
- name: Deploy virtual machines
hosts: poweredge-r350
become: true
vars_files:
- ../secrets.yaml
pre_tasks:
- name: Install packages for virtualization
apt:
update_cache: yes
name:
- bridge-utils
- genisoimage
- qemu-utils
- qemu-system-x86
- libvirt-daemon-system
- python3-libvirt
- python3-lxml
state: present
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Define libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: define
xml: "{{ lookup('template', 'libvirt-network.xml.j2') }}"
loop: "{{ libvirt_networks }}"
- name: Create libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
command: create
loop: "{{ libvirt_networks }}"
- name: Autostart libvirt networks
community.libvirt.virt_net:
name: "{{ item.name }}"
autostart: true
loop: "{{ libvirt_networks }}"
- name: Download base image for guests
get_url:
url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
force: true
loop: "{{ libvirt_guests }}"
- name: Create cloud-config directory for guests
file:
path: "/var/lib/libvirt/configs/{{ item }}"
state: directory
loop: "{{ libvirt_guests }}"
- name: Copy cloud-config templates for guests
template:
src: eom_cloud.cfg.j2
dest: "/var/lib/libvirt/configs/{{ domain.name }}/user-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Copy cloud-config templates for guests
template:
src: meta-data.j2
dest: "/var/lib/libvirt/configs/{{ domain.name }}/meta-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Generate iso for guests
command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /var/lib/libvirt/configs/{{ item }}/user-data /var/lib/libvirt/configs/{{ item }}/meta-data"
loop: "{{ libvirt_guests }}"
- name: Copy logging config for guests
copy:
src: 05_logging.cfg
dest: "/var/lib/libvirt/configs/{{ item }}/05_logging.cfg"
loop: "{{ libvirt_guests }}"
- name: Define libvirt virtual machine
community.libvirt.virt:
command: define
xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Create libvirt virtual machine
community.libvirt.virt:
name: "{{ item }}"
command: create
loop: "{{ libvirt_guests }}"

5
poweredge-t640.yaml Normal file
View File

@ -0,0 +1,5 @@
- name: Install nvidia
hosts: poweredge-t640
become: true
roles:
- role: ericomeehan.nvidia_driver

12
qemu-base.yaml Normal file
View File

@ -0,0 +1,12 @@
---
# Create base image for QEMU
- name: Install dependencies
- name: Download Debian installation image
- name: Create virtual disk for image
- name: Boot installation media with preseed
- name: Wait for installation completion
- name: Initialize base image
roles:
- role: ericomeehan.debian
- role: ericomeehan.ericomeehan
- name: Shut down base image

20
qemu.yaml Normal file
View File

@ -0,0 +1,20 @@
---
- name: Test QEMU
hosts: localhost
become: true
tasks:
- name: Deploy test vm
community.libvirt.virt:
name: test
state: running
persistent: true
memory: 4096
vcpus: 1
os_type: linux
disks:
- name: test
size: 8
format: qcow2
cdrom:
- url: https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.7.0-amd64-netinst.iso

154
reassign.yaml Normal file
View File

@ -0,0 +1,154 @@
- name: Deploy virtual machines
hosts: poweredge-t640
become: true
vars_files:
- ../secrets.yaml
vars:
libvirt_guests:
- alpha-worker-8
- alpha-worker-9
- alpha-worker-10
- alpha-worker-11
- alpha-worker-12
tasks:
- name: Download base image
get_url:
url: https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
dest: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
force: true
- name: Copy base image
copy:
src: /var/lib/libvirt/images/debian-12-generic-amd64.qcow2
remote_src: true
dest: "/var/lib/libvirt/images/{{ item }}.qcow2"
force: true
loop: "{{ libvirt_guests }}"
- name: Resize images
command: "qemu-img resize -f qcow2 /var/lib/libvirt/images/{{ item }}.qcow2 16G"
loop: "{{ libvirt_guests }}"
- name: Create cloud-config directory
file:
path: "/tmp/{{ item }}"
state: directory
loop: "{{ libvirt_guests }}"
- name: Copy cloud-config user-data template
template:
src: user-data.j2
dest: "/tmp/{{ domain.name }}/user-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Copy cloud-config meta-data template
template:
src: meta-data.j2
dest: "/tmp/{{ domain.name }}/meta-data"
force: true
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Generate iso
command: "genisoimage -output /var/lib/libvirt/images/{{ item }}.iso -volid cidata -joliet -rock /tmp/{{ item }}/user-data /tmp/{{ item }}/meta-data"
loop: "{{ libvirt_guests }}"
- name: Define libvirt virtual machine
community.libvirt.virt:
command: define
xml: "{{ lookup('template', 'libvirt-vm.xml.j2') }}"
loop: "{{ libvirt_guests }}"
vars:
domain: "{{ hostvars[item] }}"
- name: Create libvirt virtual machine
community.libvirt.virt:
name: "{{ item }}"
command: create
loop: "{{ libvirt_guests }}"
- name: Autostart libvirt virtual machines
community.libvirt.virt:
name: "{{ item }}"
autostart: true
loop: "{{ libvirt_guests }}"
- name: Wait for guest initialization
wait_for:
timeout: 300
- name: Reset libvirt virtual machines for filesystem resize
command: "virsh reset {{ item }}"
loop: "{{ libvirt_guests }}"
- name: Wait for manual tasks
hosts: localhost
tasks:
- name: Trust SSH identities
pause:
prompt: "Press Enter to continue..."
- name: Initialize virtual machines
hosts:
- alpha-worker-8
- alpha-worker-9
- alpha-worker-10
- alpha-worker-11
- alpha-worker-12
become: true
vars_files:
- ../secrets.yaml
roles:
- role: ericomeehan.ericomeehan
- name: Initialize Kubernetes clusters
hosts:
- alpha-worker-8
- alpha-worker-9
- alpha-worker-10
- alpha-worker-11
- alpha-worker-12
become: true
pre_tasks:
- name: Enable IPv4 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv4.ip_forward = 1'
state: present
- name: Enable IPv6 packet forwarding
lineinfile:
path: /etc/sysctl.conf
line: 'net.ipv6.conf.all.forwarding = 1'
state: present
- name: Reload sysctl configuration
command: sysctl --system
- name: Enable br_netfilter kernel module
command: modprobe br_netfilter
- name: Add the module to a configuration file for persistence
lineinfile:
path: /etc/modules-load.d/modules.conf
line: "br_netfilter"
- name: Install kubernetes library
apt:
name: python3-kubernetes
state: present
roles:
- role: geerlingguy.containerd
- name: Alpha Cluster
hosts:
- alpha
become: true
roles:
- role: geerlingguy.kubernetes

@ -1 +1 @@
-Subproject commit 0236cddce716b4f7c8275648365ed753a7f1b4c7
+Subproject commit 62b06fcfad3a3c082211efa4237c205f7d83cea7

@ -1 +1 @@
-Subproject commit 2d395340236352edaeb15cea56a9e65cc740453a
+Subproject commit b599bbae3d906edba175506d9455d55239de9ac0

@ -1 +1 @@
-Subproject commit ba6296a2ec7fdbdea8d9676e6423d326b4c3a4d0
+Subproject commit 4dc7f7917edb012b15268a4e94c81b55592130a5

@ -1 +1 @@
-Subproject commit 0ca51b452a430820c15bb0a8424c6d65eb7db349
+Subproject commit c1bed4ec3766ae456ca7e55ec2699e640d76a884

@ -1 +1 @@
-Subproject commit 8946eefeb5442761edef6853665074e1306391a0
+Subproject commit 7a3c04febc0ab5d6370da362ab80d862bfc3dd39

View File

@ -1,38 +0,0 @@
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).

View File

@ -1,5 +0,0 @@
---
# defaults file for ericomeehan.nvidia_driver_debian
nvidia_driver_skip_reboot: false
nvidia_driver_source: nvidia
nvidia_driver_tesla: false

View File

@ -1,2 +0,0 @@
---
# handlers file for ericomeehan.nvidia_driver_debian

View File

@ -1,34 +0,0 @@
galaxy_info:
author: your name
description: your role description
company: your company (optional)
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: http://example.com/issue/tracker
# Choose a valid license ID from https://spdx.org - some suggested licenses:
# - BSD-3-Clause (default)
# - MIT
# - GPL-2.0-or-later
# - GPL-3.0-only
# - Apache-2.0
# - CC-BY-4.0
license: license (GPL-2.0-or-later, MIT, etc)
min_ansible_version: 2.1
# If this a Container Enabled role, provide the minimum Ansible Container version.
# min_ansible_container_version:
galaxy_tags: []
# List tags for your role here, one per line. A tag is a keyword that describes
# and categorizes the role. Users find roles by searching for tags. Be sure to
# remove the '[]' above, if you add tags to this list.
#
# NOTE: A tag is limited to a single word comprised of alphanumeric characters.
# Maximum 20 tags per role.
dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

View File

@ -1,9 +0,0 @@
---
# tasks file for ericomeehan.nvidia_driver_debian
- name: Install Nvidia proprietary drivers
include_tasks: nvidia-proprietary.yml
when: nvidia_driver_source == "nvidia"
- name: Install nouveau and mesa drivers
include_tasks: nouveau-mesa.yml
when: nvidia_driver_source == "nouveau"

View File

@ -1,18 +0,0 @@
---
# tasks file for nouveau-mesa
- name: Update apt
become: yes
apt:
update_cache: yes
- name: Install nouveau and mesa
apt:
state: present
name:
- xserver-xorg-video-nouveau
- mesa-utils
register: install_driver
- name: Reboot after driver install
reboot:
when: install_driver.changed and not nvidia_driver_skip_reboot

View File

@ -1,38 +0,0 @@
---
# tasks file for nvidia-proprietary
- name: Add contrib & non-free repository
replace:
dest: /etc/apt/sources.list
regexp: '^(deb(?!.* contrib).*)'
replace: '\1 contrib non-free'
- name: Update apt
become: yes
apt:
update_cache: yes
- name: Install Linux headers
apt:
name: linux-headers-{{ ansible_kernel }}
state: present
- name: Install Nvidia and CUDA drivers
apt:
state: present
name:
- nvidia-driver
- firmware-misc-nonfree
- nvidia-cuda-dev
- nvidia-cuda-toolkit
register: install_driver
- name: Install tesla drivers
apt:
state: present
name:
- nvidia-tesla-470-driver
when: nvidia_driver_tesla == true
- name: Reboot after driver install
reboot:
when: install_driver.changed and not nvidia_driver_skip_reboot

View File

@ -1,61 +0,0 @@
---
# tasks file for nvidia-proprietary
- name: Add contrib & non-free repository
replace:
dest: /etc/apt/sources.list
regexp: '^(deb(?!.* contrib).*)'
replace: '\1 contrib non-free'
- name: Install Linux headers
apt:
name: linux-headers-{{ ansible_kernel }}
state: present
- name: Download Nvidia driver local repo
get_url:
url: https://us.download.nvidia.com/tesla/550.90.07/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
dest: /tmp/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
- name: Install Nvidia driver local repo
apt:
deb: /tmp/nvidia-driver-local-repo-debian12-550.90.07_1.0-1_amd64.deb
state: present
- name: Add Nvidia driver local repo keyring
copy:
remote_src: true
src: /var/nvidia-driver-local-repo-debian12-550.90.07/nvidia-driver-local-3FEEC8FF-keyring.gpg
dest: /usr/share/keyrings/nvidia-driver-local-3FEEC8FF-keyring.gpg
- name: Download CUDA repo
get_url:
url: https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
dest: /tmp/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
- name: Install CUDA repo
apt:
deb: /tmp/cuda-repo-debian12-12-4-local_12.4.1-550.54.15-1_amd64.deb
state: present
- name: Add CUDA repo keyring
copy:
remote_src: true
src: /var/cuda-repo-debian12-12-4-local/cuda-C5AA6424-keyring.gpg
dest: /usr/share/keyrings/cuda-C5AA6424-keyring.gpg
- name: Update package list
apt:
update_cache: yes
- name: Install Nvidia driver and CUDA toolkit
apt:
name:
- firmware-misc-nonfree
- cuda-toolkit-12-4
- nvidia-driver=550.90.07-1
state: present
register: install_driver
- name: Reboot after driver install
reboot:
when: install_driver.changed and not nvidia_driver_skip_reboot

View File

@ -1,2 +0,0 @@
localhost

View File

@ -1,5 +0,0 @@
---
- hosts: localhost
remote_user: root
roles:
- ericomeehan.nvidia_driver_debian

View File

@ -1,2 +0,0 @@
---
# vars file for ericomeehan.nvidia_driver_debian

View File

@ -0,0 +1,3 @@
skip_list:
- 'yaml'
- 'role-name'

View File

@ -0,0 +1,4 @@
# These are supported funding model platforms
---
github: geerlingguy
patreon: geerlingguy

View File

@ -0,0 +1,66 @@
---
name: CI
'on':
pull_request:
push:
branches:
- master
schedule:
- cron: "30 1 * * 3"
defaults:
run:
working-directory: 'geerlingguy.nfs'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.nfs'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install yamllint
- name: Lint code.
run: |
yamllint .
molecule:
name: Molecule
runs-on: ubuntu-latest
strategy:
matrix:
distro:
- rockylinux9
- ubuntu2004
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.nfs'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install test dependencies.
run: pip3 install ansible molecule molecule-plugins[docker] docker
- name: Run Molecule tests.
run: molecule test
env:
PY_COLORS: '1'
ANSIBLE_FORCE_COLOR: '1'
MOLECULE_DISTRO: ${{ matrix.distro }}

View File

@ -0,0 +1,40 @@
---
# This workflow requires a GALAXY_API_KEY secret present in the GitHub
# repository or organization.
#
# See: https://github.com/marketplace/actions/publish-ansible-role-to-galaxy
# See: https://github.com/ansible/galaxy/issues/46
name: Release
'on':
push:
tags:
- '*'
defaults:
run:
working-directory: 'geerlingguy.nfs'
jobs:
release:
name: Release
runs-on: ubuntu-latest
steps:
- name: Check out the codebase.
uses: actions/checkout@v4
with:
path: 'geerlingguy.nfs'
- name: Set up Python 3.
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install Ansible.
run: pip3 install ansible-core
- name: Trigger a new import on Galaxy.
run: >-
ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }}
$(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2)

View File

@ -0,0 +1,34 @@
---
name: Close inactive issues
'on':
schedule:
- cron: "55 19 * * 6" # semi-random time
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v8
with:
days-before-stale: 120
days-before-close: 60
exempt-issue-labels: bug,pinned,security,planned
exempt-pr-labels: bug,pinned,security,planned
stale-issue-label: "stale"
stale-pr-label: "stale"
stale-issue-message: |
This issue has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
close-issue-message: |
This issue has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
stale-pr-message: |
This pr has been marked 'stale' due to lack of recent activity. If there is no further activity, the issue will be closed in another 30 days. Thank you for your contribution!
Please read [this blog post](https://www.jeffgeerling.com/blog/2020/enabling-stale-issue-bot-on-my-github-repositories) to see the reasons why I mark issues as stale.
close-pr-message: |
This pr has been closed due to inactivity. If you feel this is in error, please reopen the issue or file a new issue with the relevant details.
repo-token: ${{ secrets.GITHUB_TOKEN }}

5
roles/geerlingguy.nfs/.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
*.retry
*/__pycache__
*.pyc
.cache

View File

@ -0,0 +1,10 @@
---
extends: default
rules:
line-length:
max: 120
level: warning
ignore: |
.github/workflows/stale.yml

View File

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2017 Jeff Geerling
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,40 @@
# Ansible Role: NFS
[![CI](https://github.com/geerlingguy/ansible-role-nfs/workflows/CI/badge.svg?event=push)](https://github.com/geerlingguy/ansible-role-nfs/actions?query=workflow%3ACI)
Installs NFS utilities on RedHat/CentOS or Debian/Ubuntu.
## Requirements
None.
## Role Variables
Available variables are listed below, along with default values (see `defaults/main.yml`):
nfs_exports: []
A list of exports which will be placed in the `/etc/exports` file. See Ubuntu's simple [Network File System (NFS)](https://ubuntu.com/server/docs/service-nfs) guide for more info and examples. (Simple example: `nfs_exports: [ "/home/public *(rw,sync,no_root_squash)" ]`).
nfs_rpcbind_state: started
nfs_rpcbind_enabled: true
(RedHat/CentOS/Fedora only) The state of the `rpcbind` service, and whether it should be enabled at system boot.
## Dependencies
None.
## Example Playbook
- hosts: db-servers
roles:
- { role: geerlingguy.nfs }
## License
MIT / BSD
## Author Information
This role was created in 2014 by [Jeff Geerling](https://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).

View File

@ -0,0 +1,5 @@
---
nfs_exports: []
nfs_rpcbind_state: started
nfs_rpcbind_enabled: true

View File

@ -0,0 +1,3 @@
---
- name: reload nfs
command: 'exportfs -ra'

View File

@ -0,0 +1,2 @@
install_date: 'Mon 11 Nov 2024 01:39:36 AM '
version: 2.1.0

View File

@ -0,0 +1,27 @@
---
dependencies: []
galaxy_info:
role_name: nfs
author: geerlingguy
description: NFS installation for Linux.
company: "Midwestern Mac, LLC"
license: "license (BSD, MIT)"
min_ansible_version: 2.10
platforms:
- name: Fedora
versions:
- all
- name: Debian
versions:
- all
- name: Ubuntu
versions:
- all
galaxy_tags:
- system
- nfs
- filesystem
- share
- nfsv4
- efs

View File

@ -0,0 +1,13 @@
---
- name: Converge
hosts: all
become: true
pre_tasks:
- name: Update apt cache.
apt: update_cache=yes cache_valid_time=600
when: ansible_os_family == 'Debian'
changed_when: false
roles:
- role: geerlingguy.nfs

View File

@ -0,0 +1,21 @@
---
role_name_check: 1
dependency:
name: galaxy
options:
ignore-errors: true
driver:
name: docker
platforms:
- name: instance
image: "geerlingguy/docker-${MOLECULE_DISTRO:-rockylinux9}-ansible:latest"
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true
pre_build_image: true
provisioner:
name: ansible
playbooks:
converge: ${MOLECULE_PLAYBOOK:-converge.yml}

View File

@ -0,0 +1,36 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
include_vars: "{{ ansible_os_family }}.yml"
- name: Include overrides specific to Fedora.
include_vars: Fedora.yml
when:
- ansible_os_family == 'RedHat'
- ansible_distribution == "Fedora"
# Setup/install tasks.
- include_tasks: setup-RedHat.yml
when: ansible_os_family == 'RedHat'
- include_tasks: setup-Debian.yml
when: ansible_os_family == 'Debian'
- name: Ensure directories to export exist
file: # noqa 208
path: "{{ item }}"
state: directory
with_items: "{{ nfs_exports | map('split') | map('first') | unique }}"
- name: Copy exports file.
template:
src: exports.j2
dest: /etc/exports
owner: root
group: root
mode: 0644
notify: reload nfs
- name: Ensure nfs is running.
service: "name={{ nfs_server_daemon }} state=started enabled=yes"
when: nfs_exports|length
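The with_items expression in "Ensure directories to export exist" above splits each export string on whitespace, keeps the first field (the exported path), and de-duplicates, so only the directories themselves get created. Worked through by hand for the nfs_exports value this change sets on poweredge-t640 (illustration only, not captured output):

nfs_exports                                        => ["/data *(rw,sync,no_root_squash)"]
nfs_exports | map('split')                         => [["/data", "*(rw,sync,no_root_squash)"]]
nfs_exports | map('split') | map('first')          => ["/data"]
nfs_exports | map('split') | map('first') | unique => ["/data"]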

View File

@ -0,0 +1,7 @@
---
- name: Ensure NFS utilities are installed.
apt:
name:
- nfs-common
- nfs-kernel-server
state: present

View File

@ -0,0 +1,9 @@
---
- name: Ensure NFS utilities are installed.
package: name=nfs-utils state=present
- name: Ensure rpcbind is running as configured.
service:
name: rpcbind
state: "{{ nfs_rpcbind_state }}"
enabled: "{{ nfs_rpcbind_enabled }}"

View File

@ -0,0 +1,13 @@
# /etc/exports: the access control list for filesystems which may be exported
# to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
#
{% for export in nfs_exports %}
{{ export }}
{% endfor %}
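The comment header above is copied into /etc/exports verbatim and the loop then appends one line per list entry; with the nfs_exports value set on poweredge-t640 in this change, the rendered file ends with a single export (hand-rendered illustration):

# (comment header copied verbatim)
/data *(rw,sync,no_root_squash)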

View File

@ -0,0 +1,2 @@
---
nfs_server_daemon: nfs-kernel-server

View File

@ -0,0 +1,2 @@
---
nfs_server_daemon: nfs-server

View File

@ -0,0 +1,2 @@
---
nfs_server_daemon: nfs-server

21
templates/interfaces.j2 Normal file
View File

@ -0,0 +1,21 @@
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
source /etc/network/interfaces.d/*
# The loopback network interface
auto lo
iface lo inet loopback
# Libvirt networks
{% for network in libvirt_networks %}
{% if network.bridge.dev is defined %}
auto {{ network.bridge.dev }}
iface {{ network.bridge.dev }} inet manual
auto {{ network.bridge.name }}
iface {{ network.bridge.name }} inet dhcp
bridge_ports {{ network.bridge.dev }}
{% endif %}
{% endfor %}
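Hand-rendering this template against the libvirt_networks defined in host_vars/poweredge-t640.yaml (wan bridged onto eno1np0, lan onto eno2np1) would append roughly the following stanzas after the loopback block; this is an illustration, not generated output, and leading whitespace (the bridge_ports indent) is not preserved in this view:

auto eno1np0
iface eno1np0 inet manual
auto wan
iface wan inet dhcp
bridge_ports eno1np0
auto eno2np1
iface eno2np1 inet manual
auto lan
iface lan inet dhcp
bridge_ports eno2np1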

View File

@ -0,0 +1,14 @@
<network>
<name>{{ item.name }}</name>
<forward mode='{{ item.forward.mode }}'/>
{% if item.bridge.dev is defined %}
<bridge name='{{ item.bridge.name }}' dev='{{ item.bridge.dev }}'/>
{% else %}
<bridge name='{{ item.bridge.name }}'/>
<ip address='{{ item.ip.address }}' netmask='{{ item.ip.netmask }}'>
<dhcp>
<range start='{{ item.ip.dhcp.range.start }}' end='{{ item.ip.dhcp.range.end }}'/>
</dhcp>
</ip>
{% endif %}
</network>
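Both hypervisor networks in this change define bridge.dev, so the first branch is taken; hand-substituting the wan network from host_vars/poweredge-t640.yaml gives approximately:

<network>
  <name>wan</name>
  <forward mode='bridge'/>
  <bridge name='wan' dev='eno1np0'/>
</network>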

108
templates/libvirt-vm.xml.j2 Normal file
View File

@ -0,0 +1,108 @@
<domain type='{{ domain.type }}'>
<name>{{ domain.name }}</name>
<memory unit='{{ domain.memory.unit }}'>{{ domain.memory.value }}</memory>
<vcpu placement='{{ domain.vcpu.placement }}'>{{ domain.vcpu.value }}</vcpu>
<os>
<type arch='{{ domain.os.type.arch }}' machine='{{ domain.os.type.machine }}'>{{ domain.os.type.value }}</type>
<boot dev='{{ domain.os.boot.dev }}'/>
</os>
<cpu mode='{{ domain.cpu.mode }}' check='{{ domain.cpu.check }}'/>
<devices>
<emulator>{{ domain.devices.emulator }}</emulator>
{% if domain.devices.disks is defined %}
{% for disk in domain.devices.disks %}
<disk type='{{ disk.type }}' device='{{ disk.device }}'>
<driver name='{{ disk.driver.name }}' type='{{ disk.driver.type }}'/>
{% if disk.source.protocol is defined %}
<source protocol='{{ disk.source.protocol }}' name='{{ disk.source.name }}'>
<host name='{{ disk.source.host.name }}' port='{{ disk.source.host.port }}'/>
</source>
{% elif disk.source.dev is defined %}
<source dev='{{ disk.source.dev }}'/>
{% else %}
<source file='{{ disk.source.file }}'/>
{% endif %}
<target dev='{{ disk.target.dev }}' bus='{{ disk.target.bus }}'/>
{% if disk.address is defined %}
<address type='{{ disk.address.type }}' domain='{{ disk.address.domain }}' bus='{{ disk.address.bus }}' slot='{{ disk.address.slot }}' function='{{ disk.address.function }}'/>
{% endif %}
</disk>
{% endfor %}
{% endif %}
{% if domain.filesystems is defined %}
{% for filesystem in domain.filesystems %}
<filesystem type='{{ filesystem.type }}' accessmode='{{ filesystem.accessmode }}'>
<source dir='{{ filesystem.source.dir }}'/>
<target dir='{{ filesystem.target.dir }}'/>
</filesystem>
{% endfor %}
{% endif %}
{% if domain.devices.interfaces is defined %}
{% for interface in domain.devices.interfaces %}
<interface type='{{ interface.type }}'>
<source network='{{ interface.source.network }}'/>
<model type='{{ interface.model.type }}'/>
{% if interface.address is defined %}
<address type='{{ interface.address.type }}' domain='{{ interface.address.domain }}' bus='{{ interface.address.bus }}' slot='{{ interface.address.slot }}' function='{{ interface.address.function }}'/>
{% endif %}
</interface>
{% endfor %}
{% endif %}
{% if domain.devices.channels is defined %}
{% for channel in domain.devices.channels %}
<channel type='{{ channel.type }}'>
<target type='{{ channel.target.type }}' name='{{ channel.target.name }}'/>
{% if channel.address is defined %}
<address type='{{ channel.address.type }}' controller='{{ channel.address.controller }}' bus='{{ channel.address.bus }}' port='{{ channel.address.port }}'/>
{% endif %}
</channel>
{% endfor %}
{% endif %}
{% if domain.devices.inputs is defined %}
{% for input in domain.devices.inputs %}
<input type='{{ input.type }}' bus='{{ input.bus }}'>
{% if input.address is defined %}
<address type='{{ input.address.type }}' bus='{{ input.address.bus }}' port='{{ input.address.port }}'/>
{% endif %}
</input>
{% endfor %}
{% endif %}
{% if domain.devices.graphics is defined %}
<graphics type='{{ domain.devices.graphics.type }}' autoport='{{ domain.devices.graphics.autoport }}'>
<listen type='{{ domain.devices.graphics.listen.type }}'/>
<image compression='{{ domain.devices.graphics.image.compression }}'/>
</graphics>
{% endif %}
{% if domain.devices.video is defined %}
<video>
<model type='{{ domain.devices.video.model.type }}' ram='{{ domain.devices.video.model.ram }}' vram='{{ domain.devices.video.model.vram }}' vgamem='{{ domain.devices.video.model.vgamem }}' heads='{{ domain.devices.video.model.heads }}' primary='{{ domain.devices.video.model.primary }}'/>
{% if domain.devices.video.address is defined %}
<address type='{{ domain.devices.video.address.type }}' domain='{{ domain.devices.video.address.domain }}' bus='{{ domain.devices.video.address.bus }}' slot='{{ domain.devices.video.address.slot }}' function='{{ domain.devices.video.address.function }}'/>
{% endif %}
</video>
{% endif %}
{% if domain.devices.memballoon is defined %}
<memballoon model='{{ domain.devices.memballoon.model }}'>
{% if domain.devices.memballoon.address is defined %}
<address type='{{ domain.devices.memballoon.address.type }}' domain='{{ domain.devices.memballoon.address.domain }}' bus='{{ domain.devices.memballoon.address.bus }}' slot='{{ domain.devices.memballoon.address.slot }}' function='{{ domain.devices.memballoon.address.function }}'/>
{% endif %}
</memballoon>
{% endif %}
{% if domain.devices.rng is defined %}
<rng model='virtio'>
<backend model='random'>/dev/urandom</backend>
{% if domain.devices.rng.address is defined %}
<address type='{{ domain.devices.rng.address.type }}' domain='{{ domain.devices.rng.address.domain }}' bus='{{ domain.devices.rng.address.bus }}' slot='{{ domain.devices.rng.address.slot }}' function='{{ domain.devices.rng.address.function }}'/>
{% endif %}
</rng>
{% endif %}
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
</devices>
</domain>
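An abbreviated hand-rendering of the opening of this template, using only the defaults from group_vars/vms.yaml (name my_vm_name, 4096 MiB, one qcow2 disk); real guests override at least name, and often memory, in host_vars/group_vars, and the remaining device sections are omitted here:

<domain type='kvm'>
  <name>my_vm_name</name>
  <memory unit='MiB'>4096</memory>
  <vcpu placement='static'>1</vcpu>
  <os>
    <type arch='x86_64' machine='pc-q35-5.2'>hvm</type>
    <boot dev='hd'/>
  </os>
  <cpu mode='host-model' check='none'/>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='/var/lib/libvirt/images/my_vm_name.qcow2'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <!-- cdrom, interfaces, channels, inputs, graphics, video, memballoon, rng, serial and console omitted -->
  </devices>
</domain>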

2
templates/meta-data.j2 Normal file
View File

@ -0,0 +1,2 @@
instance-id: {{ domain.instance_id }}
local-hostname: {{ domain.name }}
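With the instance_id pattern from group_vars/vms.yaml ("iid-{{ name }}") and the name set in host_vars/alpha-worker-0.yaml, this template renders to (hand-substituted):

instance-id: iid-alpha-worker-0
local-hostname: alpha-worker-0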

Some files were not shown because too many files have changed in this diff.