Matrix bots

Eric Meehan 2025-11-12 08:16:29 -05:00
parent b533423c05
commit a5a6e20844
5 changed files with 515 additions and 4 deletions


@@ -1,3 +1,10 @@
# SPDX-License-Identifier: MIT-0
---
# defaults file for ansible-role-localai
localai_disable_webui: "false"
localai_api_keys:
  - "123abc"
  - "abc123"
localai_watchdog_idle: "false"
localai_watchdog_idle_timeout: "15m"
localai_watchdog_busy: "true"
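
These defaults are quoted strings because they are passed straight through to container environment variables (see the LOCALAI_* entries further down). A deployment would normally override them per environment; a minimal sketch, assuming a hypothetical group_vars file and vaulted variables standing in for the real API keys:

  # group_vars/localai.yml (hypothetical) -- overrides the role defaults above;
  # real keys belong in Ansible Vault rather than in the repository.
  localai_disable_webui: "true"
  localai_api_keys:
    - "{{ vault_localai_admin_key }}"  # assumed vault-defined variable
    - "{{ vault_localai_bot_key }}"
  localai_watchdog_idle: "true"
  localai_watchdog_idle_timeout: "30m"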

tasks/baibot.yaml Normal file

@@ -0,0 +1,185 @@
---
# tasks file for baibot
- name: Create Baibot namespace
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: baibot
- name: Create a ConfigMap for Baibot
  k8s:
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: config
        namespace: baibot
      data:
        config.yml: |
          homeserver:
            # The canonical homeserver domain name
            server_name: synapse.eom.dev
            url: https://synapse.eom.dev/
          user:
            mxid_localpart: localai
            password: {{ localai_admin_password }}
            # The name the bot uses as a display name and when it refers to itself.
            # Leave empty to use the default (baibot).
            name: LocalAI
            encryption:
              # An optional passphrase to use for backing up and recovering the bot's encryption keys.
              # You can use any string here.
              #
              # If set to null, the recovery module will not be used and losing your session/database (see persistence)
              # will mean you lose access to old messages in encrypted rooms.
              #
              # Changing this subsequently will also cause you to lose access to old messages in encrypted rooms.
              # If you really need to change this:
              # - Set `recovery_reset_allowed` to `true` and adjust the passphrase
              # - Remove your session file and database (see persistence)
              # - Restart the bot
              # - Then restore `recovery_reset_allowed` to `false` to prevent accidental resets in the future
              recovery_passphrase: {{ localai_matrix_recovery_key }}
              # An optional flag to reset the encryption recovery passphrase.
              recovery_reset_allowed: false
          # Command prefix. Leave empty to use the default (!bai).
          command_prefix: "!bai"
          room:
            # Whether the bot should send an introduction message after joining a room.
            post_join_self_introduction_enabled: true
          access:
            # Space-separated list of MXID patterns which specify who is an admin.
            admin_patterns:
              - "@eric:eom.dev"
          persistence:
            # This is unset here, because we expect the configuration to come from an environment variable (BAIBOT_PERSISTENCE_DATA_DIR_PATH).
            # In your setup, you may wish to set this to a directory path.
            data_dir_path: null
            # An optional secret for encrypting the bot's session data (stored in data_dir_path).
            # This must be 32 bytes (64 characters when hex-encoded).
            # Generate it with: `openssl rand -hex 32`
            # Leave null or empty to avoid using encryption.
            # Changing this subsequently requires that you also throw away all data stored in data_dir_path.
            session_encryption_key: {{ baibot_session_encryption_key }}
            # An optional secret for encrypting bot configuration stored in Matrix's account data.
            # This must be 32 bytes (64 characters when hex-encoded).
            # Generate it with: `openssl rand -hex 32`
            # Leave null or empty to avoid using encryption.
            # Changing this subsequently will make you lose your configuration.
            config_encryption_key: {{ baibot_config_encryption_key }}
          agents:
            # A list of statically-defined agents.
            #
            # Below are a few common choices on popular providers, preconfigured for development purposes (see docs/development.md).
            # You may enable some of the ones you see below or define others.
            # You can also leave this list empty and only define agents dynamically (via chat).
            #
            # Uncomment one or more of these and potentially adjust their configuration (API key, etc.).
            # Consider setting `initial_global_config.handler.*` to an agent that you enable here.
            static_definitions:
              - id: localai
                provider: localai
                config:
                  base_url: https://localai.eom.dev/v1
                  api_key: {{ localai_api_keys[1] }}
                  text_generation:
                    model_id: llama3-instruct
                    prompt: "You are a brief, but helpful bot called LocalAI powered by the llama3-8b-instruct model. Format all responses in markdown."
                    temperature: 1.0
                    max_response_tokens: 16384
                    max_context_tokens: 128000
                  speech_to_text:
                    model_id: whisper-1
                  text_to_speech:
                    model_id: tts-1
                    voice: onyx
                    speed: 1.0
                    response_format: opus
                  image_generation:
                    model_id: stablediffusion
                    style: vivid
                    # Intentionally defaults to a small value to improve performance
                    size: 256x256
                    quality: standard
          # Initial global configuration. This only affects the first run of the bot.
          # Configuration is later managed at runtime.
          initial_global_config:
            handler:
              catch_all: static/localai
              text_generation: static/localai
              text_to_speech: static/localai
              speech_to_text: static/localai
              image_generation: static/localai
            # Space-separated list of MXID patterns which specify who can use the bot.
            # By default, we let anyone on the homeserver use the bot.
            user_patterns:
              - "@*:*"
          # Controls logging.
          #
          # Sets all tracing targets (external crates) to warn, and our own logs to debug.
          # For even more verbose logging, one may also use trace.
          #
          # matrix_sdk_crypto may be chatty and could be added with an error level.
          #
          # Learn more here: https://stackoverflow.com/a/73735203
          logging: warn,mxlink=debug,baibot=debug
- name: Create a persistent volume claim for Baibot
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: data
        namespace: baibot
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 256Gi
- name: Create a Deployment for Baibot
  k8s:
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: baibot
        namespace: baibot
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: baibot
        template:
          metadata:
            labels:
              app: baibot
          spec:
            containers:
              - name: baibot
                image: ghcr.io/etkecc/baibot:v1.0.0
                env:
                  - name: BAIBOT_PERSISTENCE_DATA_DIR_PATH
                    value: /data
                  - name: BAIBOT_CONFIG_FILE_PATH
                    value: /config/config.yml
                volumeMounts:
                  - name: config
                    mountPath: /config
                  - name: data
                    mountPath: /data
            volumes:
              - name: data
                persistentVolumeClaim:
                  claimName: data
              - name: config
                configMap:
                  name: config
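
The two encryption keys templated into the ConfigMap above must stay stable across runs: per the config comments, changing either one discards the session data or the stored configuration. So they should be generated once and stored, not recomputed on every run. A sketch under that assumption, using the `openssl rand -hex 32` command the comments suggest and hypothetical vault variable names:

  # One-off, run manually and paste the output into an Ansible Vault file:
  #   openssl rand -hex 32   # -> vault_baibot_session_encryption_key
  #   openssl rand -hex 32   # -> vault_baibot_config_encryption_key
  # Hypothetical vaulted vars consumed by the ConfigMap template above:
  baibot_session_encryption_key: "{{ vault_baibot_session_encryption_key }}"
  baibot_config_encryption_key: "{{ vault_baibot_config_encryption_key }}"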


@@ -24,15 +24,15 @@
   runtimeClassName: nvidia
   secretEnv:
     - name: LOCALAI_DISABLE_WEBUI
-      value: "true"
+      value: "{{ localai_disable_webui }}"
     - name: LOCALAI_API_KEY
       value: "{{ localai_api_keys | join(',') }}"
     - name: LOCALAI_WATCHDOG_IDLE
-      value: "true"
+      value: "{{ localai_watchdog_idle }}"
     - name: LOCALAI_WATCHDOG_IDLE_TIMEOUT
-      value: "5m"
+      value: "{{ localai_watchdog_idle_timeout }}"
     - name: LOCALAI_WATCHDOG_BUSY
-      value: "true"
+      value: "{{ localai_watchdog_busy }}"
   resources:
     limits:
       nvidia.com/gpu: 1
@@ -46,11 +46,14 @@
       operator: Equal
       value: GPU
       effect: NoSchedule
+  service:
+    externalTrafficPolicy: Local
   ingress:
     enabled: true
     className: nginx
     annotations:
       cert-manager.io/cluster-issuer: ca-issuer
+      nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
     hosts:
       - host: localai.eom.dev
         paths:
@@ -60,3 +63,4 @@
       - secretName: localai-tls
         hosts:
           - localai.eom.dev
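
With the web UI now optionally disabled and API keys enforced, a quick way to confirm the deployment still answers is to hit LocalAI's OpenAI-compatible model-listing endpoint with one of the configured keys. A minimal sketch using the Ansible uri module (task placement and the choice of key index are assumptions, not part of this commit):

  - name: Smoke-test the LocalAI API
    uri:
      url: https://localai.eom.dev/v1/models
      headers:
        Authorization: "Bearer {{ localai_api_keys[1] }}"
      return_content: true
    register: localai_models

  - name: Show available models
    debug:
      var: localai_models.json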

tasks/matrix-gptbot.yaml Normal file

@@ -0,0 +1,258 @@
---
# tasks file for matrix-gptbot
- name: Create Matrix ChatGPT Bot namespace
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: matrix-gptbot
- name: Create a ConfigMap for Matrix ChatGPT Bot
  k8s:
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: config
        namespace: matrix-gptbot
      data:
        config.ini: |
          # Copy this file to config.ini and replace the values below to match your needs
          #
          # The values that are not commented have to be set, everything else comes with
          # sensible defaults.
          ###############################################################################
          [GPTBot]
          # Some way for the user to contact you.
          # Ideally, either your personal user ID or a support room
          # If this is your user ID and Debug is 1, any errors that occur when using the script will be reported to you in detail
          #
          Operator = @eric:eom.dev
          # Enable debug mode
          # Will send error tracebacks to you (= Operator above) if an error occurs processing a message from you
          # Defaults to 0 (= off)
          #
          # Debug = 1
          # The default room name used by the !newroom command
          # Defaults to GPTBot if not set
          #
          # DefaultRoomName = GPTBot
          # Contents of a special message sent to the GPT API with every request.
          # Can be used to give the bot some context about the environment it's running in
          #
          # SystemMessage = You are a helpful bot.
          # Force inclusion of the SystemMessage defined above if one is defined on per-room level
          # If no custom message is defined for the room, SystemMessage is always included
          #
          # ForceSystemMessage = 0
          # Path to a custom logo
          # Used as room/space image and profile picture
          # Defaults to logo.png in assets directory
          #
          # Logo = assets/logo.png
          # Display name for the bot
          #
          DisplayName = LocalAI
          # A list of allowed users
          # If not defined, everyone is allowed to use the bot (so you should really define this)
          # Use the "*:homeserver.matrix" syntax to allow everyone on a given homeserver
          # Alternatively, you can also specify a room ID to allow everyone in the room to use the bot within that room
          #
          # AllowedUsers = ["*:matrix.local", "!roomid:matrix.local"]
          # Minimum level of log messages that should be printed
          # Available log levels in ascending order: trace, debug, info, warning, error, critical
          # Defaults to info
          #
          LogLevel = info
          ###############################################################################
          [OpenAI]
          # The Chat Completion model you want to use.
          #
          Model = falcon3-3b-instruct
          # The Image Generation model you want to use.
          #
          # ImageModel = dall-e-3
          # Your OpenAI API key
          #
          # Find this in your OpenAI account:
          # https://platform.openai.com/account/api-keys
          #
          # This may not be required for self-hosted models; in that case, just leave it
          # as it is.
          #
          APIKey = {{ localai_api_keys[1] }}
          # The maximum amount of input sent to the API
          #
          # In conjunction with MaxMessages, this determines how much context (= previous
          # messages) you can send with your query.
          #
          # If you set this too high, the responses you receive will become shorter the
          # longer the conversation gets.
          #
          # https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
          #
          # MaxTokens = 3000
          # The maximum number of messages in the room that will be considered as context
          #
          # By default, the last (up to) 20 messages will be sent as context, in addition
          # to the system message and the current query itself.
          #
          # MaxMessages = 20
          # The base URL of the OpenAI API
          #
          # Setting this allows you to use a self-hosted AI model for chat completions
          # using something like llama-cpp-python or ollama
          #
          BaseURL = https://localai.eom.dev/v1/
          # Whether to force the use of tools in the chat completion model
          #
          # This will make the bot allow the use of tools in the chat completion model,
          # even if the model you are using isn't known to support tools. This is useful
          # if you are using a self-hosted model that supports tools, but the bot doesn't
          # know about it.
          #
          # ForceTools = 1
          # Whether a dedicated model should be used for tools
          #
          # This will make the bot use a dedicated model for tools. This is useful if you
          # want to use a model that doesn't support tools, but still want to be able to
          # use tools.
          #
          # ToolModel = gpt-4o
          # Whether to emulate tools in the chat completion model
          #
          # This will make the bot use the default model to *emulate* tools. This is
          # useful if you want to use a model that doesn't support tools, but still want
          # to be able to use tools. However, this may cause all kinds of weird results.
          #
          # EmulateTools = 0
          # Force vision in the chat completion model
          #
          # By default, the bot only supports image recognition in known vision models.
          # If you set this to 1, the bot will assume that the model you're using supports
          # vision, and will send images to the model as well. This may be required for
          # some self-hosted models.
          #
          # ForceVision = 0
          # Maximum width and height of images sent to the API if vision is enabled
          #
          # The OpenAI API has a limit of 2000 pixels for the long side of an image, and
          # 768 pixels for the short side. You may have to adjust these values if you're
          # using a self-hosted model that has different limits. You can also set these
          # to 0 to disable image resizing.
          #
          # MaxImageLongSide = 2000
          # MaxImageShortSide = 768
          # Whether the used model supports video files as input
          #
          # If you are using a model that supports video files as input, set this to 1.
          # This will make the bot send video files to the model as well as images.
          # This may be possible with some self-hosted models, but is not supported by
          # the OpenAI API at this time.
          #
          # ForceVideoInput = 0
          # Advanced settings for the OpenAI API
          #
          # These settings are not required for normal operation, but can be used to
          # tweak the behavior of the bot.
          #
          # Note: These settings are not validated by the bot, so make sure they are
          # correct before setting them, or the bot may not work as expected.
          #
          # For more information, see the OpenAI documentation:
          # https://platform.openai.com/docs/api-reference/chat/create
          #
          # Temperature = 1
          # TopP = 1
          # FrequencyPenalty = 0
          # PresencePenalty = 0
          ###############################################################################
          [WolframAlpha]
          # An API key for Wolfram|Alpha
          # Request one at https://developer.wolframalpha.com
          #
          # Leave unset to disable Wolfram|Alpha integration (`!gptbot calculate`)
          #
          # APIKey = YOUR-APIKEY
          ###############################################################################
          [Matrix]
          # The URL to your Matrix homeserver
          #
          # If you are using Pantalaimon, this should be the URL of your Pantalaimon
          # instance, not the Matrix homeserver itself.
          #
          Homeserver = https://synapse.eom.dev/
          # An Access Token for the user your bot runs as
          #
          # See https://www.matrix.org/docs/guides/client-server-api#login
          # for information on how to obtain this value
          #
          # AccessToken = syt_yoursynapsetoken
          # Instead of an Access Token, you can also use a User ID and password
          # to log in. Upon first run, the bot will automatically turn this into
          # an Access Token and store it in the config file, and remove the
          # password from the config file.
          #
          # This is particularly useful if you are using Pantalaimon, where this
          # is the only (easy) way to generate an Access Token.
          #
          UserID = @localai:eom.dev
          Password = {{ localai_admin_password }}
          ###############################################################################
          [Database]
          # Path of the main database
          # Used to "remember" settings, etc.
          #
          Path = database.db
          ###############################################################################
          [TrackingMore]
          # API key for TrackingMore
          # If not defined, the bot will not be able to provide parcel tracking
          #
          # APIKey = abcde-fghij-klmnop
          ###############################################################################
          [OpenWeatherMap]
          # API key for OpenWeatherMap
          # If not defined, the bot will be unable to provide weather information
          #
          # APIKey = __________________________
          ###############################################################################
- name: Create a Deployment for Matrix ChatGPT Bot
  k8s:
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: matrix-gptbot
        namespace: matrix-gptbot
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: matrix-gptbot
        template:
          metadata:
            labels:
              app: matrix-gptbot
          spec:
            containers:
              - name: matrix-gptbot
                image: ericomeehan/matrix-gptbot
                command:
                  - python
                args:
                  - "-m"
                  - gptbot
                  - "-c"
                  - /config/config.ini
                volumeMounts:
                  - name: config
                    mountPath: /config
            volumes:
              - name: config
                configMap:
                  name: config
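
The config above logs in with UserID/Password and lets the bot mint its own access token on first run. If you would rather supply AccessToken directly, the client-server login endpoint referenced in the comments can be called once; a sketch with the Ansible uri module (a one-off helper, not part of the committed tasks):

  - name: Obtain a Matrix access token for the bot user
    uri:
      url: https://synapse.eom.dev/_matrix/client/v3/login
      method: POST
      body_format: json
      body:
        type: m.login.password
        identifier:
          type: m.id.user
          user: localai
        password: "{{ localai_admin_password }}"
    register: matrix_login
    # matrix_login.json.access_token then holds the value for AccessToken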


@@ -0,0 +1,57 @@
---
# tasks file for matrixchatgptbot
- name: Create Matrix ChatGPT Bot namespace
  k8s:
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: matrixchatgptbot
- name: Create a Deployment for Matrix ChatGPT Bot
  k8s:
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: matrixchatgptbot
        namespace: matrixchatgptbot
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: matrixchatgptbot
        template:
          metadata:
            labels:
              app: matrixchatgptbot
          spec:
            containers:
              - name: matrixchatgptbot
                image: ericomeehan/matrixchatgptbot
                imagePullPolicy: Always
                env:
                  - name: HOMESERVER
                    value: https://synapse.eom.dev/
                  - name: USER_ID
                    value: "@localai:eom.dev"
                  - name: PASSWORD
                    value: "{{ localai_admin_password }}"
                  - name: DEVICE_ID
                    value: "MatrixChatGPTBot"
                  - name: ROOM_ID
                    value: "!SVlxBQRtoGTXOgcAzW:eom.dev"
                  - name: OPENAI_API_KEY
                    value: "{{ localai_api_keys[1] }}"
                  - name: GPT_API_ENDPOINT
                    value: https://localai.eom.dev/v1/chat/completions
                  - name: GPT_MODEL
                    value: falcon3-3b-instruct
                  - name: SYSTEM_PROMPT
                    value: "You are LocalAI, a large language model running on eom.dev. Respond conversationally."
                  - name: IMAGE_GENERATION_ENDPOINT
                    value: https://localai.eom.dev/v1/images/generations
                  - name: IMAGE_GENERATION_BACKEND
                    value: localai
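
GPT_API_ENDPOINT points this bot at LocalAI's OpenAI-compatible chat completions route. To verify the endpoint, model name, and key independently of the bot, something like the following works; a sketch, with the prompt and task placement purely illustrative:

  - name: Smoke-test chat completions with the bot's model
    uri:
      url: https://localai.eom.dev/v1/chat/completions
      method: POST
      headers:
        Authorization: "Bearer {{ localai_api_keys[1] }}"
      body_format: json
      body:
        model: falcon3-3b-instruct
        messages:
          - role: user
            content: "Reply with a one-sentence greeting."
    register: chat_test

  - name: Show the model's reply
    debug:
      msg: "{{ chat_test.json.choices[0].message.content }}"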