OpenAI functions and clearer parameters

Eric Meehan 2025-03-25 21:57:00 -04:00
parent c64a9dd7db
commit dba02ae5ea
2 changed files with 38 additions and 27 deletions

@@ -1,2 +1,5 @@
-OPENAI_API_KEY=
-OPENAI_BASE_URL=
+OPENAI_API_KEY =
+OPENAI_BASE_URL =
+OPENAI_RESPONSES_INSTRUCTIONS =
+SEGMENT_DURATION =
+TMP_AUDIO_PATH =
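
A note on this template: python-dotenv treats a blank assignment like TMP_AUDIO_PATH = as set-to-empty-string rather than unset, so os.getenv('TMP_AUDIO_PATH', default) returns '' and int(os.getenv('SEGMENT_DURATION', 30000)) raises ValueError. A minimal sketch of a more defensive read, assuming python-dotenv; the fallback values are illustrative, not part of the commit:

import os
from dotenv import load_dotenv

load_dotenv()  # reads .env; by default it does not override variables already in the environment

# `or` treats the empty string a blank `KEY =` line produces as missing,
# falling back to the illustrative defaults below
segment_duration = int(os.getenv('SEGMENT_DURATION') or 30000)  # milliseconds
tmp_audio_path = os.getenv('TMP_AUDIO_PATH') or '/tmp/video_summary_bot_tmp_audio.wav'
instructions = os.getenv('OPENAI_RESPONSES_INSTRUCTIONS') or None

OPENAI_API_KEY and OPENAI_BASE_URL need no explicit reads: the OpenAI client picks both up from the environment on construction. Note also that OPENAI_RESPONSES_INSTRUCTIONS is added here but app.py below still hardcodes PROMPT; wiring it up would mean reading the variable inside summarize_transcription, after load_dotenv() has run.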

app.py

@@ -1,40 +1,48 @@
 import argparse
-import asyncio
+import os
 from dotenv import load_dotenv
 from moviepy import VideoFileClip
 from openai import OpenAI
 from pydub import AudioSegment
 from pydub.playback import play
-DEFAULT_RESPONSES_INSTRUCTIONS = "You will be provided a video transcription for which you are to generate a blog post in Markdown format summarizing the video's contents."
-TMP_AUDIO_PATH = "/tmp/video-summary-bot-tmp-audio.wav"
+PROMPT = "You will be provided a video transcription for which you are to generate a blog post in Markdown format summarizing the video contents."
-async def main():
-    parser = argparse.ArgumentParser(description="Use AI models to summarize videos")
-    parser.add_argument('--video-file', type=str, help="Path to the video to be summarized")
-    parser.add_argument('--segment-duration', type=int, help="Length of audio segments")
-    args = parser.parse_args()
-    load_dotenv()
-    VideoFileClip(args.video_file).audio.write_audiofile(TMP_AUDIO_PATH)
-    audio = AudioSegment.from_wav(TMP_AUDIO_PATH)
-    segments = [audio[i:i + args.segment_duration] for i in range(0, len(audio), args.segment_duration)]
-    # TODO: Test OpenAI
-    client = OpenAI()
-    transcription = ' '.join([
-        await client.audio.transcriptions.create(
-            model=args.transcription_model,
+def main(args):
+    openai_client = OpenAI()
+    return summarize_transcription(
+        openai_client,
+        transcribe_audio(
+            openai_client,
+            get_audio_from_video(args.video_file_path)
+        )
+    )
+def get_audio_from_video(video_file_path):
+    tmp_audio_path = os.getenv('TMP_AUDIO_PATH', '/tmp/video_summary_bot_tmp_audio.wav')
+    VideoFileClip(video_file_path).audio.write_audiofile(tmp_audio_path)
+    return AudioSegment.from_wav(tmp_audio_path)
+def transcribe_audio(openai_client, audio):
+    segment_duration = int(os.getenv('SEGMENT_DURATION', 30000))
+    transcription_model = os.getenv('OPENAI_TRANSCRIPTION_MODEL', 'whisper-1')
+    return ' '.join([
+        openai_client.audio.transcriptions.create(
+            model=transcription_model,
             file=each
-        ).text for each in segments
+        ).text for each in [audio[i:i + segment_duration] for i in range(0, len(audio), segment_duration)]
     ])
-    summary = client.responses.create(
-        model=args.responses_model,
-        instructions=DEFAULT_RESPONSES_INSTRUCTIONS,
+def summarize_transcription(openai_client, transcription):
+    responses_model = os.getenv('OPENAI_RESPONSES_MODEL', 'gpt-4o-mini')
+    return openai_client.responses.create(
+        model=responses_model,
+        instructions=PROMPT,
         input=transcription
     )
-    return summary
 if __name__ == '__main__':
-    summary = main()
-    print(summary)
+    load_dotenv()
+    parser = argparse.ArgumentParser(description="Use AI models to summarize videos")
+    parser.add_argument('--video-file-path', type=str, help="Path to the video to be summarized")
+    main(parser.parse_args())
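
One part the old # TODO: Test OpenAI comment still applies to: slicing a pydub AudioSegment yields another in-memory AudioSegment, while audio.transcriptions.create expects an uploadable file object, so file=each as written will fail at the API call. A sketch of transcribe_audio that exports each segment to an in-memory WAV first, assuming the same environment variables as above; the segment.wav name is hypothetical and exists only so the SDK can infer the upload format:

import io
import os

def transcribe_audio(openai_client, audio):
    # AudioSegment slicing is by milliseconds, so SEGMENT_DURATION is a length in ms
    segment_duration = int(os.getenv('SEGMENT_DURATION') or 30000)
    transcription_model = os.getenv('OPENAI_TRANSCRIPTION_MODEL', 'whisper-1')
    texts = []
    for start in range(0, len(audio), segment_duration):
        # export() writes WAV bytes to the buffer and rewinds it for reading
        buffer = io.BytesIO()
        audio[start:start + segment_duration].export(buffer, format='wav')
        buffer.name = 'segment.wav'  # illustrative name, used only for format detection
        texts.append(
            openai_client.audio.transcriptions.create(
                model=transcription_model,
                file=buffer
            ).text
        )
    return ' '.join(texts)

Invocation is otherwise unchanged: python app.py --video-file-path some_video.mp4, with the API key supplied via .env or the environment.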