Model prompt as env var
parent dba02ae5ea
commit 86a82822fa

app.py (5 changed lines)
@@ -6,7 +6,7 @@ from moviepy import VideoFileClip
 from openai import OpenAI
 from pydub import AudioSegment
 
-PROMPT = "You will be provided a video transcription for which you are to generate a blog post in Markdown format summarizing the video contents."
+DEFAULT_PROMPT = "You will be provided a video transcription for which you are to generate a blog post in Markdown format summarizing the video contents."
 
 def main(args):
     openai_client = OpenAI()
@@ -34,10 +34,11 @@ def transcribe_audio(openai_client, audio):
     ])
 
 def summarize_transcription(openai_client, transcription):
+    prompt = os.getenv('OPENAI_RESPONSES_PROMPT', DEFAULT_PROMPT)
     responses_model = os.getenv('OPENAI_RESPONSES_MODEL', 'whisper-1')
     return client.responses.create(
         model=responses_model,
-        instructions=PROMPT,
+        instructions=prompt,
         input=transcription
     )
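Usage note (not part of the commit): a minimal sketch of how the new environment variable is picked up, assuming app.py already imports os and keeps DEFAULT_PROMPT as defined in the first hunk. The override values below are placeholders, not anything prescribed by the change.

import os

DEFAULT_PROMPT = ("You will be provided a video transcription for which you are to "
                  "generate a blog post in Markdown format summarizing the video contents.")

# os.getenv returns the fallback when the variable is unset, so the
# hard-coded prompt and model remain the default behaviour.
prompt = os.getenv('OPENAI_RESPONSES_PROMPT', DEFAULT_PROMPT)
responses_model = os.getenv('OPENAI_RESPONSES_MODEL', 'whisper-1')

# Overriding both for a single run (placeholder values; normally these would
# be exported in the shell or a .env file rather than set in code):
os.environ['OPENAI_RESPONSES_PROMPT'] = "Summarize the transcript as concise release notes."
os.environ['OPENAI_RESPONSES_MODEL'] = "gpt-4o-mini"
prompt = os.getenv('OPENAI_RESPONSES_PROMPT', DEFAULT_PROMPT)  # now the override value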