import gradio as gr
import openai
from gtts import gTTS
import tempfile


def generate_stage_directions(location, situation, api_key):
    # Build the prompt; the f-string prefixes are needed so {location} and {situation} are interpolated
    prompt = (
        f"Write detailed stage directions for a scene with 5 characters, set in a {location}. "
        "You are reading these directions out loud to an audience, so keep the stage directions conversational. "
        "Do not break it down into different sections. Each character enters one by one, two of them enter as a couple. "
        "As they enter, tell us their name and describe their physical characteristics, their emotional state and their actions, "
        "gestures and movements in the scene. Write detailed stage directions on how they interact with the location they are in "
        "and with each other, with detailed descriptions of their movements, actions and gestures in the scene. Make the overall "
        "scene highly dramatic, full of twists and turns, with lots of movement by the characters, who keep changing positions "
        f"and moving around. At some point, a {situation} happens in the scene. Show the characters interacting with elements of "
        "the location. Describe their emotions and facial expressions in vivid detail. You will also write dialogues for "
        "each character. Keep the dialogues short. Keep the scene mostly non-verbal, with only a few dialogues. Make the scene "
        "very dramatic, emotional and thrilling. Keep your response limited to 750 words."
    )
    openai.api_key = api_key  # Set the API key from the user input

    try:
        # Legacy completions endpoint (openai<1.0 SDK); text-davinci-003 has since been retired by OpenAI
        response = openai.Completion.create(
            engine="text-davinci-003",
            prompt=prompt,
            max_tokens=750,
            temperature=0.7,
        )
        stage_directions = response.choices[0].text.strip()
        # Convert the generated stage directions to speech and return the audio file path
        response_audio_path = text_to_audio(stage_directions)
        return response_audio_path
    except Exception as e:
        # Surface the error in the Gradio UI instead of returning a plain string to the Audio output
        raise gr.Error(str(e))
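
# A minimal sketch (not part of the original Space) of the same request using the
# current openai>=1.0 SDK, since openai.Completion and text-davinci-003 have been
# retired. The model name "gpt-3.5-turbo" is an illustrative assumption; left
# commented out so it does not conflict with the legacy call above.
#
# from openai import OpenAI
#
# def generate_stage_directions_chat(prompt, api_key):
#     client = OpenAI(api_key=api_key)
#     response = client.chat.completions.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": prompt}],
#         max_tokens=750,
#         temperature=0.7,
#     )
#     return response.choices[0].message.content.strip()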

def text_to_audio(text):
    # Synthesize the text with gTTS and save it to a temporary MP3 file for Gradio to play
    tts = gTTS(text, lang='en')
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
    tts.save(temp_file.name)
    return temp_file.name

# Create Gradio UI
iface = gr.Interface(
    fn=generate_stage_directions,
    inputs=[
        gr.Textbox(label="Location"),
        gr.Textbox(label="Situation"),
        gr.Textbox(label="API Key"),
    ],
    outputs=gr.Audio(type='filepath', label="Stage Directions"),
    live=True,  # live=True re-runs the function on every input change, i.e. one API call per edit
    title="DramaDirector",
    description="Input a location, situation, and your OpenAI API key to generate stage directions.",
)

iface.launch()
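
# To run this app locally, the dependencies are roughly (a sketch; exact pins are assumptions):
#   pip install gradio gTTS "openai<1.0"
# The openai<1.0 pin is needed because openai.Completion.create was removed in the 1.x SDK.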