Upload 6 files
- app.py +130 -0
- dalle_module.py +22 -0
- gpt_module.py +24 -0
- stt.py +17 -0
- translation_module.py +18 -0
- tts_module.py +16 -0
app.py
ADDED
@@ -0,0 +1,130 @@
# Earlier CLI version, kept commented out for reference:
# from gpt_module import generate_response
# from dalle_module import generate_image
# from translation_module import translation_to_english
# from tts_module import text_to_speech
# from stt import transcribe_audio
#
# def main():
#     print("Welcome to the ChatGPT CLI application!")
#     while True:
#         user_input = input("Enter command: ")
#
#         if user_input.startswith('/imagine'):
#             img = user_input.replace('/imagine', '').strip()
#             print(generate_image(img))
#
#         elif user_input.startswith('/chat'):
#             prompt = user_input.replace('/chat', '').strip()
#             print(generate_response(prompt))
#
#         elif user_input.startswith('/translate'):
#             tra = user_input.replace('/translate', '').strip()
#             print(translation_to_english(tra))
#
#         elif user_input.startswith('/voice'):
#             text = user_input.replace('/voice', '').strip()
#             print(text_to_speech(text))
#
#         elif user_input.startswith('/transcribe'):
#             print("Please upload the audio file.")
#             audio_file = input("Enter the path to the audio file: ")
#             print(transcribe_audio(audio_file))
#
#         elif user_input.startswith('/help'):
#             print_help()
#
#         else:
#             print("Unknown command. Type '/help' for a list of commands.")
#
# def print_help():
#     help_text = """
#     👉 To generate an image, type '/imagine <prompt>'
#     👉 To chat with the model, type '/chat <prompt>'
#     👉 To translate text to English, type '/translate <text>'
#     👉 For Text to Speech, type '/voice <some text>'
#     👉 To transcribe audio to text, type '/transcribe' and follow the prompts
#     👉 For help, type '/help'
#     """
#     print(help_text)
#
# if __name__ == '__main__':
#     main()


import streamlit as st
from gpt_module import generate_response
from dalle_module import generate_image
from translation_module import translation_to_english
from tts_module import text_to_speech
from stt import transcribe_audio
import os


def main():
    st.sidebar.title("Hey Rehan Assistant")
    user_input = st.sidebar.selectbox(
        "Select command:",
        ["/imagine", "/chat", "/translate", "/voice", "/transcribe", "/help"],
    )

    if user_input == "/imagine":
        st.subheader('Prompt To Image Generation', divider='rainbow')
        prompt = st.text_input("Enter a prompt to generate an image:")
        if st.button("Generate Image"):
            # generate_image returns the URL of the generated image
            st.image(generate_image(prompt))

    elif user_input == "/chat":
        st.subheader('Hey Rehan AI Assistant', divider='rainbow')
        prompt = st.text_input("Ask anything you want to know")
        if st.button("Chat"):
            response = generate_response(prompt)
            st.success(response)

    elif user_input == "/translate":
        st.subheader('Translate Into English', divider='rainbow')
        audio_file = st.file_uploader("Upload Audio File", type=["mp3", "wav"])
        if audio_file is not None:
            # st.audio(audio_file, format='audio/wav')
            if st.button("Translate to English"):
                result = translation_to_english(audio_file)
                st.success(result)

    elif user_input == "/voice":
        st.subheader('Text to Speech', divider='rainbow')
        text = st.text_input("Enter text for Text to Speech:")
        if st.button("Convert to Speech"):
            audio_bytes = text_to_speech(text)
            # tts-1 returns mp3 bytes by default
            st.audio(audio_bytes, format='audio/mp3')

    elif user_input == "/transcribe":
        st.subheader('Audio to Text Transcription', divider='rainbow')
        uploaded_file = st.file_uploader("Upload audio file", type=["mp3", "wav"])
        if uploaded_file is not None:
            if st.button("Transcribe Audio"):
                # Save the upload to a temporary file so the STT module can reopen it by path
                with open("temp_audio_file.mp3", "wb") as f:
                    f.write(uploaded_file.getvalue())

                transcription = transcribe_audio("temp_audio_file.mp3")
                st.success(transcription)

                os.remove("temp_audio_file.mp3")

    elif user_input == "/help":
        print_help()


def print_help():
    help_text = """
    👉 To generate an image, select '/imagine' and enter a prompt.
    👉 To chat with the model, select '/chat' and enter a prompt.
    👉 To translate audio to English, select '/translate' and upload an audio file.
    👉 For Text to Speech, select '/voice' and enter some text.
    👉 To transcribe audio to text, select '/transcribe' and upload the audio file.
    👉 For help, select '/help'.
    """
    st.write(help_text)


if __name__ == '__main__':
    main()
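Every module below builds its OpenAI client from os.getenv("openai_api_key") after load_dotenv(), so the Space presumably relies on a .env file or a Space secret that is not part of this upload. A minimal sketch for verifying the key locally before launching the app with `streamlit run app.py`; the helper file name and messages are illustrative, not part of the repository:

# check_env.py - hypothetical helper, not included in this upload
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory

# The modules in this Space look up the lowercase variable name "openai_api_key"
if not os.getenv("openai_api_key"):
    raise SystemExit("openai_api_key is not set; add it to .env, then run: streamlit run app.py")

print("openai_api_key found; the Streamlit app should be able to create its OpenAI clients.")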
dalle_module.py
ADDED
@@ -0,0 +1,22 @@
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

client = OpenAI(api_key=os.getenv("openai_api_key"))


def generate_image(img):
    # Request a single 1024x1024 image from DALL-E 3 and return its hosted URL
    response = client.images.generate(
        model="dall-e-3",
        prompt=img,
        n=1,
        size="1024x1024"
    )

    return response.data[0].url
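A quick way to exercise generate_image outside Streamlit is a small smoke test like the sketch below; the prompt string is illustrative and assumes the .env key described above is in place:

# Hypothetical smoke test for dalle_module (not part of the upload)
from dalle_module import generate_image

# generate_image returns the hosted URL of the single DALL-E 3 image it requests
url = generate_image("a watercolor painting of a lighthouse at sunset")
print(url)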
gpt_module.py
ADDED
@@ -0,0 +1,24 @@
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

client = OpenAI(api_key=os.getenv("openai_api_key"))


def generate_response(prompt):
    # Send a fixed system prompt plus the user prompt and return only the reply text
    completion = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
    )
    return completion.choices[0].message.content
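generate_response can be checked the same way; the question below is just an example prompt:

# Hypothetical smoke test for gpt_module (not part of the upload)
from gpt_module import generate_response

print(generate_response("Explain what a Hugging Face Space is in one sentence."))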
stt.py
ADDED
@@ -0,0 +1,17 @@
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

client = OpenAI(api_key=os.getenv("openai_api_key"))


def transcribe_audio(audio_file):
    # Open the audio file on disk and send it to Whisper for transcription.
    # The handle gets its own name so it does not shadow the path argument.
    with open(audio_file, "rb") as f:
        transcription = client.audio.transcriptions.create(
            model="whisper-1",
            file=f,
            response_format="text"
        )
    return transcription
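transcribe_audio expects a path on disk, which is why app.py first writes the uploaded file to temp_audio_file.mp3. A hedged usage sketch, where the input file name is an assumption:

# Hypothetical usage of stt.transcribe_audio (not part of the upload)
from stt import transcribe_audio

# Pass a path to an mp3/wav file; Whisper returns the plain transcription text
text = transcribe_audio("sample_recording.mp3")
print(text)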
translation_module.py
ADDED
@@ -0,0 +1,18 @@
from openai import OpenAI
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Initialize OpenAI client
client = OpenAI(api_key=os.getenv("openai_api_key"))


def translation_to_english(audio_file):
    # Send the audio (an open file or file-like object) to Whisper's translations
    # endpoint, which returns an English translation of the speech
    transcript = client.audio.translations.create(
        model="whisper-1",
        file=audio_file
    )
    return transcript.text
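Unlike transcribe_audio, translation_to_english takes a file object rather than a path; app.py hands it the Streamlit UploadedFile directly, and an open binary file handle should work the same way. A sketch with an assumed file name:

# Hypothetical usage of translation_module (not part of the upload)
from translation_module import translation_to_english

# Whisper's translations endpoint returns an English translation of the speech
with open("speech_in_another_language.mp3", "rb") as audio:
    print(translation_to_english(audio))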
tts_module.py
ADDED
@@ -0,0 +1,16 @@
from openai import OpenAI
from dotenv import load_dotenv
import os

load_dotenv()

client = OpenAI(api_key=os.getenv("openai_api_key"))


def text_to_speech(text):
    # Synthesize speech with the "alloy" voice and return the raw audio bytes (mp3 by default)
    response = client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input=text
    )

    return response.content
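text_to_speech returns the raw audio bytes from response.content, which app.py streams straight into st.audio. Outside Streamlit the bytes can simply be written to a file, as in this sketch (the sample text and output file name are assumptions):

# Hypothetical usage of tts_module (not part of the upload)
from tts_module import text_to_speech

audio_bytes = text_to_speech("Hello from the Hey Rehan assistant.")

# Persist the synthesized speech so it can be played back with any audio player
with open("speech.mp3", "wb") as f:
    f.write(audio_bytes)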