# import part
import streamlit as st
from transformers import pipeline
from gtts import gTTS
import io
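# Dependencies (illustrative, assuming the models below): the transformers
# pipelines need a backend such as torch, so a minimal local install would be
#   pip install streamlit transformers torch gtts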
# function part
# img2text: caption an image file with a BLIP model
def img2text(url):
    image_to_text_model = pipeline("image-to-text",
                                   model="Salesforce/blip-image-captioning-base")
    text = image_to_text_model(url)[0]["generated_text"]
    return text
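# Note: the pipeline above reloads model weights on every Streamlit rerun.
# A possible optimization (a sketch, not wired in; the helper name is
# illustrative) is to cache the loader with st.cache_resource, Streamlit's
# decorator for sharing heavy objects across reruns:
# @st.cache_resource
# def load_captioning_pipeline():
#     return pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")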
# text2story: continue the caption into a short story with a text-generation model
def text2story(text):
    story_pipeline = pipeline("text-generation", model="agentica-org/DeepScaleR-1.5B-Preview")
    result = story_pipeline(text, max_length=200, num_return_sequences=1)
    story_text = result[0]['generated_text']
    return story_text
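# Note: text-generation pipelines return the prompt plus the continuation by
# default, so the caption is echoed at the start of the story. A hedged
# alternative call that keeps only the newly generated text:
# result = story_pipeline(text, max_new_tokens=180, return_full_text=False)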
# text2audio: synthesize speech for the story with gTTS
def text2audio(story_text):
    tts = gTTS(text=story_text, lang='en')
    audio_file = io.BytesIO()
    tts.write_to_fp(audio_file)
    audio_file.seek(0)
    # gTTS writes MP3 bytes; return the buffer directly, since st.audio
    # accepts file-like objects and ignores sample_rate for them
    return audio_file
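# Usage note (illustrative): the returned buffer can be played directly,
# e.g. st.audio(text2audio("Hello there"), format="audio/mp3").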
# main part
st.set_page_config(page_title="Your Image to Audio Story",
                   page_icon="🦜")
st.header("Turn Your Image to Audio Story")
uploaded_file = st.file_uploader("Select an Image...")

if uploaded_file is not None:
    # Save the upload to disk so the captioning pipeline can read it by path
    bytes_data = uploaded_file.getvalue()
    with open(uploaded_file.name, "wb") as file:
        file.write(bytes_data)
    st.image(uploaded_file, caption="Uploaded Image",
             use_column_width=True)
    # Stage 1: Image to Text
    st.text('Processing img2text...')
    scenario = img2text(uploaded_file.name)
    st.write(scenario)

    # Stage 2: Text to Story
    st.text('Generating a story...')
    story = text2story(scenario)
    st.write(story)

    # Stage 3: Story to Audio data
    st.text('Generating audio data...')
    audio_data = text2audio(story)

    # Play button (gTTS produces MP3, so play it back as audio/mp3)
    if st.button("Play Audio"):
        st.audio(audio_data,
                 format="audio/mp3",
                 start_time=0)
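# To run locally (assuming Streamlit and the other dependencies are installed):
#   streamlit run app.py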