import streamlit as st
import os
from transformers import pipeline
import requests
from deep_translator import GoogleTranslator
import io
from PIL import Image
import time
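# Hugging Face API token, read from the environment variable / Space secret named "hugkey".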
API_TOKEN = os.getenv("hugkey")
st.header("Multi-model project")
st.write("This app will convert regional language sentence into english and also generate text and image related to the context")
text = st.text_input("Enter a text",placeholder="Type a sentence in Your Language")
#tran_API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-mul-en"
#tran_API_URL = "https://api-inference.huggingface.co/models/google-t5/t5-small"
#tran_API_URL = "https://api-inference.huggingface.co/models/facebook/m2m100_418M"
#generate_API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
generate_API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-1B"
image_API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
headers = {"Authorization": f"Bearer {API_TOKEN}"}
def Translate_query(input_text):
    # Translate the input sentence to English (deep_translator auto-detects the source language).
    translator = GoogleTranslator(target='en')
    translation = translator.translate(input_text)
    return translation
    # Earlier approach via the Hugging Face Inference API:
    # payload = {"inputs": f"{input_text}", "parameters": {"forced_bos_token_id": 128000}}
    # response = requests.post(tran_API_URL, headers=headers, json=payload)
    # return response.json()
def Generate_query(payload):
    # Text generation via the Hugging Face Inference API; the response is expected
    # to be a list like [{"generated_text": ...}].
    response = requests.post(generate_API_URL, headers=headers, json=payload)
    return response.json()
def Image_query(payload):
    # Image generation via the Hugging Face Inference API; returns the raw response,
    # whose body contains the image bytes on success.
    response = requests.post(image_API_URL, headers=headers, json=payload)
    return response
def stream_data(text_data):
    # Yield the generated text word by word so it can be rendered as a stream.
    for word in text_data.split(" "):
        yield word + " "
        time.sleep(0.05)
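# Main UI: on "Generate", translate the input to English, then show the translated and
# generated text (left column) and the generated image (right column).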
if st.button("Generate"):
col1,col2 = st.columns(2)
with col1:
translated_output = Translate_query(text)
#translated_output = translated_output[0]["translation_text"]
st.subheader("Text Translation")
st.success(translated_output)
st.subheader("Text Generation")
generated_output = Generate_query({"inputs": translated_output,})
generated_output = generated_output[0]['generated_text']
st.write(stream_data(generated_output))
with col2:
st.subheader("Image Generation")
image_response = Image_query({"inputs": translated_output,})
if image_response.status_code == 200:
image_bytes = image_response.content # Get the binary content
try:
image = Image.open(io.BytesIO(image_bytes))# Open the image using PIL
# Display the image
st.image(image)
except Exception as e:
print("Error opening image:", e)
else:
print("Failed to retrieve image:", image_response.status_code, image_response.text) |