# Earlier approach (kept for reference): download the fine-tuned model from Google Drive.
# https://drive.google.com/drive/folders/17G-ejd4scK1DYko5k0ssXZjEy7P-ClI6
#
# import streamlit as st
# import gdown
# import torch
# from transformers import pipeline
#
# # Function to download the model from Google Drive
# def download_file_from_drive(file_id, output_path):
#     url = f'https://drive.google.com/uc?id={file_id}'
#     gdown.download(url, output_path, quiet=False)
#
# # Replace 'YOUR_FILE_ID' with the actual file ID of your model
# file_id = '1A2B3C4D5E6F7G8H9I0J'

import streamlit as st
from transformers import T5ForConditionalGeneration, T5Tokenizer, pipeline

# Hugging Face model repository path (replace with your own fine-tuned model)
model_repo_path = 'AbdurRehman313/T5_samsum_model_files'

# Load the model and tokenizer
model = T5ForConditionalGeneration.from_pretrained(model_repo_path)
tokenizer = T5Tokenizer.from_pretrained(model_repo_path)

# Initialize the summarization pipeline
summarizer = pipeline('summarization', model=model, tokenizer=tokenizer)

# Streamlit app layout
st.title("Text Summarization App")

# User input
text_input = st.text_area("Enter text to summarize", height=300)

# Summarize the text
if st.button("Summarize"):
    if text_input:
        with st.spinner("Generating summary..."):
            try:
                summary = summarizer(text_input, max_length=150, min_length=30, do_sample=False)
                st.subheader("Summary")
                st.write(summary[0]['summary_text'])
            except Exception as e:
                st.error(f"Error during summarization: {e}")
    else:
        st.warning("Please enter some text to summarize.")
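
# Optional improvement, a minimal sketch (assuming Streamlit >= 1.18, where
# st.cache_resource is available): cache the model load so the T5 weights are not
# re-loaded on every script rerun. This would replace the module-level loading above.
#
# @st.cache_resource
# def load_summarizer(repo_path):
#     model = T5ForConditionalGeneration.from_pretrained(repo_path)
#     tokenizer = T5Tokenizer.from_pretrained(repo_path)
#     return pipeline('summarization', model=model, tokenizer=tokenizer)
#
# summarizer = load_summarizer(model_repo_path)
#
# To run the app locally (assuming this file is saved as app.py and streamlit,
# transformers, torch, and sentencepiece are installed):
#   streamlit run app.py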