import torch
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import os, gc, logging
from threading import Thread
import random
from datasets import load_dataset
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from typing import List, Tuple, Iterator
import json
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache
import pyarrow.parquet as pq
import pypdf
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams
from tabulate import tabulate
from pydantic import BaseModel
import unittest
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('app.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# Configuration class
class Config:
def __init__(self):
self.MODEL_ID = "CohereForAI/c4ai-command-r7b-12-2024"
self.MAX_HISTORY = 10
self.MAX_TOKENS = 4096
self.DEFAULT_TEMPERATURE = 0.8
self.HF_TOKEN = os.environ.get("HF_TOKEN", None)
self.MODELS = os.environ.get("MODELS")
config = Config()
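# Global state (assumed minimal setup): the tokenizer is loaded eagerly, while
# the model weights are loaded lazily inside the GPU context on the first
# request (see load_model / stream_chat below).
tokenizer = AutoTokenizer.from_pretrained(config.MODEL_ID, token=config.HF_TOKEN)
model = None
current_file_context = ""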
# Custom exception class
class FileProcessingError(Exception):
pass
# Response model
class ChatResponse(BaseModel):
message: str
status: str
timestamp: datetime
# ํŒŒ์ผ ์ฒ˜๋ฆฌ ํด๋ž˜์Šค
class FileProcessor:
@staticmethod
def process_pdf(file_path):
        try:
            # Layout-aware text extraction via pdfminer; the margin settings
            # keep words and lines grouped sensibly in the output.
            text = extract_text(
                file_path,
                laparams=LAParams(
                    line_margin=0.5,
                    word_margin=0.1,
                    char_margin=2.0,
                    all_texts=True
                )
            )
            return text
except Exception as e:
raise FileProcessingError(f"PDF processing error: {str(e)}")
@staticmethod
def process_csv(file_path):
try:
encodings = ['utf-8', 'cp949', 'euc-kr', 'latin1']
for encoding in encodings:
try:
return pd.read_csv(file_path, encoding=encoding)
except UnicodeDecodeError:
continue
raise FileProcessingError("Unable to read CSV with supported encodings")
except Exception as e:
raise FileProcessingError(f"CSV processing error: {str(e)}")
# Memory management
@torch.no_grad()
def clear_cuda_memory():
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
# ๋ชจ๋ธ ๋กœ๋“œ
@spaces.GPU
def load_model():
try:
        model = AutoModelForCausalLM.from_pretrained(
            config.MODEL_ID,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            token=config.HF_TOKEN,  # the model repo is gated; pass the token from the env
        )
return model
except Exception as e:
logger.error(f"Model loading error: {str(e)}")
raise
# Context retrieval (TF-IDF over the Wikipedia QnA dataset)
@lru_cache(maxsize=100)
def find_relevant_context(query, top_k=3):
    try:
        # TfidfVectorizer L2-normalizes rows by default, so this sparse dot
        # product is the cosine similarity between the query and each question.
        query_vector = vectorizer.transform([query])
        similarities = (query_vector * question_vectors.T).toarray()[0]
top_indices = np.argsort(similarities)[-top_k:][::-1]
relevant_contexts = []
for idx in top_indices:
if similarities[idx] > 0:
relevant_contexts.append({
'question': questions[idx],
'answer': wiki_dataset['train']['answer'][idx],
'similarity': similarities[idx]
})
return relevant_contexts
except Exception as e:
logger.error(f"Context search error: {str(e)}")
return []
# Streaming chat
@spaces.GPU
def stream_chat(message: str, history: list, uploaded_file, temperature: float,
max_new_tokens: int, top_p: float, top_k: int, penalty: float) -> Iterator[Tuple[str, list]]:
"""
์ŠคํŠธ๋ฆฌ๋ฐ ์ฑ„ํŒ… ์‘๋‹ต์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.
Args:
message (str): ์‚ฌ์šฉ์ž ์ž…๋ ฅ ๋ฉ”์‹œ์ง€
history (list): ๋Œ€ํ™” ํžˆ์Šคํ† ๋ฆฌ
uploaded_file: ์—…๋กœ๋“œ๋œ ํŒŒ์ผ
temperature (float): ์ƒ์„ฑ ์˜จ๋„
max_new_tokens (int): ์ตœ๋Œ€ ํ† ํฐ ์ˆ˜
top_p (float): ์ƒ์œ„ p ์ƒ˜ํ”Œ๋ง
top_k (int): ์ƒ์œ„ k ์ƒ˜ํ”Œ๋ง
penalty (float): ๋ฐ˜๋ณต ํŽ˜๋„ํ‹ฐ
Returns:
Iterator[Tuple[str, list]]: ์ƒ์„ฑ๋œ ์‘๋‹ต๊ณผ ์—…๋ฐ์ดํŠธ๋œ ํžˆ์Šคํ† ๋ฆฌ
"""
global model, current_file_context
try:
if model is None:
model = load_model()
logger.info(f'Processing message: {message}')
logger.debug(f'History length: {len(history)}')
# ํŒŒ์ผ ์ฒ˜๋ฆฌ
file_context = ""
if uploaded_file:
try:
file_ext = os.path.splitext(uploaded_file.name)[1].lower()
if file_ext == '.pdf':
content = FileProcessor.process_pdf(uploaded_file.name)
elif file_ext == '.csv':
content = FileProcessor.process_csv(uploaded_file.name)
else:
content = safe_file_read(uploaded_file.name)
file_context = analyze_file_content(content, file_ext)
current_file_context = file_context
except Exception as e:
logger.error(f"File processing error: {str(e)}")
file_context = f"\n\nโŒ File analysis error: {str(e)}"
        # Retrieve related context and fold it into the prompt
        relevant_contexts = find_relevant_context(message)
        wiki_context = ""
        if relevant_contexts:
            wiki_context = "\n\nRelated Wikipedia information:\n" + "\n".join([
                f"Q: {ctx['question']}\nA: {ctx['answer']}\nSimilarity: {ctx['similarity']:.3f}"
                for ctx in relevant_contexts
            ])
        # Tokenize and generate
        conversation = []
        for user_msg, assistant_msg in history[-config.MAX_HISTORY:]:
            conversation.append({"role": "user", "content": user_msg})
            conversation.append({"role": "assistant", "content": assistant_msg})
        final_message = f"{file_context}{wiki_context}\nCurrent question: {message}"
        conversation.append({"role": "user", "content": final_message})
        inputs = tokenizer(
            tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True),
            return_tensors="pt"
        ).to("cuda")
        streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
        generate_kwargs = dict(
            inputs,
            streamer=streamer,
            top_k=top_k,
            top_p=top_p,
            repetition_penalty=penalty,
            max_new_tokens=min(max_new_tokens, config.MAX_TOKENS),  # cap at the configured maximum
            do_sample=True,
            temperature=temperature,
            eos_token_id=[255001],  # <|END_OF_TURN_TOKEN|> for Command R models
        )
clear_cuda_memory()
thread = Thread(target=model.generate, kwargs=generate_kwargs)
thread.start()
buffer = ""
for new_text in streamer:
buffer += new_text
yield "", history + [[message, buffer]]
clear_cuda_memory()
except Exception as e:
logger.error(f"Stream chat error: {str(e)}")
yield "", history + [[message, f"Error: {str(e)}"]]
clear_cuda_memory()
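# UPDATED_CSS is used by create_demo below; since no stylesheet is defined in
# this file, the block here is a minimal assumed placeholder.
UPDATED_CSS = """
.markdown-style { padding: 8px; }
.chat-container { border-radius: 10px; }
.input-container { margin-top: 8px; }
.input-textbox { font-size: 1rem; }
.file-upload-icon { max-width: 100%; }
.custom-button { min-width: 70px; }
"""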
# UI creation
def create_demo():
with gr.Blocks(css=UPDATED_CSS) as demo:
        # UI component layout
with gr.Column(elem_classes="markdown-style"):
            gr.Markdown("""
            # 🤖 RAGOndevice
            #### 📊 RAG: Upload and Analyze Files (TXT, CSV, PDF, Parquet files)
            Upload your files for data analysis and learning
            """)
chatbot = gr.Chatbot(
value=[],
height=600,
label="GiniGEN AI Assistant",
elem_classes="chat-container"
)
        # Input components
with gr.Row(elem_classes="input-container"):
with gr.Column(scale=1, min_width=70):
file_upload = gr.File(
type="filepath",
elem_classes="file-upload-icon",
scale=1,
container=True,
interactive=True,
show_label=False
)
with gr.Column(scale=3):
msg = gr.Textbox(
show_label=False,
                    placeholder="Type your message here... 💭",
container=False,
elem_classes="input-textbox",
scale=1
)
with gr.Column(scale=1, min_width=70):
send = gr.Button(
"Send",
elem_classes="send-button custom-button",
scale=1
)
with gr.Column(scale=1, min_width=70):
clear = gr.Button(
"Clear",
elem_classes="clear-button custom-button",
scale=1
)
        # Advanced settings
        with gr.Accordion("🎮 Advanced Settings", open=False):
with gr.Row():
with gr.Column(scale=1):
                    temperature = gr.Slider(
                        minimum=0, maximum=1, step=0.1, value=config.DEFAULT_TEMPERATURE,
                        label="Creativity Level 🎨"
                    )
                    max_new_tokens = gr.Slider(
                        minimum=128, maximum=config.MAX_TOKENS, step=1, value=config.MAX_TOKENS,
                        label="Maximum Token Count 📏"
                    )
                with gr.Column(scale=1):
                    top_p = gr.Slider(
                        minimum=0.0, maximum=1.0, step=0.1, value=0.8,
                        label="Diversity Control 🎯"
                    )
                    top_k = gr.Slider(
                        minimum=1, maximum=20, step=1, value=20,
                        label="Selection Range 📊"
                    )
                    penalty = gr.Slider(
                        minimum=0.0, maximum=2.0, step=0.1, value=1.0,
                        label="Repetition Penalty 🔄"
                    )
        # Event bindings
msg.submit(stream_chat, [msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty], [msg, chatbot])
send.click(stream_chat, [msg, chatbot, file_upload, temperature, max_new_tokens, top_p, top_k, penalty], [msg, chatbot])
clear.click(lambda: ([], None, ""), outputs=[chatbot, file_upload, msg])
return demo
# ๋ฉ”์ธ ์‹คํ–‰
if __name__ == "__main__":
    # Load the Korean Wikipedia QnA dataset
wiki_dataset = load_dataset("lcw99/wikipedia-korean-20240501-1million-qna")
logger.info("Wikipedia dataset loaded")
    # Initialize the TF-IDF vectorizer over the first 10,000 questions
    questions = wiki_dataset['train']['question'][:10000]
vectorizer = TfidfVectorizer(max_features=1000)
question_vectors = vectorizer.fit_transform(questions)
logger.info("TF-IDF vectorization completed")
    # Launch the UI
demo = create_demo()
demo.launch()
# Test code
class TestChatBot(unittest.TestCase):
    def test_file_processing(self):
        # A missing file should surface as FileProcessingError
        with self.assertRaises(FileProcessingError):
            FileProcessor.process_pdf("nonexistent.pdf")

    def test_context_search(self):
        # Without the TF-IDF globals loaded, the search must fail gracefully
        self.assertEqual(find_relevant_context("test query"), [])
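# Usage note: run the tests above with `python -m unittest app`; the Gradio
# app only launches when this file is executed directly.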