Spaces:
Create app.py
app.py
ADDED
@@ -0,0 +1,111 @@
import os
import requests
import gradio as gr
from cerebras.cloud.sdk import Cerebras
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Initialize the Cerebras API client (API key is read from the "Facto" secret)
Facts = os.getenv("Facto")
client = Cerebras(api_key=Facts)

# NewsAPI key (read from the "News" secret)
Newskey = os.getenv("News")

# Fetch the latest news articles for a query from NewsAPI
def get_latest_news(query):
    url = f"https://newsapi.org/v2/everything?q={query}&apiKey={Newskey}"
    response = requests.get(url, timeout=10)
    data = response.json()
    # Return (title, url, source name) for up to five articles
    return [(article["title"], article["url"], article["source"]["name"])
            for article in data.get("articles", [])[:5]]

# Overwrite fact_checks.txt with the new user input
def update_fact_checks_file(query):
    with open("fact_checks.txt", "w", encoding="utf-8") as file:
        file.write(f"{query}\n")

# Build a FAISS retriever over the current contents of fact_checks.txt
def create_faiss_retriever():
    if not os.path.exists("fact_checks.txt"):
        open("fact_checks.txt", "w").close()  # Create the file if it doesn't exist

    loader = TextLoader("fact_checks.txt")
    documents = loader.load()

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=50)
    docs = text_splitter.split_documents(documents)

    embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vector_store = FAISS.from_documents(docs, embedding_model)

    return vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 8})

# Clear fact_checks.txt after execution
def clear_fact_checks_file():
    open("fact_checks.txt", "w").close()

# Perform fact-checking with Llama 3.3
def fact_check_with_llama3(query):
    # Save the query to fact_checks.txt
    update_fact_checks_file(query)

    # Rebuild the FAISS index with the new data
    retriever = create_faiss_retriever()

    # Retrieve relevant facts from FAISS
    retrieved_docs = retriever.invoke(query)
    retrieved_texts = [doc.page_content for doc in retrieved_docs]

    # Fetch real-time news
    news = get_latest_news(query)

    # Combine all retrieved context
    context_text = "\n".join(retrieved_texts)

    # Construct the prompt for Llama 3.3
    prompt = f"""
Claim: {query}
Context: {context_text}
Based on the provided context, determine whether the claim is True, False, or Misleading. Provide a concise explanation and cite relevant sources. Don't mention any instance of your knowledge cut-off.
"""

    # Call the Llama 3.3 model via the Cerebras API (streaming)
    stream = client.chat.completions.create(
        messages=[{"role": "system", "content": prompt}],
        model="llama-3.3-70b",
        stream=True,
        max_completion_tokens=512,
        temperature=0.2,
        top_p=1,
    )

    # Assemble the streamed response
    result = "".join(chunk.choices[0].delta.content or "" for chunk in stream)

    # Format the news results as cited sources
    sources = "\n".join(f"{title} ({source}): {url}" for title, url, source in news)

    # Clear the file after execution
    clear_fact_checks_file()

    return result, (sources if sources else "No relevant sources found.")

# Gradio interface
def fact_check_interface(query):
    response, sources = fact_check_with_llama3(query)
    return response, sources

gui = gr.Interface(
    fn=fact_check_interface,
    inputs=gr.Textbox(placeholder="Enter a claim to fact-check"),
    outputs=[gr.Textbox(label="Fact-Check Result"), gr.Textbox(label="Sources")],
    title="Facto - AI Fact-Checking System",
    description="Enter a claim, and the system will verify it using Llama 3.3 and external knowledge sources, citing relevant sources.",
)

gui.launch(debug=True)
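
# To run outside Spaces, export the same secrets the app reads above and launch:
#   export Facto=<your Cerebras API key>
#   export News=<your NewsAPI key>
#   python app.py
# Dependency names below are assumed from the imports, not pinned by this commit:
#   gradio, requests, cerebras-cloud-sdk, langchain, langchain-community,
#   langchain-huggingface, faiss-cpu, sentence-transformers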