Spaces:
Sleeping
Sleeping
feat: extract spans and HF_TOKEN import
Browse files
app.py
CHANGED
@@ -7,6 +7,7 @@ from pathlib import Path
|
|
7 |
import gradio as gr
|
8 |
from leptonai import Client
|
9 |
|
|
|
10 |
LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
|
11 |
|
12 |
client = Client("https://yb15a7dy-glider.tin.lepton.run", "glider", LEPTON_API_TOKEN)
|
@@ -120,6 +121,24 @@ def format_string(retrieved_context, user_input, model_output, gold_answer):
|
|
120 |
parts.append(f"<GOLD ANSWER>\n{gold_answer}\n</GOLD ANSWER>")
|
121 |
return "\n".join(parts)
|
122 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
123 |
def model_call(model_output, user_input, gold_answer, retrieved_context, pass_criteria, rubric):
|
124 |
if model_output == "" or user_input == "" or pass_criteria == "":
|
125 |
return "", "", ""
|
|
|
7 |
import gradio as gr
|
8 |
from leptonai import Client
|
9 |
|
10 |
+
HF_TOKEN = os.environ.get("HF_TOKEN", None)
|
11 |
LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
|
12 |
|
13 |
client = Client("https://yb15a7dy-glider.tin.lepton.run", "glider", LEPTON_API_TOKEN)
|
|
|
121 |
parts.append(f"<GOLD ANSWER>\n{gold_answer}\n</GOLD ANSWER>")
|
122 |
return "\n".join(parts)
|
123 |
|
124 |
+
def extract_spans(input_string):
    """Extract the judge's score, reasoning, and highlight from model output.

    Parameters
    ----------
    input_string : str
        Raw model output expected to contain ``<reasoning>``, ``<highlight>``,
        and ``<score>`` tagged sections.

    Returns
    -------
    tuple
        ``(score, reasoning, highlight)`` where ``score`` is an ``int`` or
        ``None``, and ``reasoning`` / ``highlight`` are ``str`` or ``None``
        when the corresponding tag is absent.
    """
    # Regex patterns to extract content within the reasoning, highlight, and
    # score tags. The final '>' of </score> is optional (>?), tolerating a
    # generation truncated mid-tag; this replaces the original's obscure
    # equivalent `(?:\>|)` empty-alternative group.
    reasoning_pattern = r"<reasoning>\s*(.*?)\s*</reasoning>"
    highlight_pattern = r"<highlight>\s*(.*?)\s*</highlight>"
    score_pattern = r"<score>\s*(\d+)\s*</score>?"

    # Reasoning may span multiple lines, so match across newlines.
    reasoning_match = re.search(reasoning_pattern, input_string, re.DOTALL)
    # NOTE(review): highlight is searched WITHOUT re.DOTALL, so a highlight
    # body containing a newline will not match — confirm highlights are
    # single-line upstream before changing this.
    highlight_match = re.search(highlight_pattern, input_string)
    score_match = re.search(score_pattern, input_string)

    # Extract the matched groups if present; None signals a missing tag.
    reasoning = reasoning_match.group(1) if reasoning_match else None
    highlight = highlight_match.group(1).strip() if highlight_match else None
    score = int(score_match.group(1)) if score_match else None

    return score, reasoning, highlight
|
141 |
+
|
142 |
def model_call(model_output, user_input, gold_answer, retrieved_context, pass_criteria, rubric):
|
143 |
if model_output == "" or user_input == "" or pass_criteria == "":
|
144 |
return "", "", ""
|