Spaces · Running on L40S

miaoyibo committed · Commit 2689cfa · 1 Parent(s): 5d0be2a

add time
app.py CHANGED
@@ -149,6 +149,8 @@ def predict(
     model_inputs = tokenizer([text_for_model], return_tensors="pt").to(model.device)
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
     # print("start generating")
+
+    loc_start_time = time.time()
     if temperature > 0:
         generation_kwargs = dict(
             **model_inputs,
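The new loc_start_time stamp brackets a generation that runs on a background thread and streams text back through TextIteratorStreamer, as the surrounding context lines show. A minimal, self-contained sketch of that pattern; the model name and prompt are hypothetical stand-ins, not the Space's actual configuration:

import time
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# "gpt2" and the prompt are stand-ins; the Space loads its own model and prompt.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

model_inputs = tokenizer(["example prompt"], return_tensors="pt").to(model.device)
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

loc_start_time = time.time()  # stamp taken just before generation starts, as above
gen_thread = Thread(
    target=model.generate,
    kwargs=dict(**model_inputs, streamer=streamer, max_new_tokens=64),
)
gen_thread.start()

partial_output = ""
for chunk in streamer:       # decoded text arrives incrementally while generate() runs
    partial_output += chunk  # the app yields UI updates from inside this loop

gen_thread.join()            # generation done; partial_output holds the full text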
@@ -177,12 +179,17 @@ def predict(
     yield [[prompt, highlight_response]], [["null test", "null test2"]], "Generating file locations..."

     gen_thread.join()
+    loc_end_time = time.time()
+    loc_time = loc_end_time - loc_start_time
+
+    encoded_answer = tokenizer(partial_output, padding=True, truncation=True, return_tensors='pt')
+    print("loc token/s:",len(encoded_answer['input_ids'][0])/loc_time)

     response = partial_output

     raw_answer=post_process(response)
     model_found_files = raw_answer.strip().split("\n")
-    print(response)
+    # print(response)

     highlight_response = highlight_thinking(response)
     yield [[prompt,highlight_response]], [["null test","null test2"]], "Generate: Success"
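The throughput figure comes from re-tokenizing the decoded text, so it approximates the true number of generated ids (special tokens were skipped during decoding and the prompt is excluded). A sketch of the arithmetic as a helper; the function name and the stand-in tokenizer are illustrative, not part of the commit:

from transformers import AutoTokenizer

def tokens_per_second(tokenizer, text, elapsed_seconds):
    # Re-encode the decoded text and divide the token count by the
    # wall-clock generation time, mirroring the print added above.
    encoded = tokenizer(text, return_tensors="pt")
    return len(encoded["input_ids"][0]) / elapsed_seconds

# Illustrative usage: a stand-in tokenizer and a pretend 2-second run.
tok = AutoTokenizer.from_pretrained("gpt2")
print("loc token/s:", tokens_per_second(tok, "some generated text", 2.0))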
@@ -194,7 +201,7 @@ def predict(
     print(file_name)
     # pdb.set_trace()
     to_open_path = repo_path + "/" + file_name
-
+
     with open(to_open_path, "r", encoding="utf-8") as f:
         content = f.read()
         contents += f"{file_name}\n{content}\n\n"
@@ -214,6 +221,7 @@ def predict(
     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

     subprocess.run(["rm", "-rf", repo_path], check=True)
+    repair_start_time = time.time()
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
     if temperature > 0:
         generation_kwargs = dict(
@@ -244,6 +252,12 @@ def predict(
     yield [[prompt, highlight_response], [repair_prompt, highlight_response_repair]], [["null test", "null test2"]], "Generating repair suggestion..."

     gen_thread.join()
+    repair_end_time = time.time()
+
+    repair_time = repair_end_time - repair_start_time
+
+    encoded_answer = tokenizer(partial_output_repair, padding=True, truncation=True, return_tensors='pt')
+    print("loc token/s:",len(encoded_answer['input_ids'][0])/repair_time)

     # yield response, "null test", "Generate: Success"
     yield [[prompt,highlight_response],[repair_prompt,highlight_response_repair]], [["null test","null test2"]], "Generate: Success"