Tanveerooooooo committed on
Commit f7da418 · verified · 1 Parent(s): f8690c6

Update app.py

Files changed (1)
  1. app.py +29 -41
app.py CHANGED
@@ -1,56 +1,44 @@
  import gradio as gr
  import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM

- # Model configuration: CodeGemma 1.1B
- MODEL_NAME = "google/codegemma-1.1b"
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  model.to(device)

- # Supported languages and their prompt templates
- LANGUAGE_PROMPTS = {
-     "Python": "Fix this Python code:",
-     "C": "Fix this C code:",
-     "C++": "Fix this C++ code:",
-     "JavaScript": "Fix this JavaScript code:"
- }
-
- def eternos_debugger(code, error, language):
      if not code.strip():
-         return "❌ Please provide code."
-     prompt = f"{LANGUAGE_PROMPTS[language]}\n{code}\nError:\n{error}\nCorrected code:\n"
      inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=256,
-         temperature=0.1,
-         top_p=0.9,
-         do_sample=False,
-         pad_token_id=tokenizer.eos_token_id
-     )
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response.strip()

- # Gradio user interface
- def create_interface():
-     with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {background-color: #cbedec;}") as demo:
-         gr.Markdown("## ⚙️ Eternos — AI Code Debugger")
-         gr.Markdown("Supports Python, C, C++, JavaScript — powered by CodeGemma 1.1B")

-         with gr.Row():
-             code_input = gr.Textbox(label="📝 Your Code", lines=12)
-             error_input = gr.Textbox(label="⚠️ Error Message (optional)", lines=4)

-         language_input = gr.Dropdown(["Python", "C", "C++", "JavaScript"], label="🌐 Language", value="Python")
-         output_code = gr.Code(label="✅ Suggested Fix")
-         run_btn = gr.Button("🛠️ Fix Code")

-         run_btn.click(fn=eternos_debugger, inputs=[code_input, error_input, language_input], outputs=output_code)

-     return demo

- if __name__ == "__main__":
-     demo = create_interface()
-     demo.launch()
 
  import gradio as gr
  import torch
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ from threading import Thread
+ import time

+ # Load small and fast model for Python code debugging
+ MODEL_NAME = "Salesforce/codet5-small"
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  model.to(device)

+ def debug_python(code, progress=gr.State(0)):
      if not code.strip():
+         return "❌ Please enter Python code.", 0
+
+     prompt = f"Fix this Python code:\n{code}\nCorrected code:\n"
      inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)

+     def update_progress():
+         for i in range(1, 101):
+             time.sleep(0.005)  # Faster loading feel
+             progress.value = i

+     Thread(target=update_progress).start()
+
+     outputs = model.generate(**inputs, max_new_tokens=256, temperature=0.2, do_sample=False)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     progress.value = 100
+     return response.strip(), 100

+ with gr.Blocks(css=".gradio-container {background-color: #cbedec;}") as app:
+     gr.Markdown("## 🐍 Python Code Debugger (Fast Mode)")
+     gr.Markdown("Just paste your Python code. The AI will fix any issues it detects.")

+     code_box = gr.Textbox(label="📝 Your Python Code", lines=12)
+     progress_bar = gr.Slider(minimum=0, maximum=100, value=0, interactive=False, label="⏳ Progress (%)")
+     output = gr.Code(label="✅ Suggested Fix")
+     fix_btn = gr.Button("🛠️ Debug Code")

+     fix_btn.click(fn=debug_python, inputs=[code_box], outputs=[output, progress_bar])

+ app.launch()
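
A minimal standalone sketch of the generation path this commit introduces, for reference: it assumes the same Salesforce/codet5-small checkpoint and the transformers calls used in the new app.py, drops the Gradio UI and the progress thread, and uses a hypothetical sample_code snippet purely for illustration.

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "Salesforce/codet5-small"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Hypothetical broken snippet, for illustration only (missing colon on the def line).
sample_code = "def add(a, b)\n    return a + b"

# Same prompt format as debug_python in the commit above.
prompt = f"Fix this Python code:\n{sample_code}\nCorrected code:\n"
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)

# do_sample=False means greedy decoding, so the temperature=0.2 passed in the
# commit has no effect on the output and is omitted here.
outputs = model.generate(**inputs, max_new_tokens=256, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))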