File size: 7,821 Bytes
59d87f3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 |
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
import os
from langchain_core.pydantic_v1 import BaseModel, Field
import gradio as gr
class code(BaseModel):
    """Code output"""
    # Structured-output schema filled by the LLM via with_structured_output(code).
    # The Field descriptions below are part of the schema the model sees, so
    # they are behavior, not just documentation.
    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Code block not including import statements")
    possible_errors: str = Field(description="Description of potential error and vulnerabilities in the code")
    # Unannotated class attribute: pydantic v1 treats this as a plain class
    # variable, not a model field.
    description = "Schema for code solutions to questions about Code."
class revision(BaseModel):
    """Revision output"""
    # Structured-output schema for the revisor chain's critique of a solution.
    imports_revision: str = Field(description="Revision of imports")
    code_revision: str = Field(description="Revision of code")
    overall_evaluation: str = Field(description="Thorough evaluation of the imports and of the code")
    # Unannotated class attribute (plain class variable, not a field).
    # NOTE(review): this text appears copy-pasted from the `code` model —
    # presumably it should describe the revision schema; confirm before changing.
    description = "Schema for code solutions to questions about Code."
def dict_to_string(d):
    """Render a mapping as newline-separated "key:\n\tvalue" entries.

    Each item becomes a line of the form ``key:`` followed by a
    tab-indented value on the next line.
    """
    return "\n".join(f"{key}:\n\t{value}" for key, value in d.items())
def coder_reply_to_string(solution: code):
    """Flatten a structured LLM reply into a readable plain-text string.

    Works on any object with a ``__dict__`` (pydantic v1 models qualify).
    """
    return dict_to_string(vars(solution))
class CodeGenerator:
    """Iteratively generates code: each round, the coder chain produces a
    solution and the revisor chain's critique becomes the context for the
    next round. The last solution produced is returned."""

    def __init__(self, coder, revisor, maxiters):
        # coder: chain invoked with {"context", "messages"}, returns a solution.
        # revisor: chain invoked with {"messages"}, returns a structured review.
        # maxiters: number of generate→revise rounds to run.
        self.coder = coder
        self.revisor = revisor
        self.maxiters = maxiters

    def generate(self, prompt, context):
        """Run up to ``maxiters`` generate→revise rounds; return the final solution.

        Raises:
            ValueError: if maxiters < 1 (the original loop would otherwise
                return an unbound ``solution`` and crash with UnboundLocalError).
        """
        if self.maxiters < 1:
            raise ValueError("maxiters must be >= 1")
        solution = None
        for _ in range(self.maxiters):
            solution = self.coder.invoke(
                {"context": context, "messages": [("user", prompt)]}
            )
            # Named `review` (not `revision`) to avoid shadowing the
            # module-level `revision` pydantic model class.
            review = self.revisor.invoke(
                {"messages": [("user", coder_reply_to_string(solution))]}
            )
            # Feed the critique back in as next round's context.
            context = coder_reply_to_string(review)
        return solution
class modularized_code(BaseModel):
    # Structured-output schema for the modularizing refactor chain.
    # Field descriptions are sent to the model as part of the schema.
    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Modularized code block not including import statements")
    # Unannotated class attribute (plain class variable, not a field).
    description = "Schema for code solutions to questions about Code."
def check_code(solution):
    """Smoke-test a solution by exec-ing its imports, then imports + code.

    Returns a human-readable status string instead of raising; the exact
    success string is matched elsewhere, so it must not be altered.

    WARNING: exec() runs arbitrary LLM-generated code in this process —
    acceptable only because executing the generated code is the point here.
    """
    try:
        exec(solution.imports)
    except Exception as exc:
        return f"An error occurred during import phase: {exc}"
    try:
        exec(solution.imports + "\n" + solution.code)
    except Exception as exc:
        return f"An error occurred during code execution phase: {exc}"
    return "Code ran succesfully"
class CodeChecker:
    """Runs a solution via check_code() and, on failure, asks the checker
    chain to refactor it, repeating up to a fixed number of iterations."""

    # Must match the exact success string returned by check_code().
    _SUCCESS = "Code ran succesfully"

    def __init__(self, solution, checker):
        self.solution = solution  # structured solution exposing .imports / .code
        self.checker = checker    # chain invoked with {"code", "errors"}

    def check_n_refactor(self, iterations):
        """Try up to ``iterations`` check→refactor rounds.

        Returns the first solution that runs cleanly, or the last refactored
        attempt if none did.
        """
        for _ in range(iterations):
            try:
                success_status = check_code(self.solution)
            except Exception as e:
                success_status = f"An error occurred while checking the code: {e}"
            print("Success status: " + success_status)
            # BUG FIX: the original compared against "Code run succesfully"
            # ("run" vs "ran"), so success was never recognized and the
            # checker chain was invoked on every iteration regardless.
            if success_status == self._SUCCESS:
                return self.solution
            self.solution = self.checker.invoke(
                {
                    "code": self.solution.imports + "\n" + self.solution.code,
                    "errors": success_status,
                }
            )
        return self.solution
# Code-generation prompt (original comment said "Grader prompt", which was
# misleading): instructs the coder chain to answer with prefix, imports,
# code block, and potential errors, given {context} and the user question.
code_gen_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a coding assistant with expertise in python. Based on the following context (which can be code or revision suggestions): \n ------- \n {context} \n ------- \n Answer the user
question. Ensure any code you provide can be executed \n
with all required imports and variables defined. First, structure your answer with a description of the code solution. \n
Secondly list the imports. Thirdly list the functioning code block. Finally, describe potential errors one might encounter while executing the code. Here is the user question:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Revisor prompt: asks the model to critique a programmer's solution
# (imports, code block, overall evaluation) — paired with the `revision` schema.
revisor_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a coding revisor with expertise in python. From the prefix, import, code and potential errors produced by a programmer, you should provide a thorough review with:
- First what would you improve in the imports
- Second what would you improve in the code block
- Third an overall evaluation of the whole code solution produced by the programmer. Here is the programmer solution you should revise:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Modularization prompt: asks the model to refactor a solution into modular
# code, guided by {context} — paired with the `modularized_code` schema.
modules_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """Your job is to refactor code in a modularized way, and you have expertise in python. From the prefix, import, code and potential errors produced by a programmer, you should provide a refactored and modularized code with:
- A description of what you did
- The imports
- The refactored and modularized code.
Orient code generation on this context (which may be user's request or some revision suggestions):
\n\n-----------\n{context}\n-------------\n\n
Here is the programmer solution you should refactor and modularize:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Checker prompt: given runtime {errors} produced by check_code() and the
# failing {code}, asks the model for a fixed version — paired with the
# `code` schema. Note: no "{messages}" placeholder; this prompt is complete
# on its own.
checker_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """Your job is to refactor code based on these errors:
----------
{errors}
----------
This is the code to refactor:
----------
{code}
----------
You should then reply with:
- A description of what you did
- The imports
- The refactored code""",
        ),
    ]
)
import time
def reply(message, history, api_key, context):
    """Gradio ChatInterface callback: generate, modularize, check, and stream code.

    Args:
        message: the user's coding request.
        history: chat history (required by ChatInterface's signature; unused).
        api_key: OpenAI API key entered by the user in the UI.
        context: contextual instructions guiding code generation.

    Yields:
        The growing response string, character by character, to simulate typing.
    """
    # Pass the key directly to the client instead of mutating os.environ —
    # the environment is process-global state shared by concurrent users.
    llm = ChatOpenAI(temperature=0, model="gpt-4o", api_key=api_key)

    # 1) Generate a solution, iteratively refined with revisor feedback.
    code_gen_chain = code_gen_prompt | llm.with_structured_output(code)
    code_revision_chain = revisor_prompt | llm.with_structured_output(revision)
    aicodegenerator = CodeGenerator(code_gen_chain, code_revision_chain, 5)
    solution = aicodegenerator.generate(prompt=message, context=context)

    # 2) Refactor the solution into modularized code (same revise loop).
    code_modules_chain = modules_prompt | llm.with_structured_output(modularized_code)
    aicodemodularizer = CodeGenerator(code_modules_chain, code_revision_chain, 5)
    modules_solution = aicodemodularizer.generate(
        prompt=coder_reply_to_string(solution), context=message
    )

    # 3) Execute the code and let the checker chain repair runtime errors.
    code_checker_chain = checker_prompt | llm.with_structured_output(code)
    aicodechecker = CodeChecker(modules_solution, code_checker_chain)
    final_solution = aicodechecker.check_n_refactor(5)

    response = (
        "The final solution for your code is:\n\n```python"
        + final_solution.imports + "\n" + final_solution.code + "\n```"
    )
    # Stream one character at a time for a typing effect in the chat UI.
    this_hist = ""
    for char in response:
        this_hist += char
        time.sleep(0.001)
        yield this_hist
# --- Gradio UI ---
# Extra inputs rendered in an accordion above the chat box; the API key is
# masked and the context box is pre-filled with an example snippet.
api_key_user = gr.Textbox(label="OpenAI API key", type="password")
context_user = gr.Textbox(label="Context", info="Add some contextual instructions for the model to know how to generate code", value="def hello_world():\n\tprint('Hello world!')\n\nhello_world()")
chatbot = gr.Chatbot(height=400)
additional_accordion = gr.Accordion(label="Parameters to be set before you start chatting", open=True)
with gr.Blocks() as demo:
    gr.HTML("<h1 align='center'>Self-Reviewing Coding Assistant🤖💻</h1>")
    gr.Image('coding_assistant.png')
    gr.ChatInterface(fn=reply, additional_inputs=[api_key_user, context_user], additional_inputs_accordion=additional_accordion, chatbot=chatbot)
# Bind to all interfaces (typical for containerized deployment) on port 7860.
demo.launch(server_name="0.0.0.0", server_port=7860)