Spaces:
Running
on
L40S
Running
on
L40S
feat: Enable MCP
Browse files

Hello! This is an automated PR adding MCP compatibility to your AI App 🤖.
This PR introduces two improvements:
1. Adds docstrings to the functions in the app file that are directly connected to the Gradio UI, for the downstream LLM to use.
2. Enables the Model Context Protocol (MCP) server by adding `mcp_server=True` to the `.launch()` call.
No other logic has been changed. Please review and merge if it looks good!

Learn more about MCP compatibility in Spaces here: https://huggingface.co/changelog/add-compatible-spaces-to-your-mcp-tools
app.py
CHANGED
@@ -82,17 +82,25 @@ def predict(
|
|
82 |
chunk_size: int = 512,
|
83 |
):
|
84 |
"""
|
85 |
-
|
|
|
|
|
|
|
|
|
86 |
Args:
|
87 |
-
text
|
88 |
-
url
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
max_length_tokens
|
95 |
-
chunk_size (
|
|
|
|
|
|
|
|
|
96 |
"""
|
97 |
print("running the prediction function")
|
98 |
|
@@ -201,7 +209,25 @@ def retry(
|
|
201 |
chunk_size: int = 512,
|
202 |
):
|
203 |
"""
|
204 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
205 |
"""
|
206 |
if len(history) == 0:
|
207 |
yield (chatbot, history, "Empty context")
|
@@ -258,6 +284,18 @@ def build_demo(args: argparse.Namespace) -> gr.Blocks:
|
|
258 |
retry_btn = gr.Button("🔄 Regenerate")
|
259 |
|
260 |
def respond(message):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
261 |
return f"Url and commit hash submitted!"
|
262 |
with gr.Column():
|
263 |
url_box = gr.Textbox(label="Please input a Github url here",placeholder="Input your url", lines=1)
|
@@ -328,7 +366,8 @@ def main(args: argparse.Namespace):
|
|
328 |
favicon_path=favicon_path,
|
329 |
server_name=args.ip,
|
330 |
server_port=args.port,
|
331 |
-
share=True
|
|
|
332 |
)
|
333 |
|
334 |
if __name__ == "__main__":
|
@@ -341,4 +380,4 @@ if __name__ == "__main__":
|
|
341 |
|
342 |
args = parse_args()
|
343 |
print(args)
|
344 |
-
main(args)
|
|
|
82 |
chunk_size: int = 512,
|
83 |
):
|
84 |
"""
|
85 |
+
Generate a response for the given issue description and GitHub repository.
|
86 |
+
|
87 |
+
This function clones a GitHub repository, analyzes its structure, locates relevant files
|
88 |
+
based on the issue description, and generates a repair solution using the Kimi-Dev model.
|
89 |
+
|
90 |
Args:
|
91 |
+
text: The issue description provided by the user
|
92 |
+
url: The GitHub repository URL
|
93 |
+
commit_hash: The specific commit hash to checkout
|
94 |
+
chatbot: The current chatbot conversation state
|
95 |
+
history: The conversation history
|
96 |
+
top_p: The top-p sampling parameter for text generation
|
97 |
+
temperature: The temperature parameter for text generation
|
98 |
+
max_length_tokens: Maximum number of tokens to generate
|
99 |
+
chunk_size: Size of chunks for streaming response (default: 512)
|
100 |
+
|
101 |
+
Returns:
|
102 |
+
Generator yielding tuples of (chatbot_state, history_state, status_message) representing
|
103 |
+
the updated conversation state and generation status
|
104 |
"""
|
105 |
print("running the prediction function")
|
106 |
|
|
|
209 |
chunk_size: int = 512,
|
210 |
):
|
211 |
"""
|
212 |
+
Regenerate the response for the previous input.
|
213 |
+
|
214 |
+
This function retries the prediction with the same parameters as the last request,
|
215 |
+
useful when the user wants to get a different response for the same input.
|
216 |
+
|
217 |
+
Args:
|
218 |
+
text: The issue description from the previous request
|
219 |
+
url: The GitHub repository URL from the previous request
|
220 |
+
commit_hash: The commit hash from the previous request
|
221 |
+
chatbot: The current chatbot conversation state
|
222 |
+
history: The conversation history
|
223 |
+
top_p: The top-p sampling parameter for text generation
|
224 |
+
temperature: The temperature parameter for text generation
|
225 |
+
max_length_tokens: Maximum number of tokens to generate
|
226 |
+
chunk_size: Size of chunks for streaming response (default: 512)
|
227 |
+
|
228 |
+
Returns:
|
229 |
+
Generator yielding tuples of (chatbot_state, history_state, status_message) or
|
230 |
+
yields error state if history is empty
|
231 |
"""
|
232 |
if len(history) == 0:
|
233 |
yield (chatbot, history, "Empty context")
|
|
|
284 |
retry_btn = gr.Button("🔄 Regenerate")
|
285 |
|
286 |
def respond(message):
|
287 |
+
"""
|
288 |
+
Handle the submission of URL and commit hash.
|
289 |
+
|
290 |
+
This function provides feedback when the user submits a GitHub URL and commit hash,
|
291 |
+
confirming that the information has been received.
|
292 |
+
|
293 |
+
Args:
|
294 |
+
message: The input message (not used in current implementation)
|
295 |
+
|
296 |
+
Returns:
|
297 |
+
A confirmation message string
|
298 |
+
"""
|
299 |
return f"Url and commit hash submitted!"
|
300 |
with gr.Column():
|
301 |
url_box = gr.Textbox(label="Please input a Github url here",placeholder="Input your url", lines=1)
|
|
|
366 |
favicon_path=favicon_path,
|
367 |
server_name=args.ip,
|
368 |
server_port=args.port,
|
369 |
+
share=True,
|
370 |
+
mcp_server=True
|
371 |
)
|
372 |
|
373 |
if __name__ == "__main__":
|
|
|
380 |
|
381 |
args = parse_args()
|
382 |
print(args)
|
383 |
+
main(args)
|