import gradio as gr
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
from transformers.image_utils import load_image
import torch
import spaces
import re
import json
import base64
import os
from PIL import Image, ImageDraw
from io import BytesIO
from img_utils import smart_resize
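# smart_resize is assumed to follow the Qwen2.5-VL preprocessing convention:
# it takes (height, width) and returns (height, width) rounded to multiples
# of 28 and capped at max_pixels.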
SYS_PROMPT = """You are a helpful assistant.

# Tools

You may call one or more functions to assist with the user query.

You are provided with function signatures within <tools></tools> XML tags:
<tools>
{{"type": "function", "function": {{"name": "computer_use", "description": "Use a mouse and keyboard to interact with a computer, and take screenshots.\n* This is an interface to a desktop GUI. You do not have access to a terminal or applications menu. You must click on desktop icons to start applications.\n* Some applications may take time to start or process actions, so you may need to wait and take successive screenshots to see the results of your actions. E.g. if you click on Firefox and a window doesn't open, try wait and taking another screenshot.\n* The screen's resolution is {width}x{height}.\n* Whenever you intend to move the cursor to click on an element like an icon, you should consult a screenshot to determine the coordinates of the element before moving the cursor.\n* If you tried clicking on a program or link but it failed to load, even after waiting, try adjusting your cursor position so that the tip of the cursor visually falls on the element that you want to click.\n* Make sure to click any buttons, links, icons, etc with the cursor tip in the center of the element. Don't click boxes on their edges unless asked.", "parameters": {{"properties": {{"action": {{"description": "The action to perform. The available actions are:\n* `key`: Performs key down presses on the arguments passed in order, then performs key releases in reverse order.\n* `type`: Type a string of text on the keyboard.\n* `mouse_move`: Move the cursor to a specified (x, y) pixel coordinate on the screen.\n* `left_click`: Click the left mouse button.\n* `left_click_drag`: Click and drag the cursor to a specified (x, y) pixel coordinate on the screen.\n* `right_click`: Click the right mouse button.\n* `middle_click`: Click the middle mouse button.\n* `double_click`: Double-click the left mouse button.\n* `scroll`: Performs a scroll of the mouse scroll wheel.\n* `wait`: Wait specified seconds for the change to happen.\n* `terminate`: Terminate the current task and report its completion status.", "enum": ["key", "type", "mouse_move", "left_click", "left_click_drag", "right_click", "middle_click", "double_click", "scroll", "wait", "terminate"], "type": "string"}}, "keys": {{"description": "Required only by `action=key`.", "type": "array"}}, "text": {{"description": "Required only by `action=type`.", "type": "string"}}, "coordinate": {{"description": "(x, y): The x (pixels from the left edge) and y (pixels from the top edge) coordinates to move the mouse to. Required only by `action=mouse_move`, `action=left_click_drag`, `action=left_click`, `action=right_click`, `action=double_click`.", "type": "array"}}, "pixels": {{"description": "The amount of scrolling to perform. Positive values scroll up, negative values scroll down. Required only by `action=scroll`.", "type": "number"}}, "time": {{"description": "The seconds to wait. Required only by `action=wait`.", "type": "number"}}, "status": {{"description": "The status of the task. Required only by `action=terminate`.", "type": "string", "enum": ["success", "failure"]}}}}, "required": ["action"], "type": "object"}}}}}}
</tools>

For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
<tool_call>
{{"name": <function-name>, "arguments": <args-json-object>}}
</tool_call>
"""
MODEL_ID = "xlangai/Jedi-7B-1080p"
processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
MODEL_ID,
trust_remote_code=True,
torch_dtype=torch.bfloat16
).to("cuda").eval()
def image_to_base64(image):
buffered = BytesIO()
image.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
return img_str
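# Outline each [xmin, ymin, xmax, ymax] box with a rounded rectangle. The
# corners are drawn by hand with arcs; on Pillow >= 8.2 a single
# ImageDraw.rounded_rectangle call would produce the same outline.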
def draw_bounding_boxes(image, bounding_boxes, outline_color="#FF3366", line_width=3):
draw = ImageDraw.Draw(image)
for box in bounding_boxes:
xmin, ymin, xmax, ymax = box
# Draw rounded rectangle
radius = 10 # Corner radius
# Draw the rounded rectangle using arcs and lines
# Top left corner
draw.arc((xmin, ymin, xmin + 2 * radius, ymin + 2 * radius), 180, 270, fill=outline_color, width=line_width)
# Top right corner
draw.arc((xmax - 2 * radius, ymin, xmax, ymin + 2 * radius), 270, 0, fill=outline_color, width=line_width)
# Bottom right corner
draw.arc((xmax - 2 * radius, ymax - 2 * radius, xmax, ymax), 0, 90, fill=outline_color, width=line_width)
# Bottom left corner
draw.arc((xmin, ymax - 2 * radius, xmin + 2 * radius, ymax), 90, 180, fill=outline_color, width=line_width)
# Top line
draw.line((xmin + radius, ymin, xmax - radius, ymin), fill=outline_color, width=line_width)
# Right line
draw.line((xmax, ymin + radius, xmax, ymax - radius), fill=outline_color, width=line_width)
# Bottom line
draw.line((xmin + radius, ymax, xmax - radius, ymax), fill=outline_color, width=line_width)
# Left line
draw.line((xmin, ymin + radius, xmin, ymax - radius), fill=outline_color, width=line_width)
return image
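# Map boxes from a normalized coordinate space (1000x1000 by default) back to
# the original resolution. run_inference converts click points directly and
# does not call this; it is kept for models that emit normalized boxes.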
def rescale_bounding_boxes(bounding_boxes, original_width, original_height, scaled_width=1000, scaled_height=1000):
x_scale = original_width / scaled_width
y_scale = original_height / scaled_height
rescaled_boxes = []
for box in bounding_boxes:
xmin, ymin, xmax, ymax = box
rescaled_box = [
xmin * x_scale,
ymin * y_scale,
xmax * x_scale,
ymax * y_scale
]
rescaled_boxes.append(rescaled_box)
return rescaled_boxes
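# The model is expected to answer with a <tool_call>...</tool_call> block that
# wraps a JSON computer_use action; extract the click coordinate and map it
# from the resized image back onto the original screenshot.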
def parse_coordinates(response, resized_width, resized_height, original_width, original_height):
try:
        action = json.loads(
            response.split("<tool_call>\n")[1].split("\n</tool_call>")[0]
        )
action_name = action["name"]
action_type = action["arguments"]["action"]
print(f"action_name: {action_name}, action_type: {action_type}")
if action_type == "wait":
return [-1, -1, -1, -1]
        # Rescale the click point from the resized image back onto the original
        # image and return it as a degenerate [x1, y1, x2, y2] box.
        coordinate = action["arguments"]["coordinate"]
        x = coordinate[0] / resized_width * original_width
        y = coordinate[1] / resized_height * original_height
        return [x, y, x, y]
except Exception as e:
print(f"Error parsing coordinates: {e}\nResponse: {response}")
return None
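# @spaces.GPU requests a GPU slice per call when running on Hugging Face
# ZeroGPU Spaces; outside Spaces the decorator is effectively a pass-through.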
@spaces.GPU
def run_inference(image, text_input):
if image is None:
return "Please upload an image", "", None
if not text_input:
text_input = "Describe this image in detail"
resized_height, resized_width = smart_resize(image.height, image.width, max_pixels=2700 * 28 * 28)
messages = [
{
"role": "system",
"content": SYS_PROMPT.format(width=resized_width, height=resized_height)
},
{
"role": "user",
"content": [
{"type": "image", "image": image},
{"type": "text", "text": text_input},
],
}
]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(
text=[text],
images=[image],
return_tensors="pt",
padding=True,
).to("cuda")
generated_ids = model.generate(**inputs, max_new_tokens=512)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
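    # Decode only the newly generated tokens. skip_special_tokens=False keeps
    # the <tool_call> markers (and any other special tokens) in the decoded
    # text so parse_coordinates can find them.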
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=False, clean_up_tokenization_spaces=False
)[0]
try:
# Parse coordinates using the function from the reference code
coordinates = parse_coordinates(output_text, resized_width, resized_height, image.width, image.height)
if coordinates is None:
return output_text, "Error parsing coordinates", image
if coordinates == [-1, -1, -1, -1]:
return output_text, "Instruction deemed infeasible by the model", image
# Create a bounding box with a small area around the click point
click_x, click_y = coordinates[0], coordinates[1]
box_size = 20 # 20px box around the click point
box = [
click_x - box_size/2,
click_y - box_size/2,
click_x + box_size/2,
click_y + box_size/2
]
# Draw the bounding box on the image
annotated_image = draw_bounding_boxes(image.copy(), [box])
return output_text, str(coordinates), annotated_image
    except Exception as e:
        print(f"Error parsing coordinates: {e}")
        return output_text, "Error parsing coordinates", image
# Load example images
example_images = [
"assets/images/example_0.png",
"assets/images/example_1.jpg",
"assets/images/example_2.png"
]
example_prompts = [
"Select the C9 cell",
"Close the file explorer",
"Click on the word 'underserved'"
]
examples = [[Image.open(img), prompt] for img, prompt in zip(example_images, example_prompts)]
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
gr.Markdown(
"""
# Jedi GUI Grounding Demo
        Upload a screenshot and describe the element you want to locate. This demo uses the Jedi-7B model.
""")
with gr.Row():
with gr.Column():
input_img = gr.Image(label="Input Image", type="pil")
text_input = gr.Textbox(label="Instruction")
submit_btn = gr.Button(value="Submit")
with gr.Column():
model_output_text = gr.Textbox(label="Model Output", lines=5)
model_output_box = gr.Textbox(label="Coordinates", lines=2)
annotated_image = gr.Image(label="Annotated Image")
submit_btn.click(run_inference, [input_img, text_input], [model_output_text, model_output_box, annotated_image])
# Add examples
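    # cache_examples=True runs inference on every example at startup and caches
    # the outputs, so clicking an example returns the cached result instantly.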
gr.Examples(
examples=examples,
inputs=[input_img, text_input],
outputs=[model_output_text, model_output_box, annotated_image],
fn=run_inference,
cache_examples=True,
)
demo.launch(debug=True)