Aria-UI committed
Commit c496804 · verified · 1 parent: a0f8d16

Upload aitw/prepare_trajectory_grounding.py with huggingface_hub

Files changed (1)
  1. aitw/prepare_trajectory_grounding.py +427 -0
aitw/prepare_trajectory_grounding.py ADDED
@@ -0,0 +1,427 @@
+ import json
+ from copy import deepcopy
+ import os
+ from pathlib import Path
+ import re
+ from PIL import Image
+ from tqdm import tqdm
+ import multiprocessing
+
+ import sys
+ sys.path.append('./google-research')
+ from android_in_the_wild.action_type import ActionType
+ from android_in_the_wild.visualization_utils import is_tap_action
+ import numpy as np
+
+ def is_tap(step):
+     # A DUAL_POINT gesture counts as a tap when touch and lift stay close together.
+     return step["results/action_type"][0] == ActionType.DUAL_POINT and is_tap_action(
+         np.array(step["results/yx_touch"]), np.array(step["results/yx_lift"])
+     )
+
+ def encode_action(action_json):
+     """
+     Encode different types of actions into human-readable descriptions.
+
+     Args:
+         action_json (dict): A dictionary containing action details
+
+     Returns:
+         str: A human-readable description of the action
+     """
+     action_type = action_json["results/action_type"][0]
+
+     if is_tap(action_json):
+         return "tap on the screen"
+
+     elif action_type == ActionType.DUAL_POINT:
+         # AitW stores points in (y, x) order: index 0 is y, index 1 is x.
+         start_y, end_y = action_json["results/yx_touch"][0], action_json["results/yx_lift"][0]
+         start_x, end_x = action_json["results/yx_touch"][1], action_json["results/yx_lift"][1]
+         # First decide whether the swipe is vertical or horizontal, then its direction.
+         if abs(start_y - end_y) > abs(start_x - end_x):
+             if start_y < end_y:
+                 return "scroll down"
+             else:
+                 return "scroll up"
+         else:
+             if start_x < end_x:
+                 return "scroll right"
+             else:
+                 return "scroll left"
+
+     elif action_type == ActionType.TYPE:
+         text_to_type = action_json["results/type_action"][0]
+         return f'TYPE "{text_to_type}"'
+
+     elif action_type == ActionType.PRESS_BACK:
+         return "go to the previous screen"
+
+     elif action_type == ActionType.PRESS_HOME:
+         return "go to the home screen"
+
+     elif action_type == ActionType.PRESS_ENTER:
+         return "press the enter key"
+
+     elif action_type == ActionType.STATUS_TASK_COMPLETE:
+         return "task completed"
+
+     elif action_type == ActionType.STATUS_TASK_IMPOSSIBLE:
+         return "task impossible"
+
+     else:
+         raise ValueError(f"Unknown action type: {action_type}")
+
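For orientation, a minimal sketch of how encode_action reads raw AitW fields; the step dicts below are invented for illustration, and running it assumes the google-research checkout added to sys.path above:

    sample_scroll = {
        "results/action_type": [ActionType.DUAL_POINT],
        "results/yx_touch": [0.8, 0.5],  # (y, x): touch near the bottom, centered
        "results/yx_lift": [0.2, 0.5],   # lift near the top: a long vertical swipe
    }
    sample_type = {
        "results/action_type": [ActionType.TYPE],
        "results/type_action": ["hello world"],
    }
    print(encode_action(sample_scroll))  # scroll up
    print(encode_action(sample_type))    # TYPE "hello world"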
+ def resize_image(image, scale=0.75):
+     """
+     Resize an image by the given scale factor while maintaining aspect ratio.
+
+     Args:
+         image: PIL Image object
+         scale (float): Factor applied to both width and height
+
+     Returns:
+         Resized PIL Image
+     """
+     width, height = image.size
+     new_width = int(width * scale)
+     new_height = int(height * scale)
+     return image.resize((new_width, new_height), Image.LANCZOS)
+
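A quick self-contained check of resize_image on a synthetic, screenshot-sized image (the dimensions are made up):

    img = Image.new("RGB", (1080, 2400), color="white")  # fake 1080x2400 screenshot
    small = resize_image(img, scale=0.5)
    print(small.size)  # (540, 1200)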
+ def merge_convs(conversations):
+     """
+     Merge all successive 'human' conversations into single messages.
+
+     Args:
+         conversations (list): List of conversation dictionaries
+
+     Returns:
+         list: Processed conversations with all successive human messages merged
+
+     Raises:
+         ValueError: If input is not a list or contains invalid conversation dictionaries
+     """
+     if not isinstance(conversations, list):
+         raise ValueError("Input must be a list of conversation dictionaries")
+
+     for conv in conversations:
+         if not isinstance(conv, dict):
+             raise ValueError("Each conversation must be a dictionary")
+         if 'from' not in conv or 'value' not in conv:
+             raise ValueError("Each conversation must have 'from' and 'value' keys")
+
+     processed_conversations = []
+     i = 0
+     while i < len(conversations):
+         current_conv = conversations[i]
+
+         if current_conv['from'] == 'human':
+             # Concatenate this and all immediately following human messages.
+             merged_value = current_conv['value']
+             j = i + 1
+             while j < len(conversations) and conversations[j]['from'] == 'human':
+                 merged_value += '\n\n' + conversations[j]['value']
+                 j += 1
+             current_conv['value'] = merged_value
+             # Skip past every human message that was merged in.
+             i = j
+         else:
+             i += 1
+
+         processed_conversations.append(current_conv)
+
+     return processed_conversations
+
+
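A small illustration of merge_convs on a made-up conversation list; the two leading human turns collapse into one message:

    demo = [
        {"from": "human", "value": "The agent is performing the ultimate task: open Wi-Fi settings."},
        {"from": "human", "value": "History of the agent's steps:\n\t1. open the Settings app."},
        {"from": "gpt", "value": "```\n(512, 128)\n```"},
    ]
    merged = merge_convs(demo)
    print(len(merged))         # 2
    print(merged[0]["value"])  # the two human values joined by a blank line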
+ def parse_reasoning(input_string):
+     input_string = input_string.strip()
+     if not input_string.endswith("```"):
+         input_string += "```"
+     # Match the three fenced blocks labeled A (caption), B (reasoning), C (instruction).
+     pattern = r'```([ABC])\n(.*?)```'
+     matches = re.findall(pattern, input_string, re.DOTALL)
+
+     # Collect the text of each labeled block, in order of appearance.
+     parsed_texts = []
+     for _, text in matches:
+         parsed_texts.append(text.strip())
+
+     if len(parsed_texts) != 3:
+         return None, None, None
+
+     caption, reasoning, instruction = parsed_texts
+
+     return caption, instruction.replace("Task: ", ""), reasoning
+
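parse_reasoning expects three fenced blocks labeled A (screen caption), B (reasoning), and C (step instruction); a contrived input shows how a missing final fence is tolerated:

    demo_reasoning = (
        "```A\nA settings screen listing available Wi-Fi networks.\n```\n"
        "```B\nThe target network entry must be tapped to open its details.\n```\n"
        "```C\nTask: Tap the entry named HomeNet"
    )
    caption, instruction, reasoning = parse_reasoning(demo_reasoning)
    print(instruction)  # Tap the entry named HomeNet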
+ grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
+ grounding_step_ans = "```\n{point_str}\n```"
+ act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
+ act_step_ans = "The agent's action: {prev_action}"
+ user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
+ user_history_instr_prompt = "History of the agent's steps:\n{history_list}."
+
+ # Downscale factor applied to each history image, keyed by window size.
+ resize_ratios_per_window_size = {
+     1: 0.5,
+     2: 0.5,
+     3: 0.5,
+ }
+
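Rendering the templates with invented values gives a sense of the final conversation text:

    print(grounding_step_prompt.format(step_idx=3, instruction="the search bar at the top"))
    # <|img|>Step 3. Given a GUI image, what are the relative (0-1000) pixel point
    # coordinates for the element corresponding to the following instruction or
    # description: the search bar at the top
    print(grounding_step_ans.format(point_str="(512, 128)"))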
+ def process_android_episodes(data, window_size=2):
+     """
+     Process Android episodes and build a training sample for each grounding step.
+
+     Args:
+         data (list): List of episode dictionaries
+         window_size (int, optional): Number of previous image-included steps to keep
+             as in-context history. Defaults to 2 (current image + 2 previous
+             image-included steps).
+
+     Returns:
+         list: Training samples, each pairing an image list with a conversation list
+     """
+     instructions = []
+     for episode in data:
+         episode_id = episode["episode_id"]
+
+         for i, step in enumerate(episode["steps"]):
+             is_grounding = step["is_grounding"]
+
+             if not is_grounding:
+                 continue
+
+             if window_size > 0 and i == 0:  # skip the first step if window_size > 0
+                 continue
+
+             convs = [
+                 {
+                     "from": "human",
+                     "value": user_start_prompt.format(
+                         ultimate_task=episode["goal_info"][0]
+                     ),
+                 },
+             ]
+
+             cur_img_list = [Path(step["image_path"]).resolve()]
+
+             if window_size > 0:
+                 window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]
+
+                 if i > window_size:  # steps older than the window are summarized as text
+                     convs.append(
+                         {
+                             "from": "human",
+                             "value": user_history_instr_prompt.format(
+                                 history_list="\n".join(
+                                     [
+                                         f"\t{j+1}. " + prev_step["step_instruction"]
+                                         for j, prev_step in enumerate(episode["steps"][:i-window_size])
+                                     ]
+                                 )
+                             ),
+                         },
+                     )
+
+                 convs.append(
+                     {
+                         "from": "human",
+                         "value": "The recent steps with the GUI images are as follows:\n",
+                     }
+                 )
+
+                 for j, win_step_i in enumerate(window_steps):
+                     if win_step_i["is_grounding"]:
+                         convs.append(
+                             {
+                                 "from": "human",
+                                 "value": grounding_step_prompt.format(
+                                     instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
+                                 ),
+                             }
+                         )
+                         convs.append(
+                             {
+                                 "from": "gpt",
+                                 "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
+                             }
+                         )
+                     else:
+                         convs.append(
+                             {
+                                 "from": "human",
+                                 "value": act_step_prompt.format(
+                                     prev_instruction=encode_action(win_step_i), step_idx=i+1-(len(window_steps)-j)
+                                 ),
+                             }
+                         )
+                         convs.append(
+                             {
+                                 "from": "human",
+                                 "value": act_step_ans.format(
+                                     prev_action=encode_action(win_step_i)
+                                 ),
+                             }
+                         )
+                 win_img_list = [
+                     Path(win_step["image_path"]).resolve() for win_step in window_steps
+                 ]
+
+                 # Validate the window images plus the current one before resizing.
+                 candidate_imgs = win_img_list + cur_img_list
+                 if not all(img_path.exists() for img_path in candidate_imgs):
+                     print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
+                     continue
+
+                 has_img_broken = False
+                 for img_path in candidate_imgs:
+                     try:
+                         Image.open(str(img_path))
+                     except Exception as e:
+                         print(f"Error opening image {img_path}: {e}")
+                         has_img_broken = True
+                         break
+                 if has_img_broken:
+                     print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
+                     continue
+
+                 resize_scale = resize_ratios_per_window_size[window_size]
+                 win_img_list_resized = []
+                 try:
+                     for img_path in win_img_list:
+                         new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
+                         new_save_dir = img_path.parent.parent / "images_resized"
+                         new_save_dir.mkdir(parents=True, exist_ok=True)
+                         new_save_path = new_save_dir / new_save_name
+                         if new_save_path.exists():
+                             win_img_list_resized.append(new_save_path)
+                             continue
+                         win_img = Image.open(str(img_path))
+                         win_img = resize_image(win_img, scale=resize_scale)
+                         win_img.save(str(new_save_path))
+                         win_img_list_resized.append(new_save_path)
+                 except Exception as e:
+                     print(f"Error resizing image: {e}: {win_img_list}")
+                     continue
+
+             else:
+                 # window_size == 0: every previous step goes into the text history.
+                 convs.append(
+                     {
+                         "from": "human",
+                         "value": user_history_instr_prompt.format(
+                             history_list="\n".join(
+                                 [
+                                     f"\t{j+1}. " + prev_step["step_instruction"]
+                                     for j, prev_step in enumerate(episode["steps"][:i])
+                                 ]
+                             )
+                         ),
+                     },
+                 )
+
+             if window_size > 0:
+                 img_list = win_img_list_resized + cur_img_list
+             else:
+                 img_list = cur_img_list
+
+             # Re-validate the final image list (resized history + current image).
+             has_img_broken = False
+             for img_path in img_list:
+                 try:
+                     Image.open(str(img_path))
+                 except Exception as e:
+                     print(f"Error opening image {img_path}: {e}")
+                     has_img_broken = True
+                     break
+             if has_img_broken:
+                 print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
+                 continue
+
+             # Current step details
+             convs.append(
+                 {
+                     "from": "human",
+                     "value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
+                 }
+             )
+             convs.append(
+                 {
+                     "from": "gpt",
+                     "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
+                 }
+             )
+
+             convs = merge_convs(convs)
+
+             instructions.append(
+                 {
+                     "image": [str(img_path) for img_path in img_list],
+                     "conversations": convs,
+                 }
+             )
+
+     return instructions
+
+ # Example usage
+ if __name__ == "__main__":
+     dataset_directories = {
+         'general': './general_episodes_with_grounding_reasoning',
+         'google_apps': './google_apps_episodes_with_grounding_reasoning',
+         'install': './install_episodes_with_grounding_reasoning_valid',
+         'web_shopping': './web_shopping_episodes_with_grounding_reasoning',
+     }
+
+     episode_data_list = []
+     for dataset_name, directory_path in dataset_directories.items():
+         episode_data_list.extend(list(Path(directory_path).glob("*.json")))
+     episode_data_list = [json.load(open(str(file_path), "r", encoding="utf-8")) for file_path in episode_data_list]
+
+     episode_data_list_new = []
+     for episode_data in tqdm(episode_data_list, desc="Parsing fields..."):
+         for step in episode_data:
+             step["image_path"] = step["image_path"][0] if isinstance(step["image_path"], list) else step["image_path"]
+             if "grounding_reasoning" not in step or not step["grounding_reasoning"]:
+                 step["step_instruction"] = encode_action(step)
+                 step["is_grounding"] = False
+                 continue
+
+             caption, instruction, reasoning = parse_reasoning(step["grounding_reasoning"])
+             step["step_instruction"] = instruction if instruction else encode_action(step)
+             step["caption"] = caption
+             step["reasoning"] = reasoning
+             step["is_grounding"] = bool(instruction)
+             # Convert (y, x) in [0, 1] to integer (x, y) coordinates in [0, 1000].
+             step["coord_norm"] = (int(step["results/yx_touch"][1] * 1000), int(step["results/yx_touch"][0] * 1000))
+         episode_data = {
+             "episode_id": episode_data[0]["episode_id"],
+             "goal_info": episode_data[0]["goal_info"],
+             "steps": episode_data,
+         }
+         episode_data_list_new.append(episode_data)
+
+     window_size_list = [0, 1, 2, 3]
+
+     def process_episode(args):
+         episode, window_size = args
+         return process_android_episodes([episode], window_size)
+
+     instructions = []
+     for window_size in window_size_list:
+         tasks = [(episode, window_size) for episode in episode_data_list_new]
+         with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
+             results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
+         for result in results:
+             instructions.extend(result)
+
+     print(f"Number of context-aware train instructions: {len(instructions)}")
+
+     with open(f"aitw_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
+         json.dump(instructions, file, ensure_ascii=False, indent=4)
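Downstream, the emitted JSON is a flat list of samples, each pairing image paths with a conversation. A minimal sketch of loading one, assuming the default window_size_list above so the output filename matches aitw_window_0-1-2-3_*k.json (the glob and variable names are illustrative):

    import json
    from pathlib import Path

    # Pick up whichever output file the script produced (the name depends on sample count).
    out_file = next(Path(".").glob("aitw_window_0-1-2-3_*k.json"))
    with open(out_file, "r", encoding="utf-8") as f:
        samples = json.load(f)

    sample = samples[0]
    print(len(sample["image"]))  # one path per <|img|> placeholder in the conversation
    for turn in sample["conversations"]:
        print(turn["from"], "->", turn["value"][:80])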