prithivMLmods committed
Commit d208fc5 · verified · 1 parent: e85ee33

Update app.py

Files changed (1):
app.py (+115 −65)
app.py CHANGED
@@ -5,6 +5,7 @@ import json
 import time
 import asyncio
 from threading import Thread
+from typing import Iterable
 
 import gradio as gr
 import spaces
@@ -23,14 +24,105 @@ from transformers import (
     AutoTokenizer,
 )
 from transformers.image_utils import load_image
+from gradio.themes import Soft
+from gradio.themes.utils import colors, fonts, sizes
+
+colors.light_salmon = colors.Color(
+    name="light_salmon",
+    c50="#FFF9F2",
+    c100="#FFECC6",
+    c200="#FFD9B3",
+    c300="#FFC6A0",
+    c400="#FFB38D",
+    c500="#FFA07A",
+    c600="#E6906E",
+    c700="#CC8062",
+    c800="#B37056",
+    c900="#99604A",
+    c950="#80503E",
+)
+
+colors.red_gray = colors.Color(
+    name="red_gray",
+    c50="#f7eded", c100="#f5dcdc", c200="#efb4b4", c300="#e78f8f",
+    c400="#d96a6a", c500="#c65353", c600="#b24444", c700="#8f3434",
+    c800="#732d2d", c900="#5f2626", c950="#4d2020",
+)
+
+class LightSalmonTheme(Soft):
+    def __init__(
+        self,
+        *,
+        primary_hue: colors.Color | str = colors.gray,
+        secondary_hue: colors.Color | str = colors.light_salmon,  # Use the new color
+        neutral_hue: colors.Color | str = colors.slate,
+        text_size: sizes.Size | str = sizes.text_lg,
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("Inconsolata"), "Arial", "sans-serif",
+        ),
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
+            fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
+        ),
+    ):
+        super().__init__(
+            primary_hue=primary_hue,
+            secondary_hue=secondary_hue,
+            neutral_hue=neutral_hue,
+            text_size=text_size,
+            font=font,
+            font_mono=font_mono,
+        )
+        super().set(
+            background_fill_primary="*primary_50",
+            background_fill_primary_dark="*primary_900",
+            body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
+            body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
+            button_primary_text_color="black",
+            button_primary_text_color_hover="white",
+            button_primary_background_fill="linear-gradient(90deg, *secondary_400, *secondary_400)",
+            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_600)",
+            button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_800)",
+            button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_500)",
+            button_secondary_text_color="black",
+            button_secondary_text_color_hover="white",
+            button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
+            button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
+            button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
+            button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
+            button_cancel_background_fill=f"linear-gradient(90deg, {colors.red_gray.c400}, {colors.red_gray.c500})",
+            button_cancel_background_fill_dark=f"linear-gradient(90deg, {colors.red_gray.c700}, {colors.red_gray.c800})",
+            button_cancel_background_fill_hover=f"linear-gradient(90deg, {colors.red_gray.c500}, {colors.red_gray.c600})",
+            button_cancel_background_fill_hover_dark=f"linear-gradient(90deg, {colors.red_gray.c800}, {colors.red_gray.c900})",
+            button_cancel_text_color="white",
+            button_cancel_text_color_dark="white",
+            button_cancel_text_color_hover="white",
+            button_cancel_text_color_hover_dark="white",
+            slider_color="*secondary_300",
+            slider_color_dark="*secondary_600",
+            block_title_text_weight="600",
+            block_border_width="3px",
+            block_shadow="*shadow_drop_lg",
+            button_primary_shadow="*shadow_drop_lg",
+            button_large_padding="11px",
+            color_accent_soft="*primary_100",
+            block_label_background_fill="*primary_200",
+        )
+
+light_salmon_theme = LightSalmonTheme()
+
+css = """
+#main-title h1 {
+    font-size: 2.3em !important;
+}
+#output-title h2 {
+    font-size: 2.1em !important;
+}
+"""
 
-# Constants for text generation
 MAX_MAX_NEW_TOKENS = 4096
 DEFAULT_MAX_NEW_TOKENS = 2048
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
-# Let the environment (e.g., Hugging Face Spaces) determine the device.
-# This avoids conflicts with the CUDA environment setup by the platform.
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
@@ -43,12 +135,7 @@ if torch.cuda.is_available():
     print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
 
 print("Using device:", device)
-# --- Model Loading ---
 
-# To address the warnings, we add `use_fast=False` to ensure we use the
-# processor version the model was originally saved with.
-
-# Load DREX-062225-exp
 MODEL_ID_X = "prithivMLmods/DREX-062225-exp"
 processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True, use_fast=False)
 model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -57,7 +144,6 @@ model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load typhoon-ocr-3b
 MODEL_ID_T = "scb10x/typhoon-ocr-3b"
 processor_t = AutoProcessor.from_pretrained(MODEL_ID_T, trust_remote_code=True, use_fast=False)
 model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -66,7 +152,6 @@ model_t = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load olmOCR-7B-0225-preview
 MODEL_ID_O = "allenai/olmOCR-7B-0225-preview"
 processor_o = AutoProcessor.from_pretrained(MODEL_ID_O, trust_remote_code=True, use_fast=False)
 model_o = Qwen2VLForConditionalGeneration.from_pretrained(
@@ -75,7 +160,6 @@ model_o = Qwen2VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load Lumian-VLR-7B-Thinking
 MODEL_ID_J = "prithivMLmods/Lumian-VLR-7B-Thinking"
 SUBFOLDER = "think-preview"
 processor_j = AutoProcessor.from_pretrained(MODEL_ID_J, trust_remote_code=True, subfolder=SUBFOLDER, use_fast=False)
@@ -86,20 +170,14 @@ model_j = Qwen2_5_VLForConditionalGeneration.from_pretrained(
     torch_dtype=torch.float16
 ).to(device).eval()
 
-# Load openbmb/MiniCPM-V-4
 MODEL_ID_V4 = 'openbmb/MiniCPM-V-4'
 model_v4 = AutoModel.from_pretrained(
     MODEL_ID_V4,
     trust_remote_code=True,
     torch_dtype=torch.bfloat16,
-    # Using 'sdpa' can sometimes cause issues in certain environments,
-    # letting transformers choose the default is safer.
-    # attn_implementation='sdpa'
 ).eval().to(device)
 tokenizer_v4 = AutoTokenizer.from_pretrained(MODEL_ID_V4, trust_remote_code=True, use_fast=False)
 
-# --- Refactored Model Dictionary ---
-# This simplifies model selection in the generation functions.
 MODELS = {
     "DREX-062225-7B-exp": (processor_x, model_x),
     "Typhoon-OCR-3B": (processor_t, model_t),
@@ -109,15 +187,10 @@ MODELS = {
 
 
 def downsample_video(video_path):
-    """
-    Downsamples the video to evenly spaced frames.
-    Each frame is returned as a PIL image along with its timestamp.
-    """
     vidcap = cv2.VideoCapture(video_path)
     total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
     fps = vidcap.get(cv2.CAP_PROP_FPS)
     frames = []
-    # Use a maximum of 10 frames to avoid excessive memory usage
     frame_indices = np.linspace(0, total_frames - 1, min(total_frames, 10), dtype=int)
     for i in frame_indices:
         vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
@@ -137,14 +210,10 @@ def generate_image(model_name: str, text: str, image: Image.Image,
                    top_p: float = 0.9,
                    top_k: int = 50,
                    repetition_penalty: float = 1.2):
-    """
-    Generates responses using the selected model for image input.
-    """
     if image is None:
         yield "Please upload an image.", "Please upload an image."
         return
 
-    # Handle MiniCPM-V-4 separately due to its different API
     if model_name == "openbmb/MiniCPM-V-4":
         msgs = [{'role': 'user', 'content': [image, text]}]
         try:
@@ -158,13 +227,12 @@ def generate_image(model_name: str, text: str, image: Image.Image,
             yield f"Error: {e}", f"Error: {e}"
         return
 
-    # Use the dictionary for other models
     if model_name not in MODELS:
         yield "Invalid model selected.", "Invalid model selected."
         return
     processor, model = MODELS[model_name]
 
-    messages = [{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": text}]}]
+    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": text}]}]
     prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     inputs = processor(
         text=[prompt_full], images=[image], return_tensors="pt", padding=True,
@@ -187,9 +255,6 @@ def generate_video(model_name: str, text: str, video_path: str,
                    top_p: float = 0.9,
                    top_k: int = 50,
                    repetition_penalty: float = 1.2):
-    """
-    Generates responses using the selected model for video input.
-    """
     if video_path is None:
         yield "Please upload a video.", "Please upload a video."
         return
@@ -199,14 +264,11 @@
         yield "Could not process video.", "Could not process video."
         return
 
-    # Handle MiniCPM-V-4 separately
     if model_name == "openbmb/MiniCPM-V-4":
         images = [frame for frame, ts in frames_with_ts]
-        # For video, the prompt includes the text and then all the image frames
         content = [text] + images
         msgs = [{'role': 'user', 'content': content}]
         try:
-            # The .chat API still takes a single image argument, typically the first frame
             answer = model_v4.chat(
                 image=images[0].convert('RGB'), msgs=msgs, tokenizer=tokenizer_v4,
                 max_new_tokens=max_new_tokens, temperature=temperature,
@@ -217,17 +279,15 @@
             yield f"Error: {e}", f"Error: {e}"
         return
 
-    # Use the dictionary for other models
     if model_name not in MODELS:
         yield "Invalid model selected.", "Invalid model selected."
         return
     processor, model = MODELS[model_name]
 
-    # Prepare messages for Qwen-style models
     messages = [{"role": "user", "content": [{"type": "text", "text": text}]}]
     images_for_processor = []
     for frame, timestamp in frames_with_ts:
-        messages[0]["content"].append({"type": "image", "image": frame})
+        messages[0]["content"].insert(0, {"type": "image"})
         images_for_processor.append(frame)
 
     prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
@@ -250,43 +310,34 @@
         time.sleep(0.01)
         yield buffer, buffer
 
-
-# Define examples for image and video inference
 image_examples = [
-    ["Describe the safety measures in the image. Conclude (Safe / Unsafe)..", "images/5.jpg"],
-    ["Convert this page to doc [markdown] precisely.", "images/3.png"],
-    ["Convert this page to doc [markdown] precisely.", "images/4.png"],
-    ["Explain the creativity in the image.", "images/6.jpg"],
-    ["Convert this page to doc [markdown] precisely.", "images/1.png"],
-    ["Convert chart to OTSL.", "images/2.png"]
+    ["Describe the safety measures in the image. Conclude (Safe / Unsafe)..", "examples/images/5.jpg"],
+    ["Convert this page to doc [markdown] precisely.", "examples/images/3.png"],
+    ["Convert this page to doc [markdown] precisely.", "examples/images/4.png"],
+    ["Explain the creativity in the image.", "examples/images/6.jpg"],
+    ["Convert this page to doc [markdown] precisely.", "examples/images/1.png"],
+    ["Convert chart to OTSL.", "examples/images/2.png"]
 ]
 
 video_examples = [
-    ["Explain the video in detail.", "videos/2.mp4"],
-    ["Explain the ad in detail.", "videos/1.mp4"]
+    ["Explain the video in detail.", "examples/videos/2.mp4"],
+    ["Explain the ad in detail.", "examples/videos/1.mp4"]
 ]
 
-css = """
-.submit-btn { background-color: #2980b9 !important; color: white !important; }
-.submit-btn:hover { background-color: #3498db !important; }
-.canvas-output { border: 2px solid #4682B4; border-radius: 10px; padding: 20px; }
-"""
-
-# Create the Gradio Interface
-with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-    gr.Markdown("# **[Multimodal VLM Thinking](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
+with gr.Blocks(theme=light_salmon_theme, css=css) as demo:
+    gr.Markdown("# **Multimodal VLM Thinking**", elem_id="main-title")
     with gr.Row():
-        with gr.Column():
+        with gr.Column(scale=2):
             with gr.Tabs():
                 with gr.TabItem("Image Inference"):
                     image_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
                     image_upload = gr.Image(type="pil", label="Image", height=290)
-                    image_submit = gr.Button("Submit", elem_classes="submit-btn")
+                    image_submit = gr.Button("Submit", variant="primary")
                     gr.Examples(examples=image_examples, inputs=[image_query, image_upload])
                 with gr.TabItem("Video Inference"):
                     video_query = gr.Textbox(label="Query Input", placeholder="Enter your query here...")
                     video_upload = gr.Video(label="Video", height=290)
-                    video_submit = gr.Button("Submit", elem_classes="submit-btn")
+                    video_submit = gr.Button("Submit", variant="primary")
                     gr.Examples(examples=video_examples, inputs=[video_query, video_upload])
 
         with gr.Accordion("Advanced options", open=False):
@@ -296,12 +347,11 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
             repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
 
-        with gr.Column():
-            with gr.Column(elem_classes="canvas-output"):
-                gr.Markdown("## Output")
-                output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=5, show_copy_button=True)
-                with gr.Accordion("(Result.md)", open=False):
-                    markdown_output = gr.Markdown(label="(Result.Md)")
+        with gr.Column(scale=3):
+            gr.Markdown("## Output", elem_id="output-title")
+            output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=10, show_copy_button=True)
+            with gr.Accordion("(Result.md)", open=False):
+                markdown_output = gr.Markdown(label="(Result.Md)")
             model_choice = gr.Radio(
                 choices=["Lumian-VLR-7B-Thinking", "openbmb/MiniCPM-V-4", "Typhoon-OCR-3B", "DREX-062225-7B-exp", "olmOCR-7B-0225-preview"],
                 label="Select Model",
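Note on the theme change: the new LightSalmonTheme above subclasses Gradio's built-in Soft theme. It registers two custom color ramps on gradio.themes.utils.colors, forwards hue and font choices to Soft.__init__, and then overrides individual generated CSS variables with set(). The same pattern works with any built-in theme; a condensed sketch with illustrative values, not the commit's full palette:

import gradio as gr
from gradio.themes import Soft

class TinyTheme(Soft):
    def __init__(self):
        # Hues can be passed by name or as colors.Color ramps.
        super().__init__(primary_hue="gray", secondary_hue="orange")
        # set() overrides individual theme CSS variables after construction.
        super().set(
            block_border_width="3px",
            button_large_padding="11px",
        )

with gr.Blocks(theme=TinyTheme()) as demo:
    gr.Button("Submit", variant="primary")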
 
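Note on the message-format change: in both generate_image and generate_video, image entries in the chat messages are now the bare placeholder {"type": "image"}, and the actual PIL images are passed to the processor through its images= argument instead of being embedded in the message dict. With Qwen-style processors, apply_chat_template expands each placeholder into image tokens, and the processor call then pairs placeholders with the images= entries in order. A minimal sketch of the pattern (the model ID and prompt here are illustrative, not from the commit):

from transformers import AutoProcessor
from PIL import Image

processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-3B-Instruct")
image = Image.new("RGB", (448, 448))  # stand-in for an uploaded image

# One {"type": "image"} placeholder per image; the pixels travel separately.
messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Convert this page to markdown."},
]}]
prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# The processor matches each placeholder to an entry of images=, in order.
inputs = processor(text=[prompt], images=[image], return_tensors="pt", padding=True)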
 
 
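For reference, the unchanged downsample_video keeps at most 10 evenly spaced frames via np.linspace(0, total_frames - 1, min(total_frames, 10), dtype=int); in generate_video each sampled frame then contributes one inserted {"type": "image"} placeholder, with the frames passed to the processor in sampling order. A quick check of the spacing, assuming a hypothetical 100-frame clip:

import numpy as np

total_frames = 100  # hypothetical clip length
frame_indices = np.linspace(0, total_frames - 1, min(total_frames, 10), dtype=int)
print(frame_indices)  # [ 0 11 22 33 44 55 66 77 88 99]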