davanstrien (HF Staff) committed
Commit cea7723 · 1 Parent(s): b3cfa7b

Optimize default settings based on performance testing


- Increase default batch size from 8 to 32
- Increase default GPU memory utilization from 0.7 to 0.8
- Update README with the new defaults and a simple performance tip
- These changes provide a ~2-3x speedup based on testing
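
For reference, the new defaults apply without any flag changes; a minimal sketch of overriding them per run (dataset names and the override values are illustrative placeholders):

```bash
# Defaults after this commit: --batch-size 32, --gpu-memory-utilization 0.8
# Override them explicitly if your GPU has more (or less) headroom:
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
  your-input-dataset your-output-dataset \
  --batch-size 64 \
  --gpu-memory-utilization 0.85
```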

Files changed (4)
  1. README.md +56 -11
  2. __pycache__/dots-ocr.cpython-313.pyc +0 -0
  3. dots-ocr.py +729 -0
  4. nanonets-ocr.py +6 -6
README.md CHANGED
@@ -40,6 +40,16 @@ State-of-the-art document OCR using [nanonets/Nanonets-OCR-s](https://huggingfac
40
  - 🖼️ **Images** - Captions and descriptions included
41
  - ☑️ **Forms** - Checkboxes rendered as ☐/☑
42
 
43
  ## 💻 Usage Examples
44
 
45
  ### Run on HuggingFace Jobs (Recommended)
@@ -52,6 +62,14 @@ hf jobs uv run --flavor l4x1 \
52
  https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
53
  your-input-dataset your-output-dataset
54
 
55
  # Real example with UFO dataset 🛸
56
  hf jobs uv run \
57
  --flavor a10g-large \
@@ -62,7 +80,7 @@ hf jobs uv run \
62
  your-username/ufo-ocr \
63
  --image-column image \
64
  --max-model-len 16384 \
65
- --batch-size 64
66
 
67
  # Private dataset with custom settings
68
  hf jobs uv run --flavor l40sx1 \
@@ -96,6 +114,11 @@ uv run nanonets-ocr.py input-dataset output-dataset
96
  # Or run directly from URL
97
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
98
  input-dataset output-dataset
99
  ```
100
 
101
  ## 📁 Works With
@@ -104,15 +127,37 @@ Any HuggingFace dataset containing images - documents, forms, receipts, books, h
104
 
105
  ## 🎛️ Configuration Options
106
 
107
- | Option | Default | Description |
108
- | -------------------------- | ------- | --------------------------- |
109
- | `--image-column` | `image` | Column containing images |
110
- | `--batch-size` | `8` | Images processed together |
111
- | `--max-model-len` | `8192` | Max context length |
112
- | `--max-tokens` | `4096` | Max output tokens |
113
- | `--gpu-memory-utilization` | `0.7` | GPU memory usage |
114
- | `--split` | `train` | Dataset split to process |
115
- | `--max-samples` | None | Limit samples (for testing) |
116
- | `--private` | False | Make output dataset private |
117
 
118
  More OCR VLM Scripts coming soon! Stay tuned for updates!
 
40
  - 🖼️ **Images** - Captions and descriptions included
41
  - ☑️ **Forms** - Checkboxes rendered as ☐/☑
42
 
43
+ ### dots.ocr (`dots-ocr.py`)
44
+
45
+ Advanced document layout analysis and OCR using [rednote-hilab/dots.ocr](https://huggingface.co/rednote-hilab/dots.ocr), which provides:
46
+
47
+ - 🎯 **Layout detection** - Bounding boxes for all document elements
48
+ - 📑 **Category classification** - Text, Title, Table, Formula, Picture, etc.
49
+ - 📖 **Reading order** - Preserves natural reading flow
50
+ - 🌍 **Multilingual support** - Handles multiple languages seamlessly
51
+ - 🔧 **Flexible output** - JSON, structured columns, or markdown
52
+
53
  ## 💻 Usage Examples
54
 
55
  ### Run on HuggingFace Jobs (Recommended)
 
62
  https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
63
  your-input-dataset your-output-dataset
64
 
65
+ # Document layout analysis with dots.ocr
66
+ hf jobs uv run --flavor l4x1 \
67
+ https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr.py \
68
+ your-input-dataset your-layout-dataset \
69
+ --mode layout-all \
70
+ --output-format structured \
71
+ --use-transformers # More compatible backend
72
+
73
  # Real example with UFO dataset 🛸
74
  hf jobs uv run \
75
  --flavor a10g-large \
 
80
  your-username/ufo-ocr \
81
  --image-column image \
82
  --max-model-len 16384 \
83
+ --batch-size 128
84
 
85
  # Private dataset with custom settings
86
  hf jobs uv run --flavor l40sx1 \
 
114
  # Or run directly from URL
115
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
116
  input-dataset output-dataset
117
+
118
+ # dots.ocr examples
119
+ uv run dots-ocr.py documents analyzed-docs # Full layout + OCR
120
+ uv run dots-ocr.py scans layouts --mode layout-only # Layout only
121
+ uv run dots-ocr.py papers markdown --output-format markdown # As markdown
122
  ```
123
 
124
  ## 📁 Works With
 
127
 
128
  ## 🎛️ Configuration Options
129
 
130
+ ### Common Options (Both Scripts)
131
+
132
+ | Option | Default | Description |
133
+ | -------------------------- | ------- | ----------------------------- |
134
+ | `--image-column` | `image` | Column containing images |
135
+ | `--batch-size` | `32` | Images processed together |
136
+ | `--max-model-len` | `8192`/`24000`* | Max context length |
137
+ | `--max-tokens` | `4096`/`16384`* | Max output tokens |
138
+ | `--gpu-memory-utilization` | `0.8` | GPU memory usage (0.0-1.0) |
139
+ | `--split` | `train` | Dataset split to process |
140
+ | `--max-samples` | None | Limit samples (for testing) |
141
+ | `--private` | False | Make output dataset private |
142
+
143
+ *dots.ocr uses higher defaults (24000/16384)
144
+
145
+ ### dots.ocr Specific Options
146
+
147
+ | Option | Default | Description |
148
+ | ------------------- | ------- | ------------------------------------- |
149
+ | `--mode` | `layout-all` | Processing mode: `layout-all`, `layout-only`, `ocr`, `grounding-ocr` |
150
+ | `--output-format` | `json` | Output format: `json`, `structured`, `markdown` |
151
+ | `--filter-category` | None | Filter by layout category (e.g., `Table`, `Formula`) |
152
+ | `--output-column` | `dots_ocr_output` | Column name for JSON output |
153
+ | `--bbox-column` | `layout_bboxes` | Column for bounding boxes (structured mode) |
154
+ | `--category-column` | `layout_categories` | Column for categories (structured mode) |
155
+ | `--text-column` | `layout_texts` | Column for texts (structured mode) |
156
+ | `--markdown-column` | `markdown` | Column for markdown output |
157
+ | `--use-transformers`| `False` | Use transformers backend instead of vLLM (more compatible) |
158
+
159
+ 💡 **Performance tip**: Increase the batch size for faster processing (e.g., `--batch-size 128` on A10G GPUs).
160
+
161
+ ⚠️ **dots.ocr Note**: If you encounter vLLM initialization errors, use `--use-transformers` for a more compatible (but slower) backend.
162
 
163
  More OCR VLM Scripts coming soon! Stay tuned for updates!
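
As a side note on the structured output format described above, here is a minimal sketch of reading the default columns back after a run with `--output-format structured` (the dataset repo id is a placeholder; column names are the script defaults `layout_bboxes`, `layout_categories`, `layout_texts`):

```python
from datasets import load_dataset

# Load a dataset produced by dots-ocr.py with --output-format structured
# (repo id is a placeholder; column names are the script's defaults)
ds = load_dataset("your-username/your-layout-dataset", split="train")

row = ds[0]
for bbox, category, text in zip(
    row["layout_bboxes"], row["layout_categories"], row["layout_texts"]
):
    # Each bbox is [x1, y1, x2, y2]; category is one of the dots.ocr layout classes
    print(category, bbox, text[:60])
```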
__pycache__/dots-ocr.cpython-313.pyc ADDED
Binary file (22.6 kB).
 
dots-ocr.py ADDED
@@ -0,0 +1,729 @@
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets",
5
+ # "huggingface-hub[hf_transfer]",
6
+ # "pillow",
7
+ # "vllm",
8
+ # "transformers>=4.45.0",
9
+ # "qwen-vl-utils",
10
+ # "tqdm",
11
+ # "toolz",
12
+ # "torch",
13
+ # "flash-attn",
14
+ # ]
15
+ #
16
+ # ///
17
+
18
+ """
19
+ Document layout analysis and OCR using dots.ocr with vLLM.
20
+
21
+ This script processes document images through the dots.ocr model to extract
22
+ layout information, text content, or both. Supports multiple output formats
23
+ including JSON, structured columns, and markdown.
24
+
25
+ Features:
26
+ - Layout detection with bounding boxes and categories
27
+ - Text extraction with reading order preservation
28
+ - Multiple prompt modes for different tasks
29
+ - Flexible output formats
30
+ - Multilingual document support
31
+ """
32
+
33
+ import argparse
34
+ import base64
35
+ import io
36
+ import json
37
+ import logging
38
+ import os
39
+ import sys
40
+ from typing import Any, Dict, List, Optional, Union
41
+
42
+ import torch
43
+ from datasets import load_dataset
44
+ from huggingface_hub import login
45
+ from PIL import Image
46
+ from toolz import partition_all
47
+ from tqdm.auto import tqdm
48
+
49
+ # Import both vLLM and transformers - we'll use based on flag
50
+ try:
51
+ from vllm import LLM, SamplingParams
52
+ VLLM_AVAILABLE = True
53
+ except ImportError:
54
+ VLLM_AVAILABLE = False
55
+
56
+ from transformers import AutoModelForCausalLM, AutoProcessor
57
+
58
+ logging.basicConfig(level=logging.INFO)
59
+ logger = logging.getLogger(__name__)
60
+
61
+ # Try to import qwen_vl_utils for transformers backend
62
+ try:
63
+ from qwen_vl_utils import process_vision_info
64
+ QWEN_VL_AVAILABLE = True
65
+ except ImportError:
66
+ QWEN_VL_AVAILABLE = False
67
+ logger.warning("qwen_vl_utils not available, transformers backend may not work properly")
68
+
69
+ # Prompt definitions from dots.ocr's dict_promptmode_to_prompt
70
+ PROMPT_MODES = {
71
+ "layout-all": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.
72
+
73
+ 1. Bbox format: [x1, y1, x2, y2]
74
+
75
+ 2. Layout Categories: The possible categories are ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'].
76
+
77
+ 3. Text Extraction & Formatting Rules:
78
+ - Picture: For the 'Picture' category, the text field should be omitted.
79
+ - Formula: Format its text as LaTeX.
80
+ - Table: Format its text as HTML.
81
+ - All Others (Text, Title, etc.): Format their text as Markdown.
82
+
83
+ 4. Constraints:
84
+ - The output text must be the original text from the image, with no translation.
85
+ - All layout elements must be sorted according to human reading order.
86
+
87
+ 5. Final Output: The entire output must be a single JSON object.
88
+ """,
89
+
90
+ "layout-only": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",
91
+
92
+ "ocr": """Extract the text content from this image.""",
93
+
94
+ "grounding-ocr": """Extract text from the given bounding box on the image (format: [x1, y1, x2, y2]).\nBounding Box:\n"""
95
+ }
96
+
97
+
98
+ def check_cuda_availability():
99
+ """Check if CUDA is available and exit if not."""
100
+ if not torch.cuda.is_available():
101
+ logger.error("CUDA is not available. This script requires a GPU.")
102
+ logger.error("Please run on a machine with a CUDA-capable GPU.")
103
+ sys.exit(1)
104
+ else:
105
+ logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
106
+
107
+
108
+ def make_dots_message(
109
+ image: Union[Image.Image, Dict[str, Any], str],
110
+ mode: str = "layout-all",
111
+ bbox: Optional[List[int]] = None,
112
+ ) -> List[Dict]:
113
+ """Create chat message for dots.ocr processing."""
114
+ # Convert to PIL Image if needed
115
+ if isinstance(image, Image.Image):
116
+ pil_img = image
117
+ elif isinstance(image, dict) and "bytes" in image:
118
+ pil_img = Image.open(io.BytesIO(image["bytes"]))
119
+ elif isinstance(image, str):
120
+ pil_img = Image.open(image)
121
+ else:
122
+ raise ValueError(f"Unsupported image type: {type(image)}")
123
+
124
+ # Convert to base64 data URI
125
+ buf = io.BytesIO()
126
+ pil_img.save(buf, format="PNG")
127
+ data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
128
+
129
+ # Get prompt for the specified mode
130
+ prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])
131
+
132
+ # Add bbox for grounding-ocr mode
133
+ if mode == "grounding-ocr" and bbox:
134
+ prompt = prompt + str(bbox)
135
+
136
+ # Return message in vLLM format
137
+ return [
138
+ {
139
+ "role": "user",
140
+ "content": [
141
+ {"type": "image_url", "image_url": {"url": data_uri}},
142
+ {"type": "text", "text": prompt},
143
+ ],
144
+ }
145
+ ]
146
+
147
+
148
+ def parse_dots_output(
149
+ output: str,
150
+ output_format: str = "json",
151
+ filter_category: Optional[str] = None,
152
+ mode: str = "layout-all",
153
+ ) -> Union[str, Dict[str, List]]:
154
+ """Parse dots.ocr output and convert to requested format."""
155
+
156
+ # For simple OCR mode, return text directly
157
+ if mode == "ocr":
158
+ return output.strip()
159
+
160
+ try:
161
+ # Parse JSON output
162
+ data = json.loads(output.strip())
163
+
164
+ # Filter by category if requested
165
+ if filter_category and "categories" in data:
166
+ indices = [i for i, cat in enumerate(data["categories"]) if cat == filter_category]
167
+ filtered_data = {
168
+ "bboxes": [data["bboxes"][i] for i in indices],
169
+ "categories": [data["categories"][i] for i in indices],
170
+ }
171
+
172
+ # Only include texts if present (layout-all mode)
173
+ if "texts" in data:
174
+ filtered_data["texts"] = [data["texts"][i] for i in indices]
175
+
176
+ # Include reading_order if present
177
+ if "reading_order" in data:
178
+ # Filter reading order to only include indices that are in our filtered set
179
+ filtered_reading_order = []
180
+ for group in data.get("reading_order", []):
181
+ filtered_group = [idx for idx in group if idx in indices]
182
+ if filtered_group:
183
+ # Remap indices to new positions
184
+ remapped_group = [indices.index(idx) for idx in filtered_group]
185
+ filtered_reading_order.append(remapped_group)
186
+ if filtered_reading_order:
187
+ filtered_data["reading_order"] = filtered_reading_order
188
+
189
+ data = filtered_data
190
+
191
+ if output_format == "json":
192
+ return json.dumps(data, ensure_ascii=False)
193
+
194
+ elif output_format == "structured":
195
+ # Return structured data for column creation
196
+ result = {
197
+ "bboxes": data.get("bboxes", []),
198
+ "categories": data.get("categories", []),
199
+ }
200
+
201
+ # Only include texts for layout-all mode
202
+ if mode == "layout-all":
203
+ result["texts"] = data.get("texts", [])
204
+ else:
205
+ result["texts"] = []
206
+
207
+ return result
208
+
209
+ elif output_format == "markdown":
210
+ # Convert to markdown format
211
+ # Only works well with layout-all mode
212
+ if mode != "layout-all" or "texts" not in data:
213
+ logger.warning("Markdown format works best with layout-all mode")
214
+ return json.dumps(data, ensure_ascii=False)
215
+
216
+ md_lines = []
217
+ texts = data.get("texts", [])
218
+ categories = data.get("categories", [])
219
+ reading_order = data.get("reading_order", [])
220
+
221
+ # If reading order is provided, use it
222
+ if reading_order:
223
+ for group in reading_order:
224
+ for idx in group:
225
+ if idx < len(texts) and idx < len(categories):
226
+ text = texts[idx]
227
+ category = categories[idx]
228
+ md_lines.append(format_markdown_text(text, category))
229
+ else:
230
+ # Fall back to sequential order
231
+ for text, category in zip(texts, categories):
232
+ md_lines.append(format_markdown_text(text, category))
233
+
234
+ return "\n".join(md_lines)
235
+
236
+ except json.JSONDecodeError as e:
237
+ logger.warning(f"Failed to parse JSON output: {e}")
238
+ return output.strip()
239
+ except Exception as e:
240
+ logger.error(f"Error parsing output: {e}")
241
+ return output.strip()
242
+
243
+
244
+ def format_markdown_text(text: str, category: str) -> str:
245
+ """Format text based on its category for markdown output."""
246
+ if category == "Title":
247
+ return f"# {text}\n"
248
+ elif category == "Section-header":
249
+ return f"## {text}\n"
250
+ elif category == "List-item":
251
+ return f"- {text}"
252
+ elif category == "Page-header" or category == "Page-footer":
253
+ return f"_{text}_\n"
254
+ elif category == "Caption":
255
+ return f"**{text}**\n"
256
+ elif category == "Footnote":
257
+ return f"[^{text}]\n"
258
+ elif category == "Table":
259
+ # Tables are already in HTML format from dots.ocr
260
+ return f"\n{text}\n"
261
+ elif category == "Formula":
262
+ # Formulas are already in LaTeX format
263
+ return f"\n${text}$\n"
264
+ elif category == "Picture":
265
+ # Pictures don't have text in dots.ocr output
266
+ return "\n![Image]()\n"
267
+ else: # Text and any other categories
268
+ return f"{text}\n"
269
+
270
+
271
+ def process_with_transformers(
272
+ images: List[Union[Image.Image, Dict[str, Any], str]],
273
+ model,
274
+ processor,
275
+ mode: str = "layout-all",
276
+ max_new_tokens: int = 16384,
277
+ ) -> List[str]:
278
+ """Process images using transformers instead of vLLM."""
279
+ outputs = []
280
+
281
+ for image in tqdm(images, desc="Processing with transformers"):
282
+ # Convert to PIL Image if needed
283
+ if isinstance(image, dict) and "bytes" in image:
284
+ pil_image = Image.open(io.BytesIO(image["bytes"]))
285
+ elif isinstance(image, str):
286
+ pil_image = Image.open(image)
287
+ else:
288
+ pil_image = image
289
+
290
+ # Get prompt for the mode
291
+ prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])
292
+
293
+ # Create messages in the format expected by dots.ocr
294
+ messages = [
295
+ {
296
+ "role": "user",
297
+ "content": [
298
+ {"type": "image", "image": pil_image},
299
+ {"type": "text", "text": prompt}
300
+ ]
301
+ }
302
+ ]
303
+
304
+ # Preparation for inference (following demo code)
305
+ text = processor.apply_chat_template(
306
+ messages,
307
+ tokenize=False,
308
+ add_generation_prompt=True
309
+ )
310
+
311
+ if QWEN_VL_AVAILABLE:
312
+ # Use process_vision_info as shown in demo
313
+ image_inputs, video_inputs = process_vision_info(messages)
314
+ inputs = processor(
315
+ text=[text],
316
+ images=image_inputs,
317
+ videos=video_inputs,
318
+ padding=True,
319
+ return_tensors="pt",
320
+ )
321
+ else:
322
+ # Fallback approach without qwen_vl_utils
323
+ inputs = processor(
324
+ text=text,
325
+ images=[pil_image],
326
+ return_tensors="pt",
327
+ )
328
+
329
+ inputs = inputs.to(model.device)
330
+
331
+ # Generate
332
+ with torch.no_grad():
333
+ generated_ids = model.generate(
334
+ **inputs,
335
+ max_new_tokens=max_new_tokens,
336
+ temperature=0.0,
337
+ do_sample=False,
338
+ )
339
+
340
+ # Decode output (following demo code)
341
+ generated_ids_trimmed = [
342
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
343
+ ]
344
+ output_text = processor.batch_decode(
345
+ generated_ids_trimmed,
346
+ skip_special_tokens=True,
347
+ clean_up_tokenization_spaces=False
348
+ )[0]
349
+
350
+ outputs.append(output_text.strip())
351
+
352
+ return outputs
353
+
354
+
355
+ def main(
356
+ input_dataset: str,
357
+ output_dataset: str,
358
+ image_column: str = "image",
359
+ mode: str = "layout-all",
360
+ output_format: str = "json",
361
+ filter_category: Optional[str] = None,
362
+ batch_size: int = 32,
363
+ model: str = "rednote-hilab/dots.ocr",
364
+ max_model_len: int = 24000,
365
+ max_tokens: int = 16384,
366
+ gpu_memory_utilization: float = 0.8,
367
+ hf_token: Optional[str] = None,
368
+ split: str = "train",
369
+ max_samples: Optional[int] = None,
370
+ private: bool = False,
371
+ use_transformers: bool = False,
372
+ # Column name parameters
373
+ output_column: str = "dots_ocr_output",
374
+ bbox_column: str = "layout_bboxes",
375
+ category_column: str = "layout_categories",
376
+ text_column: str = "layout_texts",
377
+ markdown_column: str = "markdown",
378
+ ):
379
+ """Process images from HF dataset through dots.ocr model."""
380
+
381
+ # Check CUDA availability first
382
+ check_cuda_availability()
383
+
384
+ # Enable HF_TRANSFER for faster downloads
385
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
386
+
387
+ # Login to HF if token provided
388
+ HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
389
+ if HF_TOKEN:
390
+ login(token=HF_TOKEN)
391
+
392
+ # Load dataset
393
+ logger.info(f"Loading dataset: {input_dataset}")
394
+ dataset = load_dataset(input_dataset, split=split)
395
+
396
+ # Validate image column
397
+ if image_column not in dataset.column_names:
398
+ raise ValueError(
399
+ f"Column '{image_column}' not found. Available: {dataset.column_names}"
400
+ )
401
+
402
+ # Limit samples if requested
403
+ if max_samples:
404
+ dataset = dataset.select(range(min(max_samples, len(dataset))))
405
+ logger.info(f"Limited to {len(dataset)} samples")
406
+
407
+ # Process images in batches
408
+ all_outputs = []
409
+
410
+ if use_transformers or not VLLM_AVAILABLE:
411
+ # Use transformers
412
+ if not use_transformers and not VLLM_AVAILABLE:
413
+ logger.warning("vLLM not available, falling back to transformers")
414
+
415
+ logger.info(f"Initializing transformers with model: {model}")
416
+ hf_model = AutoModelForCausalLM.from_pretrained(
417
+ model,
418
+ torch_dtype=torch.bfloat16,
419
+ device_map="auto",
420
+ trust_remote_code=True,
421
+ )
422
+ processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)
423
+
424
+ logger.info(f"Processing {len(dataset)} images with transformers")
425
+ logger.info(f"Mode: {mode}, Output format: {output_format}")
426
+
427
+ # Process all images
428
+ all_images = [dataset[i][image_column] for i in range(len(dataset))]
429
+ raw_outputs = process_with_transformers(
430
+ all_images,
431
+ hf_model,
432
+ processor,
433
+ mode=mode,
434
+ max_new_tokens=max_tokens
435
+ )
436
+
437
+ # Parse outputs
438
+ for raw_text in raw_outputs:
439
+ parsed = parse_dots_output(raw_text, output_format, filter_category, mode)
440
+ all_outputs.append(parsed)
441
+
442
+ else:
443
+ # Use vLLM
444
+ logger.info(f"Initializing vLLM with model: {model}")
445
+ llm = LLM(
446
+ model=model,
447
+ trust_remote_code=True,
448
+ max_model_len=max_model_len,
449
+ gpu_memory_utilization=gpu_memory_utilization,
450
+ )
451
+
452
+ sampling_params = SamplingParams(
453
+ temperature=0.0, # Deterministic for OCR
454
+ max_tokens=max_tokens,
455
+ )
456
+
457
+ logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
458
+ logger.info(f"Mode: {mode}, Output format: {output_format}")
459
+
460
+ # Process in batches to avoid memory issues
461
+ for batch_indices in tqdm(
462
+ partition_all(batch_size, range(len(dataset))),
463
+ total=(len(dataset) + batch_size - 1) // batch_size,
464
+ desc="dots.ocr processing",
465
+ ):
466
+ batch_indices = list(batch_indices)
467
+ batch_images = [dataset[i][image_column] for i in batch_indices]
468
+
469
+ try:
470
+ # Create messages for batch
471
+ batch_messages = [make_dots_message(img, mode=mode) for img in batch_images]
472
+
473
+ # Process with vLLM
474
+ outputs = llm.chat(batch_messages, sampling_params)
475
+
476
+ # Extract and parse outputs
477
+ for output in outputs:
478
+ raw_text = output.outputs[0].text.strip()
479
+ parsed = parse_dots_output(raw_text, output_format, filter_category, mode)
480
+ all_outputs.append(parsed)
481
+
482
+ except Exception as e:
483
+ logger.error(f"Error processing batch: {e}")
484
+ # Add error placeholders for failed batch
485
+ all_outputs.extend([{"error": str(e)}] * len(batch_images))
486
+
487
+ # Add columns to dataset based on output format
488
+ logger.info("Adding output columns to dataset")
489
+
490
+ if output_format == "json":
491
+ dataset = dataset.add_column(output_column, all_outputs)
492
+
493
+ elif output_format == "structured":
494
+ # Extract lists from structured outputs
495
+ bboxes = []
496
+ categories = []
497
+ texts = []
498
+
499
+ for output in all_outputs:
500
+ if isinstance(output, dict) and "error" not in output:
501
+ bboxes.append(output.get("bboxes", []))
502
+ categories.append(output.get("categories", []))
503
+ texts.append(output.get("texts", []))
504
+ else:
505
+ bboxes.append([])
506
+ categories.append([])
507
+ texts.append([])
508
+
509
+ dataset = dataset.add_column(bbox_column, bboxes)
510
+ dataset = dataset.add_column(category_column, categories)
511
+ dataset = dataset.add_column(text_column, texts)
512
+
513
+ elif output_format == "markdown":
514
+ dataset = dataset.add_column(markdown_column, all_outputs)
515
+
516
+ else: # ocr mode
517
+ dataset = dataset.add_column(output_column, all_outputs)
518
+
519
+ # Push to hub
520
+ logger.info(f"Pushing to {output_dataset}")
521
+ dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
522
+
523
+ logger.info("✅ dots.ocr processing complete!")
524
+ logger.info(
525
+ f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
526
+ )
527
+
528
+
529
+ if __name__ == "__main__":
530
+ # Show example usage if no arguments
531
+ if len(sys.argv) == 1:
532
+ print("=" * 80)
533
+ print("dots.ocr Document Layout Analysis and OCR")
534
+ print("=" * 80)
535
+ print("\nThis script processes document images using the dots.ocr model to")
536
+ print("extract layout information, text content, or both.")
537
+ print("\nFeatures:")
538
+ print("- Layout detection with bounding boxes and categories")
539
+ print("- Text extraction with reading order preservation")
540
+ print("- Multiple output formats (JSON, structured, markdown)")
541
+ print("- Multilingual document support")
542
+ print("\nExample usage:")
543
+ print("\n1. Full layout analysis + OCR (default):")
544
+ print(" uv run dots-ocr.py document-images analyzed-docs")
545
+ print("\n2. Layout detection only:")
546
+ print(" uv run dots-ocr.py scanned-pdfs layout-analysis --mode layout-only")
547
+ print("\n3. Simple OCR (text only):")
548
+ print(" uv run dots-ocr.py documents extracted-text --mode ocr")
549
+ print("\n4. Convert to markdown:")
550
+ print(" uv run dots-ocr.py papers papers-markdown --output-format markdown")
551
+ print("\n5. Extract only tables:")
552
+ print(" uv run dots-ocr.py reports table-data --filter-category Table")
553
+ print("\n6. Structured output with custom columns:")
554
+ print(" uv run dots-ocr.py docs analyzed \\")
555
+ print(" --output-format structured \\")
556
+ print(" --bbox-column boxes \\")
557
+ print(" --category-column types \\")
558
+ print(" --text-column content")
559
+ print("\n7. Process a subset for testing:")
560
+ print(" uv run dots-ocr.py large-dataset test-output --max-samples 10")
561
+ print("\n8. Use transformers backend (more compatible):")
562
+ print(" uv run dots-ocr.py documents analyzed --use-transformers")
563
+ print("\n9. Running on HF Jobs:")
564
+ print(" hf jobs run --gpu l4x1 \\")
565
+ print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
566
+ print(
567
+ " uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr.py \\"
568
+ )
569
+ print(" your-document-dataset \\")
570
+ print(" your-analyzed-output \\")
571
+ print(" --use-transformers")
572
+ print("\n" + "=" * 80)
573
+ print("\nFor full help, run: uv run dots-ocr.py --help")
574
+ sys.exit(0)
575
+
576
+ parser = argparse.ArgumentParser(
577
+ description="Document layout analysis and OCR using dots.ocr",
578
+ formatter_class=argparse.RawDescriptionHelpFormatter,
579
+ epilog="""
580
+ Modes:
581
+ layout-all - Extract layout + text content (default)
582
+ layout-only - Extract only layout information (bbox + category)
583
+ ocr - Extract only text content
584
+ grounding-ocr - Extract text from specific bbox (requires --bbox)
585
+
586
+ Output Formats:
587
+ json - Raw JSON output from model (default)
588
+ structured - Separate columns for bboxes, categories, texts
589
+ markdown - Convert to markdown format
590
+
591
+ Examples:
592
+ # Basic layout + OCR
593
+ uv run dots-ocr.py my-docs analyzed-docs
594
+
595
+ # Layout detection only
596
+ uv run dots-ocr.py papers layouts --mode layout-only
597
+
598
+ # Convert to markdown
599
+ uv run dots-ocr.py scans readable --output-format markdown
600
+
601
+ # Extract only formulas
602
+ uv run dots-ocr.py math-docs formulas --filter-category Formula
603
+ """,
604
+ )
605
+
606
+ parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
607
+ parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
608
+ parser.add_argument(
609
+ "--image-column",
610
+ default="image",
611
+ help="Column containing images (default: image)",
612
+ )
613
+ parser.add_argument(
614
+ "--mode",
615
+ choices=["layout-all", "layout-only", "ocr", "grounding-ocr"],
616
+ default="layout-all",
617
+ help="Processing mode (default: layout-all)",
618
+ )
619
+ parser.add_argument(
620
+ "--output-format",
621
+ choices=["json", "structured", "markdown"],
622
+ default="json",
623
+ help="Output format (default: json)",
624
+ )
625
+ parser.add_argument(
626
+ "--filter-category",
627
+ choices=['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer',
628
+ 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'],
629
+ help="Filter results by layout category",
630
+ )
631
+ parser.add_argument(
632
+ "--batch-size",
633
+ type=int,
634
+ default=32,
635
+ help="Batch size for processing (default: 32)",
636
+ )
637
+ parser.add_argument(
638
+ "--model",
639
+ default="rednote-hilab/dots.ocr",
640
+ help="Model to use (default: rednote-hilab/dots.ocr)",
641
+ )
642
+ parser.add_argument(
643
+ "--max-model-len",
644
+ type=int,
645
+ default=24000,
646
+ help="Maximum model context length (default: 24000)",
647
+ )
648
+ parser.add_argument(
649
+ "--max-tokens",
650
+ type=int,
651
+ default=16384,
652
+ help="Maximum tokens to generate (default: 16384)",
653
+ )
654
+ parser.add_argument(
655
+ "--gpu-memory-utilization",
656
+ type=float,
657
+ default=0.8,
658
+ help="GPU memory utilization (default: 0.8)",
659
+ )
660
+ parser.add_argument("--hf-token", help="Hugging Face API token")
661
+ parser.add_argument(
662
+ "--split", default="train", help="Dataset split to use (default: train)"
663
+ )
664
+ parser.add_argument(
665
+ "--max-samples",
666
+ type=int,
667
+ help="Maximum number of samples to process (for testing)",
668
+ )
669
+ parser.add_argument(
670
+ "--private", action="store_true", help="Make output dataset private"
671
+ )
672
+ parser.add_argument(
673
+ "--use-transformers",
674
+ action="store_true",
675
+ help="Use transformers instead of vLLM (more compatible but slower)",
676
+ )
677
+
678
+ # Column name customization
679
+ parser.add_argument(
680
+ "--output-column",
681
+ default="dots_ocr_output",
682
+ help="Column name for JSON output (default: dots_ocr_output)",
683
+ )
684
+ parser.add_argument(
685
+ "--bbox-column",
686
+ default="layout_bboxes",
687
+ help="Column name for bboxes in structured mode (default: layout_bboxes)",
688
+ )
689
+ parser.add_argument(
690
+ "--category-column",
691
+ default="layout_categories",
692
+ help="Column name for categories in structured mode (default: layout_categories)",
693
+ )
694
+ parser.add_argument(
695
+ "--text-column",
696
+ default="layout_texts",
697
+ help="Column name for texts in structured mode (default: layout_texts)",
698
+ )
699
+ parser.add_argument(
700
+ "--markdown-column",
701
+ default="markdown",
702
+ help="Column name for markdown output (default: markdown)",
703
+ )
704
+
705
+ args = parser.parse_args()
706
+
707
+ main(
708
+ input_dataset=args.input_dataset,
709
+ output_dataset=args.output_dataset,
710
+ image_column=args.image_column,
711
+ mode=args.mode,
712
+ output_format=args.output_format,
713
+ filter_category=args.filter_category,
714
+ batch_size=args.batch_size,
715
+ model=args.model,
716
+ max_model_len=args.max_model_len,
717
+ max_tokens=args.max_tokens,
718
+ gpu_memory_utilization=args.gpu_memory_utilization,
719
+ hf_token=args.hf_token,
720
+ split=args.split,
721
+ max_samples=args.max_samples,
722
+ private=args.private,
723
+ use_transformers=args.use_transformers,
724
+ output_column=args.output_column,
725
+ bbox_column=args.bbox_column,
726
+ category_column=args.category_column,
727
+ text_column=args.text_column,
728
+ markdown_column=args.markdown_column,
729
+ )
nanonets-ocr.py CHANGED
@@ -91,11 +91,11 @@ def main(
91
  input_dataset: str,
92
  output_dataset: str,
93
  image_column: str = "image",
94
- batch_size: int = 8,
95
  model: str = "nanonets/Nanonets-OCR-s",
96
  max_model_len: int = 8192,
97
  max_tokens: int = 4096,
98
- gpu_memory_utilization: float = 0.7,
99
  hf_token: str = None,
100
  split: str = "train",
101
  max_samples: int = None,
@@ -251,8 +251,8 @@ Examples:
251
  parser.add_argument(
252
  "--batch-size",
253
  type=int,
254
- default=8,
255
- help="Batch size for processing (default: 8)",
256
  )
257
  parser.add_argument(
258
  "--model",
@@ -274,8 +274,8 @@ Examples:
274
  parser.add_argument(
275
  "--gpu-memory-utilization",
276
  type=float,
277
- default=0.7,
278
- help="GPU memory utilization (default: 0.7)",
279
  )
280
  parser.add_argument("--hf-token", help="Hugging Face API token")
281
  parser.add_argument(
 
91
  input_dataset: str,
92
  output_dataset: str,
93
  image_column: str = "image",
94
+ batch_size: int = 32,
95
  model: str = "nanonets/Nanonets-OCR-s",
96
  max_model_len: int = 8192,
97
  max_tokens: int = 4096,
98
+ gpu_memory_utilization: float = 0.8,
99
  hf_token: str = None,
100
  split: str = "train",
101
  max_samples: int = None,
 
251
  parser.add_argument(
252
  "--batch-size",
253
  type=int,
254
+ default=32,
255
+ help="Batch size for processing (default: 32)",
256
  )
257
  parser.add_argument(
258
  "--model",
 
274
  parser.add_argument(
275
  "--gpu-memory-utilization",
276
  type=float,
277
+ default=0.8,
278
+ help="GPU memory utilization (default: 0.8)",
279
  )
280
  parser.add_argument("--hf-token", help="Hugging Face API token")
281
  parser.add_argument(