# NayanaDocs-Global-45k-webdataset

Part of the Foundational Datasets collection; used for SFT (supervised fine-tuning).
This is a large-scale multilingual document OCR dataset containing approximately 400 GB of images with comprehensive annotations across multiple global languages plus English. The dataset is stored in WebDataset format, using TAR archives for efficient streaming and processing.
## Dataset Structure

This dataset uses the WebDataset format, where each sample is stored as separate files within TAR archives:

- `XXXXXXXX.jpg`: the document image
- `XXXXXXXX.image_id.txt`: unique identifier for the image
- `XXXXXXXX.font_used.txt`: font information used in the document
- `XXXXXXXX.regions.json`: text regions with bounding boxes and OCR results
- `XXXXXXXX.vqa.json`: Visual Question Answering annotations

## Usage

### Using the webdataset library

```python
import glob
import io
import json

import webdataset as wds
from PIL import Image

# Create a WebDataset from TAR files.
# wds.WebDataset does not expand shell globs, so collect shard paths first.
tar_files = sorted(glob.glob("path/to/language/tarfiles/*.tar"))
dataset = wds.WebDataset(tar_files)

# Process the dataset
for sample in dataset:
    # Access the image
    image_data = sample["jpg"]  # raw image bytes
    image = Image.open(io.BytesIO(image_data))

    # Access the metadata
    image_id = sample["image_id.txt"].decode("utf-8")
    font_used = sample["font_used.txt"].decode("utf-8")
    regions = json.loads(sample["regions.json"].decode("utf-8"))
    vqa_data = json.loads(sample["vqa.json"].decode("utf-8"))

    print(f"Image ID: {image_id}")
    print(f"Font: {font_used}")
    print(f"Regions: {len(regions)}")
    print(f"VQA entries: {len(vqa_data)}")
```
### Using Hugging Face Datasets

```python
from datasets import load_dataset
import json

# Load a specific language subset
dataset = load_dataset("webdataset", data_dir="hf://datasets/Nayana-cognitivelab/NayanaDocs-Global-45k-webdataset/fr", split="train")

# Or with streaming for memory efficiency
dataset = load_dataset("webdataset", data_dir="hf://datasets/Nayana-cognitivelab/NayanaDocs-Global-45k-webdataset/fr", split="train", streaming=True)

# Access the data
for sample in dataset:
    image = sample["jpg"]                          # PIL Image
    image_id = sample["image_id.txt"]              # string
    font_used = sample["font_used.txt"]            # string
    regions = json.loads(sample["regions.json"])   # parsed JSON
    vqa_data = json.loads(sample["vqa.json"])      # parsed JSON
```
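Since this collection is used for SFT, a natural next step is flattening samples into instruction pairs. A hedged sketch built on the streaming loader above; the `question`/`answer` field names follow the vqa.json schema documented below, and the record layout is only illustrative:

```python
import json

from datasets import load_dataset

stream = load_dataset(
    "webdataset",
    data_dir="hf://datasets/Nayana-cognitivelab/NayanaDocs-Global-45k-webdataset/fr",
    split="train",
    streaming=True,
)

# Build (image, prompt, response) records from the VQA annotations.
sft_records = []
for sample in stream.take(100):  # first 100 samples only, for illustration
    vqa = json.loads(sample["vqa.json"])
    for q in vqa["questions"]:
        sft_records.append(
            {
                "image": sample["jpg"],
                "prompt": q["question"],
                "response": q["answer"],
            }
        )
```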
### Manual download of individual shards

```python
from huggingface_hub import hf_hub_download
import webdataset as wds

# Download a specific TAR file
tar_path = hf_hub_download(
    repo_id="Nayana-cognitivelab/NayanaDocs-Global-45k-webdataset",
    filename="bn/bn_00000.tar",
    repo_type="dataset",
)

# Process it with webdataset
dataset = wds.WebDataset(tar_path)
for sample in dataset:
    # Process the sample
    pass
```
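To find out which shards exist for a language before downloading, the repo file listing can be queried; a minimal sketch using `huggingface_hub.list_repo_files`:

```python
from huggingface_hub import list_repo_files

# List every file in the dataset repo, then keep the Bengali ("bn") shards.
files = list_repo_files(
    "Nayana-cognitivelab/NayanaDocs-Global-45k-webdataset",
    repo_type="dataset",
)
bn_shards = sorted(f for f in files if f.startswith("bn/") and f.endswith(".tar"))
print(bn_shards[:3])
```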
### Tips

- Use `streaming=True` for large datasets to avoid downloading everything at once
- Use the `webdataset` library directly for maximum performance

## Repository Structure

```
repository/
├── ar/
│   ├── ar_00000.tar
│   ├── ar_00001.tar
│   └── ...
├── fr/
│   ├── fr_00000.tar
│   ├── fr_00001.tar
│   └── ...
└── README.md
```
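Because shards follow this predictable layout, a single shard can also be streamed straight from the Hub without a full download. A sketch, assuming the standard `resolve/main` URL scheme:

```python
import webdataset as wds

# Stream one shard over HTTP; "pipe:curl" lets webdataset read the archive
# without saving it to disk first.
url = (
    "https://huggingface.co/datasets/Nayana-cognitivelab/"
    "NayanaDocs-Global-45k-webdataset/resolve/main/fr/fr_00000.tar"
)
dataset = wds.WebDataset(f"pipe:curl -s -L {url}")

for sample in dataset:
    print(sample["__key__"])  # the XXXXXXXX part of the member filenames
    break
```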
## Annotation Formats

### regions.json

```json
[
  {
    "bbox": {"xmin": 10, "ymin": 20, "xmax": 100, "ymax": 50},
    "english_text": "Original text",
    "translated_text": "Translated text",
    "layout_type": "paragraph",
    "region_id": 1
  }
]
```
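Assuming the `bbox` values are pixel coordinates (as the example suggests), the regions can be overlaid on the document image for a quick sanity check; a minimal Pillow sketch with a hypothetical `draw_regions` helper:

```python
from PIL import Image, ImageDraw


def draw_regions(image, regions):
    """Overlay region bounding boxes on a document image.

    Assumes bbox values are pixel coordinates.
    """
    annotated = image.copy()
    draw = ImageDraw.Draw(annotated)
    for region in regions:
        box = region["bbox"]
        draw.rectangle(
            [(box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])],
            outline="red",
            width=2,
        )
    return annotated
```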
### vqa.json

```json
{
  "questions": [
    {
      "question": "What is the main topic?",
      "answer": "Document analysis",
      "type": "topic",
      "options": ["Analysis", "Summary", "Review"]
    }
  ]
}
```
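Because each question carries an `options` list, VQA entries can be rendered as multiple-choice prompts for SFT. A sketch with a hypothetical `format_mcq` helper, using only the fields from the example above:

```python
def format_mcq(entry):
    """Format one vqa.json question as a multiple-choice prompt string."""
    options = "\n".join(
        f"{chr(ord('A') + i)}. {opt}" for i, opt in enumerate(entry["options"])
    )
    return f"{entry['question']}\n{options}\nAnswer:"


entry = {
    "question": "What is the main topic?",
    "answer": "Document analysis",
    "type": "topic",
    "options": ["Analysis", "Summary", "Review"],
}
print(format_mcq(entry))
```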