Commit 4ec1d3b
Parent(s): 0e5cde1
Import original code from Daniel Travaglia

Files changed:
- README.md +7 -9
- app.py +57 -0
- example-1.jpg +0 -0
- example-2.jpg +0 -0
- example-3.jpg +0 -0
- example-4.jpg +0 -0
- example-5.jpg +0 -0
- requirements.txt +3 -0
- sha-001_1970_88__1968_d-1-11.jpg +0 -0
- sha-001_1983_101__30_d-1-11.jpg +0 -0
- sha-001_1989_107__122_d-2-7.jpg +0 -0
- sha-001_1994_112__95_d-0-3.jpg +0 -0
- sha-001_1997_115__107_d-0-1.jpg +0 -0
- sha-001_1998_116__2259_d-3-13.jpg +0 -0
README.md
CHANGED
@@ -1,14 +1,12 @@
 ---
-title: Donut
-emoji:
-colorFrom:
-colorTo:
+title: Donut fine-tuned - Swiss trademarks
+emoji: 🍩
+colorFrom: purple
+colorTo: indigo
 sdk: gradio
-sdk_version:
+sdk_version: 3.29.0
 app_file: app.py
-pinned:
-license: mit
-short_description: ' Donut fine-tuned - Swiss trademarks'
+pinned: true
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
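Assembled from the hunk above, the Space's front matter after this commit reads as follows (the license and short_description fields are dropped):

---
title: Donut fine-tuned - Swiss trademarks
emoji: 🍩
colorFrom: purple
colorTo: indigo
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: true
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference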
app.py
ADDED
@@ -0,0 +1,57 @@
import re
import gradio as gr

import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

# updated
processor = DonutProcessor.from_pretrained("Travad98/donut-finetuned-sogc-trademarks-1883-2001")
model = VisionEncoderDecoderModel.from_pretrained("Travad98/donut-finetuned-sogc-trademarks-1883-2001")

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def process_document(image):
    # prepare encoder inputs
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # prepare decoder inputs
    task_prompt = "<s_cord-v2>"
    decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids

    # generate answer
    outputs = model.generate(
        pixel_values.to(device),
        decoder_input_ids=decoder_input_ids.to(device),
        max_length=model.decoder.config.max_position_embeddings,
        early_stopping=True,
        pad_token_id=processor.tokenizer.pad_token_id,
        eos_token_id=processor.tokenizer.eos_token_id,
        use_cache=True,
        num_beams=1,
        bad_words_ids=[[processor.tokenizer.unk_token_id]],
        return_dict_in_generate=True,
    )

    # postprocess
    sequence = processor.batch_decode(outputs.sequences)[0]
    # sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
    # sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token

    return processor.token2json(sequence)

description = "Gradio-based demo for Donut, an instance of VisionEncoderDecoderModel fine-tuned on the sogc-trademark-1883-2001 dataset. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2111.15664' target='_blank'>Donut: OCR-free Document Understanding Transformer</a> | <a href='https://github.com/clovaai/donut' target='_blank'>Github Repo</a></p>"

demo = gr.Interface(
    fn=process_document,
    inputs="image",
    outputs="json",
    title="Donut 🍩 for 🇨🇭 trademark registration events",
    description=description,
    article=article,
    enable_queue=True,
    examples=[["example-1.jpg"], ["example-2.jpg"], ["example-3.jpg"], ["example-4.jpg"], ["example-5.jpg"]],
    cache_examples=False)

demo.launch()
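The same inference flow can also be exercised outside the Gradio UI, which is handy for quick checks against the example scans added in this commit. The following is a minimal sketch, not part of the committed code: it assumes Pillow is installed for image loading (Gradio normally handles that step) and reuses the checkpoint IDs and task prompt from app.py.

from PIL import Image
import torch
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Same fine-tuned checkpoint as in app.py.
processor = DonutProcessor.from_pretrained("Travad98/donut-finetuned-sogc-trademarks-1883-2001")
model = VisionEncoderDecoderModel.from_pretrained("Travad98/donut-finetuned-sogc-trademarks-1883-2001")

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# One of the example scans committed alongside app.py.
image = Image.open("example-1.jpg").convert("RGB")

# Encode the page image and seed the decoder with the task prompt used by the Space.
pixel_values = processor(image, return_tensors="pt").pixel_values
decoder_input_ids = processor.tokenizer("<s_cord-v2>", add_special_tokens=False, return_tensors="pt").input_ids

outputs = model.generate(
    pixel_values.to(device),
    decoder_input_ids=decoder_input_ids.to(device),
    max_length=model.decoder.config.max_position_embeddings,
    pad_token_id=processor.tokenizer.pad_token_id,
    eos_token_id=processor.tokenizer.eos_token_id,
    bad_words_ids=[[processor.tokenizer.unk_token_id]],
    return_dict_in_generate=True,
)

# Decode the generated tokens and convert them into the structured output.
sequence = processor.batch_decode(outputs.sequences)[0]
print(processor.token2json(sequence))

Importing process_document from app.py directly is less convenient for this purpose, since demo.launch() runs at import time.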
example-1.jpg
ADDED
example-2.jpg
ADDED
example-3.jpg
ADDED
example-4.jpg
ADDED
example-5.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,3 @@
torch
git+https://github.com/huggingface/transformers.git
sentencepiece
sha-001_1970_88__1968_d-1-11.jpg
ADDED
sha-001_1983_101__30_d-1-11.jpg
ADDED
sha-001_1989_107__122_d-2-7.jpg
ADDED
sha-001_1994_112__95_d-0-3.jpg
ADDED
sha-001_1997_115__107_d-0-1.jpg
ADDED
sha-001_1998_116__2259_d-3-13.jpg
ADDED