prithivMLmods committed
Commit 325f281 · verified · 1 Parent(s): 6710b44

upload notebooks
Imgscope-OCR-2B-0527/Imgscope-OCR-2B-05270-Video-Understanding/Imgscope-OCR-2B-0527-Video-Understanding.ipynb ADDED
@@ -0,0 +1,164 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "XKQwuI75LWLA"
+ },
+ "outputs": [],
+ "source": [
+ "%%capture\n",
+ "!pip install gradio transformers pillow opencv-python\n",
+ "!pip install accelerate torchvision torch huggingface_hub\n",
+ "!pip install hf_xet qwen-vl-utils gradio_client\n",
+ "!pip install transformers-stream-generator spaces"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "import uuid\n",
+ "import time\n",
+ "from threading import Thread\n",
+ "\n",
+ "import gradio as gr\n",
+ "import torch\n",
+ "import numpy as np\n",
+ "import cv2\n",
+ "from PIL import Image\n",
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor\n",
+ "\n",
+ "# Use CUDA if available\n",
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+ "\n",
+ "# Load the Imgscope-OCR-2B-0527 model and processor\n",
+ "MODEL_ID = \"prithivMLmods/Imgscope-OCR-2B-0527\"\n",
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
+ "model = Qwen2VLForConditionalGeneration.from_pretrained(\n",
+ "    MODEL_ID,\n",
+ "    trust_remote_code=True,\n",
+ "    torch_dtype=torch.float16\n",
+ ").to(device).eval()\n",
+ "\n",
+ "# Constants\n",
+ "MAX_INPUT_TOKEN_LENGTH = 4096\n",
+ "\n",
+ "\n",
+ "def downsample_video(video_path: str, num_frames: int = 10):\n",
+ "    \"\"\"\n",
+ "    Extracts 'num_frames' evenly spaced frames from the video.\n",
+ "    Returns a list of (PIL.Image, timestamp_seconds).\n",
+ "    \"\"\"\n",
+ "    vidcap = cv2.VideoCapture(video_path)\n",
+ "    total = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
+ "    fps = vidcap.get(cv2.CAP_PROP_FPS) or 1\n",
+ "    indices = np.linspace(0, total - 1, num_frames, dtype=int)\n",
+ "    frames = []\n",
+ "    for idx in indices:\n",
+ "        vidcap.set(cv2.CAP_PROP_POS_FRAMES, idx)\n",
+ "        ret, frame = vidcap.read()\n",
+ "        if not ret:\n",
+ "            continue\n",
+ "        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ "        pil = Image.fromarray(frame)\n",
+ "        timestamp = round(idx / fps, 2)\n",
+ "        frames.append((pil, timestamp))\n",
+ "    vidcap.release()\n",
+ "    return frames\n",
+ "\n",
+ "\n",
+ "def generate(video_file: str):\n",
+ "    \"\"\"\n",
+ "    Process the uploaded video through the model and return the concatenated output.\n",
+ "    \"\"\"\n",
+ "    # Step 1: extract frames\n",
+ "    frames = downsample_video(video_file)\n",
+ "\n",
+ "    # Step 2: build chat-like messages\n",
+ "    messages = [\n",
+ "        {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant for video understanding.\"}]},\n",
+ "        {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": \"Please describe the content of the following video frames:\"}]\n",
+ "        }\n",
+ "    ]\n",
+ "    for img, ts in frames:\n",
+ "        # save temporary frame image\n",
+ "        path = f\"frame_{uuid.uuid4().hex}.png\"\n",
+ "        img.save(path)\n",
+ "        messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame at {ts}s:\"})\n",
+ "        messages[1][\"content\"].append({\"type\": \"image\", \"url\": path})\n",
+ "\n",
+ "    # Step 3: tokenize with truncation\n",
+ "    inputs = processor.apply_chat_template(\n",
+ "        messages,\n",
+ "        tokenize=True,\n",
+ "        add_generation_prompt=True,\n",
+ "        return_dict=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "        truncation=True,\n",
+ "        max_length=MAX_INPUT_TOKEN_LENGTH\n",
+ "    ).to(device)\n",
+ "\n",
+ "    # Step 4: use a streamer to collect output\n",
+ "    from transformers import TextIteratorStreamer\n",
+ "    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
+ "    gen_kwargs = {\n",
+ "        **inputs,\n",
+ "        \"streamer\": streamer,\n",
+ "        \"max_new_tokens\": 1024,\n",
+ "        \"do_sample\": True,\n",
+ "        \"temperature\": 0.7,\n",
+ "    }\n",
+ "    thread = Thread(target=model.generate, kwargs=gen_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    # collect all tokens\n",
+ "    buffer = \"\"\n",
+ "    for chunk in streamer:\n",
+ "        buffer += chunk.replace(\"<|im_end|>\", \"\")\n",
+ "        time.sleep(0.01)\n",
+ "\n",
+ "    # return the full concatenated response\n",
+ "    return buffer\n",
+ "\n",
+ "\n",
+ "def launch_app():\n",
+ "    demo = gr.Interface(\n",
+ "        fn=generate,\n",
+ "        inputs=gr.Video(label=\"Upload Video\"),\n",
+ "        outputs=gr.Textbox(label=\"Video Description\"),\n",
+ "        title=\"Video Understanding with Imgscope-OCR-2B-0527\",\n",
+ "        description=\"Upload a video and get an OCR-based description of its frames.\",\n",
+ "        allow_flagging=\"never\"\n",
+ "    )\n",
+ "    demo.queue().launch(debug=True)\n",
+ "\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    launch_app()"
+ ],
+ "metadata": {
+ "id": "GZXqC00zLbS1"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
Imgscope-OCR-2B-0527/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
Imgscope-OCR-2B-0527/README.md ADDED
@@ -0,0 +1,178 @@
+ ![2.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/yUKVKSX2E18k0h3YwCx1h.png)
+
+ # **Imgscope-OCR-2B-0527**
+
+ > The **Imgscope-OCR-2B-0527** model is a fine-tuned version of *Qwen2-VL-2B-Instruct*, optimized for *messy handwriting recognition*, *document OCR*, *realistic handwritten OCR*, and *math problem solving with LaTeX formatting*. It is trained on custom datasets for document and handwriting OCR tasks and pairs a conversational interface with strong visual and textual understanding for multimodal applications.
+
+ > [!warning]
+ > Colab Demo: https://huggingface.co/prithivMLmods/Imgscope-OCR-2B-0527/blob/main/Imgscope%20OCR%202B%200527%20Demo/Imgscope-OCR-2B-0527.ipynb
+
+ ---
+
+ ### Key Enhancements
+
+ * **SoTA Understanding of Images of Various Resolution & Ratio**
+ Imgscope-OCR-2B-0527 achieves state-of-the-art performance on visual understanding benchmarks such as MathVista, DocVQA, RealWorldQA, and MTVQA.
+
+ * **Enhanced Handwriting OCR**
+ Specifically optimized for recognizing and interpreting **realistic and messy handwriting** with high accuracy. Ideal for digitizing handwritten documents and notes.
+
+ * **Document OCR Fine-Tuning**
+ Fine-tuned on curated, realistic **document OCR datasets**, enabling accurate extraction of text from a range of structured and unstructured layouts.
+
+ * **Understanding Videos of 20+ Minutes**
+ Capable of processing long videos for **video-based question answering**, **transcription**, and **content generation**.
+
+ * **Device Control Agent**
+ Supports decision-making and control for integration with **mobile devices**, **robots**, and **automation systems** using visual-textual commands.
+
+ * **Multilingual OCR Support**
+ In addition to English and Chinese, the model supports **OCR in multiple languages**, including European languages, Japanese, Korean, Arabic, and Vietnamese.
+
+ ---
+
+ ### Demo Video Inference
+
+ https://github.com/user-attachments/assets/3ca9ef10-8a71-4cd1-8be1-951a9f6d5a00
+
+ ```
+
+ The video starts with a group of people gathered around a table filled with snacks and drinks, indicating a casual social gathering. One person is seen holding a can of Pringles, suggesting that the snack is being enjoyed by the attendees.
+
+ As the scene progresses, the focus shifts to a man who is seen pouring a drink from a can into a glass. This action implies that the drink is being served or shared among the group.
+
+ The next scene shows a different setting where a man is walking down a hallway while holding a can of Pringles. This could indicate that he is on his way to join the group or has just arrived at the location.
+
+ The following scene takes place in a diner where two people are seated at a booth. The man is seen holding a can of Pringles, which suggests that they might be enjoying a meal together.
+
+ The video then transitions to a wedding ceremony where a man is feeding a woman a piece of cake using a can of Pringles. This unusual gesture adds a humorous element to the otherwise traditional event.
+
+ Next, the scene changes to a bedroom where a man is seen feeding a woman a piece of cake using a can of Pringles. This scene further emphasizes the playful nature of the video.
+
+ The video then shifts to an office setting where a man is seen working at a desk. The presence of a can of Pringles on the desk suggests that it might be part of his workspace or a snack during work hours.
+
+ Finally, the video ends with a scene of a funeral where a woman is seen crying over a casket. The presence of a can of Pringles on the casket adds an unexpected and humorous touch to the solemn occasion.
+
+ Throughout the video, the recurring theme of Pringles is evident, with various scenes featuring the snack as a central element. The video concludes with the text "GET STUCK IN," encouraging viewers to enjoy the snack and engage with the content.
+
+ ```
+
+ ### How to Use
+
+ ```python
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ # Load the model
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "prithivMLmods/Imgscope-OCR-2B-0527",  # replace with updated model ID if available
+     torch_dtype="auto",
+     device_map="auto"
+ )
+
+ # Optional: Flash Attention for performance optimization
+ # model = Qwen2VLForConditionalGeneration.from_pretrained(
+ #     "prithivMLmods/Imgscope-OCR-2B-0527",
+ #     torch_dtype=torch.bfloat16,
+ #     attn_implementation="flash_attention_2",
+ #     device_map="auto",
+ # )
+
+ # Load processor
+ processor = AutoProcessor.from_pretrained("prithivMLmods/Imgscope-OCR-2B-0527")
+
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
+             },
+             {"type": "text", "text": "Recognize the handwriting in this image."},
+         ],
+     }
+ ]
+
+ # Prepare input
+ text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ )
+ inputs = inputs.to("cuda")
+
+ # Generate output
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
+
+ ---
+
+ ### Demo Inference
+
+ ![Screenshot 2025-05-27 at 03-40-34 Gradio.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/9KiRkOGPB8cLl6VHwh2UD.png)
+ ![Screenshot 2025-05-27 at 03-40-56 (anonymous) - output_e0fbfa20-686e-4bce-b2e8-25991be5a5a0.pdf.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/VOHQIrT7hCs5afGMRROvD.png)
+
+ ---
+
+ ### Buffering Output (Streaming)
+
+ ```python
+ buffer = ""
+ for new_text in streamer:
+     buffer += new_text
+     buffer = buffer.replace("<|im_end|>", "")
+     yield buffer
+ ```
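+
+ The `streamer` in this loop is a `TextIteratorStreamer` attached to a `generate` call running on a background thread, as in the bundled `app.py`. A minimal setup sketch, assuming `model`, `processor`, and `inputs` from the "How to Use" section above:
+
+ ```python
+ from threading import Thread
+ from transformers import TextIteratorStreamer
+
+ # Decode tokens incrementally as generate() produces them
+ streamer = TextIteratorStreamer(
+     processor.tokenizer, skip_prompt=True, skip_special_tokens=True
+ )
+ generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+
+ # Run generation on a worker thread so the main thread can consume the stream
+ thread = Thread(target=model.generate, kwargs=generation_kwargs)
+ thread.start()
+ ```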
+
+ ---
+
+ ### Key Features
+
+ 1. **Realistic Messy Handwriting OCR**
+
+    * Fine-tuned for **complex and hard-to-read handwritten inputs** using real-world handwriting datasets.
+
+ 2. **Document OCR and Layout Understanding**
+
+    * Accurately extracts text from structured documents, including scanned pages, forms, and academic papers.
+
+ 3. **Image and Text Multi-modal Reasoning**
+
+    * Combines **vision-language capabilities** for tasks like captioning, answering image-based queries, and understanding image+text prompts.
+
+ 4. **Math Problem Solving and LaTeX Rendering**
+
+    * Converts mathematical expressions and problem-solving steps into **LaTeX** format (see the prompt sketch after this list).
+
+ 5. **Multi-turn Conversations**
+
+    * Supports **dialogue-based reasoning**, retaining context for follow-up questions.
+
+ 6. **Video + Image + Text-to-Text Generation**
+
+    * Accepts inputs from videos, images, or combined media with text, and generates relevant output accordingly.
+
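+ A minimal prompt sketch for the LaTeX feature, reusing the "How to Use" pipeline unchanged; the image path and instruction wording here are illustrative, not taken from the model card:
+
+ ```python
+ # Hypothetical example: only the message content changes; input preparation,
+ # generation, and decoding stay exactly as in "How to Use".
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "image": "math_worksheet.png"},  # placeholder local image
+             {"type": "text", "text": "Solve the problem in this image and give each step in LaTeX."},
+         ],
+     }
+ ]
+ ```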
+ ---
+
+ ## **Intended Use**
+
+ **Imgscope-OCR-2B-0527** is intended for:
+
+ * Handwritten and printed document digitization
+ * OCR pipelines for educational institutions and businesses
+ * Academic and scientific content parsing, especially math-heavy documents
+ * Assistive tools for visually impaired users
+ * Robotic and mobile automation agents interpreting screen or camera data
+ * Multilingual OCR processing for document translation or archiving
Imgscope-OCR-2B-0527/app.py ADDED
@@ -0,0 +1,283 @@
+ import gradio as gr
+ import spaces
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
+ from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import os
+ import uuid
+ import io
+ from threading import Thread
+ from reportlab.lib.pagesizes import A4
+ from reportlab.lib.styles import getSampleStyleSheet
+ from reportlab.lib import colors
+ from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer
+ from reportlab.lib.units import inch
+ from reportlab.pdfbase import pdfmetrics
+ from reportlab.pdfbase.ttfonts import TTFont
+ import docx
+ from docx.enum.text import WD_ALIGN_PARAGRAPH
+
+ # Define model options
+ MODEL_OPTIONS = {
+     "Imgscope-OCR-2B-0527": "prithivMLmods/Imgscope-OCR-2B-0527",
+ }
+
+ # Preload models and processors into CUDA
+ models = {}
+ processors = {}
+ for name, model_id in MODEL_OPTIONS.items():
+     print(f"Loading {name}...")
+     models[name] = Qwen2VLForConditionalGeneration.from_pretrained(
+         model_id,
+         trust_remote_code=True,
+         torch_dtype=torch.float16
+     ).to("cuda").eval()
+     processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+
+ image_extensions = Image.registered_extensions()
+
+ def identify_and_save_blob(blob_path):
+     """Identifies if the blob is an image and saves it."""
+     try:
+         with open(blob_path, 'rb') as file:
+             blob_content = file.read()
+             try:
+                 Image.open(io.BytesIO(blob_content)).verify()  # Check if it's a valid image
+                 extension = ".png"  # Default to PNG for saving
+                 media_type = "image"
+             except (IOError, SyntaxError):
+                 raise ValueError("Unsupported media type. Please upload a valid image.")
+
+             filename = f"temp_{uuid.uuid4()}_media{extension}"
+             with open(filename, "wb") as f:
+                 f.write(blob_content)
+
+             return filename, media_type
+
+     except FileNotFoundError:
+         raise ValueError(f"The file {blob_path} was not found.")
+     except Exception as e:
+         raise ValueError(f"An error occurred while processing the file: {e}")
+
+ @spaces.GPU
+ def qwen_inference(model_name, media_input, text_input=None):
+     """Handles inference for the selected model."""
+     model = models[model_name]
+     processor = processors[model_name]
+
+     if isinstance(media_input, str):
+         media_path = media_input
+         if media_path.endswith(tuple([i for i in image_extensions.keys()])):
+             media_type = "image"
+         else:
+             try:
+                 media_path, media_type = identify_and_save_blob(media_input)
+             except Exception as e:
+                 raise ValueError("Unsupported media type. Please upload a valid image.")
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": media_type,
+                     media_type: media_path
+                 },
+                 {"type": "text", "text": text_input},
+             ],
+         }
+     ]
+
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, _ = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         padding=True,
+         return_tensors="pt",
+     ).to("cuda")
+
+     streamer = TextIteratorStreamer(
+         processor.tokenizer, skip_prompt=True, skip_special_tokens=True
+     )
+     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     buffer = ""
+     for new_text in streamer:
+         buffer += new_text
+         # Remove <|im_end|> or similar tokens from the output
+         buffer = buffer.replace("<|im_end|>", "")
+         yield buffer
+
+ def format_plain_text(output_text):
+     """Formats the output text as plain text without LaTeX delimiters."""
+     # Remove LaTeX delimiters and convert to plain text
+     plain_text = output_text.replace("\\(", "").replace("\\)", "").replace("\\[", "").replace("\\]", "")
+     return plain_text
+
+ def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):
+     """Generates a document with the input image and plain text output."""
+     plain_text = format_plain_text(output_text)
+     if file_format == "pdf":
+         return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)
+     elif file_format == "docx":
+         return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)
+
+ def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):
+     """Generates a PDF document."""
+     filename = f"output_{uuid.uuid4()}.pdf"
+     doc = SimpleDocTemplate(
+         filename,
+         pagesize=A4,
+         rightMargin=inch,
+         leftMargin=inch,
+         topMargin=inch,
+         bottomMargin=inch
+     )
+     styles = getSampleStyleSheet()
+     styles["Normal"].fontSize = int(font_size)
+     styles["Normal"].leading = int(font_size) * line_spacing
+     styles["Normal"].alignment = {
+         "Left": 0,
+         "Center": 1,
+         "Right": 2,
+         "Justified": 4
+     }[alignment]
+
+     story = []
+
+     # Add image with size adjustment
+     image_sizes = {
+         "Small": (200, 200),
+         "Medium": (400, 400),
+         "Large": (600, 600)
+     }
+     img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])
+     story.append(img)
+     story.append(Spacer(1, 12))
+
+     # Add plain text output
+     text = Paragraph(plain_text, styles["Normal"])
+     story.append(text)
+
+     doc.build(story)
+     return filename
+
+ def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):
+     """Generates a DOCX document."""
+     filename = f"output_{uuid.uuid4()}.docx"
+     doc = docx.Document()
+
+     # Add image with size adjustment
+     image_sizes = {
+         "Small": docx.shared.Inches(2),
+         "Medium": docx.shared.Inches(4),
+         "Large": docx.shared.Inches(6)
+     }
+     doc.add_picture(media_path, width=image_sizes[image_size])
+     doc.add_paragraph()
+
+     # Add plain text output
+     paragraph = doc.add_paragraph()
+     paragraph.paragraph_format.line_spacing = line_spacing
+     paragraph.paragraph_format.alignment = {
+         "Left": WD_ALIGN_PARAGRAPH.LEFT,
+         "Center": WD_ALIGN_PARAGRAPH.CENTER,
+         "Right": WD_ALIGN_PARAGRAPH.RIGHT,
+         "Justified": WD_ALIGN_PARAGRAPH.JUSTIFY
+     }[alignment]
+     run = paragraph.add_run(plain_text)
+     run.font.size = docx.shared.Pt(int(font_size))
+
+     doc.save(filename)
+     return filename
+
+ # CSS for output styling
+ css = """
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ .submit-btn {
+     background-color: #cf3434 !important;
+     color: white !important;
+ }
+ .submit-btn:hover {
+     background-color: #ff2323 !important;
+ }
+ .download-btn {
+     background-color: #35a6d6 !important;
+     color: white !important;
+ }
+ .download-btn:hover {
+     background-color: #22bcff !important;
+ }
+ """
+
+ # Gradio app setup
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("# Imgscope-OCR-2B-0527: Vision and Language Processing")
+
+     with gr.Tab(label="Image Input"):
+
+         with gr.Row():
+             with gr.Column():
+                 model_choice = gr.Dropdown(
+                     label="Model Selection",
+                     choices=list(MODEL_OPTIONS.keys()),
+                     value="Imgscope-OCR-2B-0527"
+                 )
+                 input_media = gr.File(
+                     label="Upload Image", type="filepath"
+                 )
+                 text_input = gr.Textbox(label="Question", placeholder="Ask a question about the image...")
+                 submit_btn = gr.Button(value="Submit", elem_classes="submit-btn")
+
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text", lines=10)
+                 plain_text_output = gr.Textbox(label="Standardized Plain Text", lines=10)
+
+         submit_btn.click(
+             qwen_inference, [model_choice, input_media, text_input], [output_text]
+         ).then(
+             lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]
+         )
+
+         # Add examples directly usable by clicking
+         with gr.Row():
+             with gr.Column():
+                 line_spacing = gr.Dropdown(
+                     choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],
+                     value=1.5,
+                     label="Line Spacing"
+                 )
+                 font_size = gr.Dropdown(
+                     choices=["8", "10", "12", "14", "16", "18", "20", "22", "24"],
+                     value="18",
+                     label="Font Size"
+                 )
+                 alignment = gr.Dropdown(
+                     choices=["Left", "Center", "Right", "Justified"],
+                     value="Justified",
+                     label="Text Alignment"
+                 )
+                 image_size = gr.Dropdown(
+                     choices=["Small", "Medium", "Large"],
+                     value="Small",
+                     label="Image Size"
+                 )
+                 file_format = gr.Radio(["pdf", "docx"], label="File Format", value="pdf")
+                 get_document_btn = gr.Button(value="Get Document", elem_classes="download-btn")
+
+         get_document_btn.click(
+             generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label="Download Document")
+         )
+
+ demo.launch(debug=True)
Imgscope-OCR-2B-0527/notebook/Imgscope-OCR-2B-0527.ipynb ADDED
@@ -0,0 +1,327 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "%%capture\n",
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
+ "!pip install torch torchvision qwen-vl-utils av ipython reportlab\n",
+ "!pip install fpdf python-docx pillow huggingface_hub hf_xet"
+ ],
+ "metadata": {
+ "id": "oDmd1ZObGSel"
+ },
+ "execution_count": 1,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import gradio as gr\n",
+ "import spaces\n",
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer\n",
+ "from qwen_vl_utils import process_vision_info\n",
+ "import torch\n",
+ "from PIL import Image\n",
+ "import os\n",
+ "import uuid\n",
+ "import io\n",
+ "from threading import Thread\n",
+ "from reportlab.lib.pagesizes import A4\n",
+ "from reportlab.lib.styles import getSampleStyleSheet\n",
+ "from reportlab.lib import colors\n",
+ "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n",
+ "from reportlab.lib.units import inch\n",
+ "from reportlab.pdfbase import pdfmetrics\n",
+ "from reportlab.pdfbase.ttfonts import TTFont\n",
+ "import docx\n",
+ "from docx.enum.text import WD_ALIGN_PARAGRAPH\n",
+ "\n",
+ "# Define model options\n",
+ "MODEL_OPTIONS = {\n",
+ "    \"Imgscope-OCR-2B-0527\": \"prithivMLmods/Imgscope-OCR-2B-0527\",\n",
+ "}\n",
+ "\n",
+ "# Preload models and processors into CUDA\n",
+ "models = {}\n",
+ "processors = {}\n",
+ "for name, model_id in MODEL_OPTIONS.items():\n",
+ "    print(f\"Loading {name}...\")\n",
+ "    models[name] = Qwen2VLForConditionalGeneration.from_pretrained(\n",
+ "        model_id,\n",
+ "        trust_remote_code=True,\n",
+ "        torch_dtype=torch.float16\n",
+ "    ).to(\"cuda\").eval()\n",
+ "    processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n",
+ "\n",
+ "image_extensions = Image.registered_extensions()\n",
+ "\n",
+ "def identify_and_save_blob(blob_path):\n",
+ "    \"\"\"Identifies if the blob is an image and saves it.\"\"\"\n",
+ "    try:\n",
+ "        with open(blob_path, 'rb') as file:\n",
+ "            blob_content = file.read()\n",
+ "            try:\n",
+ "                Image.open(io.BytesIO(blob_content)).verify()  # Check if it's a valid image\n",
+ "                extension = \".png\"  # Default to PNG for saving\n",
+ "                media_type = \"image\"\n",
+ "            except (IOError, SyntaxError):\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "            filename = f\"temp_{uuid.uuid4()}_media{extension}\"\n",
+ "            with open(filename, \"wb\") as f:\n",
+ "                f.write(blob_content)\n",
+ "\n",
+ "            return filename, media_type\n",
+ "\n",
+ "    except FileNotFoundError:\n",
+ "        raise ValueError(f\"The file {blob_path} was not found.\")\n",
+ "    except Exception as e:\n",
+ "        raise ValueError(f\"An error occurred while processing the file: {e}\")\n",
+ "\n",
+ "@spaces.GPU\n",
+ "def qwen_inference(model_name, media_input, text_input=None):\n",
+ "    \"\"\"Handles inference for the selected model.\"\"\"\n",
+ "    model = models[model_name]\n",
+ "    processor = processors[model_name]\n",
+ "\n",
+ "    if isinstance(media_input, str):\n",
+ "        media_path = media_input\n",
+ "        if media_path.endswith(tuple([i for i in image_extensions.keys()])):\n",
+ "            media_type = \"image\"\n",
+ "        else:\n",
+ "            try:\n",
+ "                media_path, media_type = identify_and_save_blob(media_input)\n",
+ "            except Exception as e:\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "    messages = [\n",
+ "        {\n",
+ "            \"role\": \"user\",\n",
+ "            \"content\": [\n",
+ "                {\n",
+ "                    \"type\": media_type,\n",
+ "                    media_type: media_path\n",
+ "                },\n",
+ "                {\"type\": \"text\", \"text\": text_input},\n",
+ "            ],\n",
+ "        }\n",
+ "    ]\n",
+ "\n",
+ "    text = processor.apply_chat_template(\n",
+ "        messages, tokenize=False, add_generation_prompt=True\n",
+ "    )\n",
+ "    image_inputs, _ = process_vision_info(messages)\n",
+ "    inputs = processor(\n",
+ "        text=[text],\n",
+ "        images=image_inputs,\n",
+ "        padding=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "    ).to(\"cuda\")\n",
+ "\n",
+ "    streamer = TextIteratorStreamer(\n",
+ "        processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n",
+ "    )\n",
+ "    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)\n",
+ "\n",
+ "    thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    buffer = \"\"\n",
+ "    for new_text in streamer:\n",
+ "        buffer += new_text\n",
+ "        # Remove <|im_end|> or similar tokens from the output\n",
+ "        buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
+ "        yield buffer\n",
+ "\n",
+ "def format_plain_text(output_text):\n",
+ "    \"\"\"Formats the output text as plain text without LaTeX delimiters.\"\"\"\n",
+ "    # Remove LaTeX delimiters and convert to plain text\n",
+ "    plain_text = output_text.replace(\"\\\\(\", \"\").replace(\"\\\\)\", \"\").replace(\"\\\\[\", \"\").replace(\"\\\\]\", \"\")\n",
+ "    return plain_text\n",
+ "\n",
+ "def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a document with the input image and plain text output.\"\"\"\n",
+ "    plain_text = format_plain_text(output_text)\n",
+ "    if file_format == \"pdf\":\n",
+ "        return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "    elif file_format == \"docx\":\n",
+ "        return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "\n",
+ "def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a PDF document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.pdf\"\n",
+ "    doc = SimpleDocTemplate(\n",
+ "        filename,\n",
+ "        pagesize=A4,\n",
+ "        rightMargin=inch,\n",
+ "        leftMargin=inch,\n",
+ "        topMargin=inch,\n",
+ "        bottomMargin=inch\n",
+ "    )\n",
+ "    styles = getSampleStyleSheet()\n",
+ "    styles[\"Normal\"].fontSize = int(font_size)\n",
+ "    styles[\"Normal\"].leading = int(font_size) * line_spacing\n",
+ "    styles[\"Normal\"].alignment = {\n",
+ "        \"Left\": 0,\n",
+ "        \"Center\": 1,\n",
+ "        \"Right\": 2,\n",
+ "        \"Justified\": 4\n",
+ "    }[alignment]\n",
+ "\n",
+ "    story = []\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": (200, 200),\n",
+ "        \"Medium\": (400, 400),\n",
+ "        \"Large\": (600, 600)\n",
+ "    }\n",
+ "    img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])\n",
+ "    story.append(img)\n",
+ "    story.append(Spacer(1, 12))\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    text = Paragraph(plain_text, styles[\"Normal\"])\n",
+ "    story.append(text)\n",
+ "\n",
+ "    doc.build(story)\n",
+ "    return filename\n",
+ "\n",
+ "def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a DOCX document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.docx\"\n",
+ "    doc = docx.Document()\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": docx.shared.Inches(2),\n",
+ "        \"Medium\": docx.shared.Inches(4),\n",
+ "        \"Large\": docx.shared.Inches(6)\n",
+ "    }\n",
+ "    doc.add_picture(media_path, width=image_sizes[image_size])\n",
+ "    doc.add_paragraph()\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    paragraph = doc.add_paragraph()\n",
+ "    paragraph.paragraph_format.line_spacing = line_spacing\n",
+ "    paragraph.paragraph_format.alignment = {\n",
+ "        \"Left\": WD_ALIGN_PARAGRAPH.LEFT,\n",
+ "        \"Center\": WD_ALIGN_PARAGRAPH.CENTER,\n",
+ "        \"Right\": WD_ALIGN_PARAGRAPH.RIGHT,\n",
+ "        \"Justified\": WD_ALIGN_PARAGRAPH.JUSTIFY\n",
+ "    }[alignment]\n",
+ "    run = paragraph.add_run(plain_text)\n",
+ "    run.font.size = docx.shared.Pt(int(font_size))\n",
+ "\n",
+ "    doc.save(filename)\n",
+ "    return filename\n",
+ "\n",
+ "# CSS for output styling\n",
+ "css = \"\"\"\n",
+ "#output {\n",
+ "    height: 500px;\n",
+ "    overflow: auto;\n",
+ "    border: 1px solid #ccc;\n",
+ "}\n",
+ ".submit-btn {\n",
+ "    background-color: #cf3434 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".submit-btn:hover {\n",
+ "    background-color: #ff2323 !important;\n",
+ "}\n",
+ ".download-btn {\n",
+ "    background-color: #35a6d6 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".download-btn:hover {\n",
+ "    background-color: #22bcff !important;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# Gradio app setup\n",
+ "with gr.Blocks(css=css) as demo:\n",
+ "    gr.Markdown(\"# Imgscope-OCR-2B-0527: Vision and Language Processing\")\n",
+ "\n",
+ "    with gr.Tab(label=\"Image Input\"):\n",
+ "\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                model_choice = gr.Dropdown(\n",
+ "                    label=\"Model Selection\",\n",
+ "                    choices=list(MODEL_OPTIONS.keys()),\n",
+ "                    value=\"Imgscope-OCR-2B-0527\"\n",
+ "                )\n",
+ "                input_media = gr.File(\n",
+ "                    label=\"Upload Image\", type=\"filepath\"\n",
+ "                )\n",
+ "                text_input = gr.Textbox(label=\"Question\", placeholder=\"Ask a question about the image...\")\n",
+ "                submit_btn = gr.Button(value=\"Submit\", elem_classes=\"submit-btn\")\n",
+ "\n",
+ "            with gr.Column():\n",
+ "                output_text = gr.Textbox(label=\"Output Text\", lines=10)\n",
+ "                plain_text_output = gr.Textbox(label=\"Standardized Plain Text\", lines=10)\n",
+ "\n",
+ "        submit_btn.click(\n",
+ "            qwen_inference, [model_choice, input_media, text_input], [output_text]\n",
+ "        ).then(\n",
+ "            lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]\n",
+ "        )\n",
+ "\n",
+ "        # Add examples directly usable by clicking\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                line_spacing = gr.Dropdown(\n",
+ "                    choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],\n",
+ "                    value=1.5,\n",
+ "                    label=\"Line Spacing\"\n",
+ "                )\n",
+ "                font_size = gr.Dropdown(\n",
+ "                    choices=[\"8\", \"10\", \"12\", \"14\", \"16\", \"18\", \"20\", \"22\", \"24\"],\n",
+ "                    value=\"18\",\n",
+ "                    label=\"Font Size\"\n",
+ "                )\n",
+ "                alignment = gr.Dropdown(\n",
+ "                    choices=[\"Left\", \"Center\", \"Right\", \"Justified\"],\n",
+ "                    value=\"Justified\",\n",
+ "                    label=\"Text Alignment\"\n",
+ "                )\n",
+ "                image_size = gr.Dropdown(\n",
+ "                    choices=[\"Small\", \"Medium\", \"Large\"],\n",
+ "                    value=\"Small\",\n",
+ "                    label=\"Image Size\"\n",
+ "                )\n",
+ "                file_format = gr.Radio([\"pdf\", \"docx\"], label=\"File Format\", value=\"pdf\")\n",
+ "                get_document_btn = gr.Button(value=\"Get Document\", elem_classes=\"download-btn\")\n",
+ "\n",
+ "        get_document_btn.click(\n",
+ "            generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label=\"Download Document\")\n",
+ "        )\n",
+ "\n",
+ "demo.launch(debug=True)"
+ ],
+ "metadata": {
+ "id": "ovBSsRFhGbs2"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
Imgscope-OCR-2B-0527/requirements.txt ADDED
@@ -0,0 +1,17 @@
+ gradio
+ spaces
+ transformers
+ accelerate
+ numpy
+ requests
+ torch
+ torchvision
+ qwen-vl-utils
+ av
+ ipython
+ reportlab
+ fpdf
+ python-docx
+ pillow
+ huggingface_hub
+ hf_xet
Inkscope-Captions-2B-0526/Inkscope-Captions-2B-0526-Video-Understanding/Inkscope-Captions-2B-0526-Video-Understanding.ipynb ADDED
@@ -0,0 +1,164 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "XKQwuI75LWLA"
+ },
+ "outputs": [],
+ "source": [
+ "%%capture\n",
+ "!pip install gradio transformers pillow opencv-python\n",
+ "!pip install accelerate torchvision torch huggingface_hub\n",
+ "!pip install hf_xet qwen-vl-utils gradio_client\n",
+ "!pip install transformers-stream-generator spaces"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "import uuid\n",
+ "import time\n",
+ "from threading import Thread\n",
+ "\n",
+ "import gradio as gr\n",
+ "import torch\n",
+ "import numpy as np\n",
+ "import cv2\n",
+ "from PIL import Image\n",
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor\n",
+ "\n",
+ "# Use CUDA if available\n",
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+ "\n",
+ "# Load the Inkscope-Captions-2B-0526 model and processor\n",
+ "MODEL_ID = \"prithivMLmods/Inkscope-Captions-2B-0526\"\n",
+ "processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)\n",
+ "model = Qwen2VLForConditionalGeneration.from_pretrained(\n",
+ "    MODEL_ID,\n",
+ "    trust_remote_code=True,\n",
+ "    torch_dtype=torch.float16\n",
+ ").to(device).eval()\n",
+ "\n",
+ "# Constants\n",
+ "MAX_INPUT_TOKEN_LENGTH = 4096\n",
+ "\n",
+ "\n",
+ "def downsample_video(video_path: str, num_frames: int = 10):\n",
+ "    \"\"\"\n",
+ "    Extracts 'num_frames' evenly spaced frames from the video.\n",
+ "    Returns a list of (PIL.Image, timestamp_seconds).\n",
+ "    \"\"\"\n",
+ "    vidcap = cv2.VideoCapture(video_path)\n",
+ "    total = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
+ "    fps = vidcap.get(cv2.CAP_PROP_FPS) or 1\n",
+ "    indices = np.linspace(0, total - 1, num_frames, dtype=int)\n",
+ "    frames = []\n",
+ "    for idx in indices:\n",
+ "        vidcap.set(cv2.CAP_PROP_POS_FRAMES, idx)\n",
+ "        ret, frame = vidcap.read()\n",
+ "        if not ret:\n",
+ "            continue\n",
+ "        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n",
+ "        pil = Image.fromarray(frame)\n",
+ "        timestamp = round(idx / fps, 2)\n",
+ "        frames.append((pil, timestamp))\n",
+ "    vidcap.release()\n",
+ "    return frames\n",
+ "\n",
+ "\n",
+ "def generate(video_file: str):\n",
+ "    \"\"\"\n",
+ "    Process the uploaded video through the model and return the concatenated output.\n",
+ "    \"\"\"\n",
+ "    # Step 1: extract frames\n",
+ "    frames = downsample_video(video_file)\n",
+ "\n",
+ "    # Step 2: build chat-like messages\n",
+ "    messages = [\n",
+ "        {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant for video understanding.\"}]},\n",
+ "        {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"text\": \"Please explain the content of the following video frames:\"}]\n",
+ "        }\n",
+ "    ]\n",
+ "    for img, ts in frames:\n",
+ "        # save temporary frame image\n",
+ "        path = f\"frame_{uuid.uuid4().hex}.png\"\n",
+ "        img.save(path)\n",
+ "        messages[1][\"content\"].append({\"type\": \"text\", \"text\": f\"Frame at {ts}s:\"})\n",
+ "        messages[1][\"content\"].append({\"type\": \"image\", \"url\": path})\n",
+ "\n",
+ "    # Step 3: tokenize with truncation\n",
+ "    inputs = processor.apply_chat_template(\n",
+ "        messages,\n",
+ "        tokenize=True,\n",
+ "        add_generation_prompt=True,\n",
+ "        return_dict=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "        truncation=True,\n",
+ "        max_length=MAX_INPUT_TOKEN_LENGTH\n",
+ "    ).to(device)\n",
+ "\n",
+ "    # Step 4: use a streamer to collect output\n",
+ "    from transformers import TextIteratorStreamer\n",
+ "    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)\n",
+ "    gen_kwargs = {\n",
+ "        **inputs,\n",
+ "        \"streamer\": streamer,\n",
+ "        \"max_new_tokens\": 1024,\n",
+ "        \"do_sample\": True,\n",
+ "        \"temperature\": 0.7,\n",
+ "    }\n",
+ "    thread = Thread(target=model.generate, kwargs=gen_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    # collect all tokens\n",
+ "    buffer = \"\"\n",
+ "    for chunk in streamer:\n",
+ "        buffer += chunk.replace(\"<|im_end|>\", \"\")\n",
+ "        time.sleep(0.01)\n",
+ "\n",
+ "    # return the full concatenated response\n",
+ "    return buffer\n",
+ "\n",
+ "\n",
+ "def launch_app():\n",
+ "    demo = gr.Interface(\n",
+ "        fn=generate,\n",
+ "        inputs=gr.Video(label=\"Upload Video\"),\n",
+ "        outputs=gr.Textbox(label=\"Video Caption\"),\n",
+ "        title=\"Video Understanding with Inkscope-Captions-2B-0526\",\n",
+ "        description=\"Upload a video and get an OCR-based description of its frames.\",\n",
+ "        allow_flagging=\"never\"\n",
+ "    )\n",
+ "    demo.queue().launch(debug=True)\n",
+ "\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    launch_app()"
+ ],
+ "metadata": {
+ "id": "GZXqC00zLbS1"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
Inkscope-Captions-2B-0526/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
Inkscope-Captions-2B-0526/README.md ADDED
@@ -0,0 +1,137 @@
+ ![9.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/-1klnWCRiSPeNT25L4fCI.png)
+
+ # **Inkscope-Captions-2B-0526**
+
+ > The **Inkscope-Captions-2B-0526** model is a fine-tuned version of *Qwen2-VL-2B-Instruct*, optimized for **image captioning**, **vision-language understanding**, and **English-language caption generation**. It was fine-tuned on the first 30k entries of the `conceptual-captions-cc12m-llavanext` dataset to generate **detailed, high-quality captions** for images, including complex or abstract scenes.
+
+ > [!note]
+ > Colab Demo: https://huggingface.co/prithivMLmods/Inkscope-Captions-2B-0526/blob/main/Inkscope%20Captions%202B%200526%20Demo/Inkscope-Captions-2B-0526.ipynb
+
+ > [!note]
+ > Video Understanding Demo: https://huggingface.co/prithivMLmods/Inkscope-Captions-2B-0526/blob/main/Inkscope-Captions-2B-0526-Video-Understanding/Inkscope-Captions-2B-0526-Video-Understanding.ipynb
+ ---
+
+ #### Key Enhancements:
+
+ * **High-Quality Visual Captioning**: Generates **rich, descriptive captions** for diverse visual inputs, including abstract, real-world, and complex images.
+
+ * **Fine-Tuned on a CC12M Subset**: Trained on the **first 30k entries** of the *Conceptual Captions 12M (CC12M)* dataset in **LLaVA-Next formatting**, ensuring alignment with instruction-tuned captioning.
+
+ * **Multimodal Understanding**: Supports detailed understanding of **text+image combinations**, ideal for **caption generation**, **scene understanding**, and **instruction-based vision-language tasks**.
+
+ * **Multilingual Recognition**: While focused on English captioning, the model can recognize text in various languages present in an image.
+
+ * **Strong Foundation Model**: Built on *Qwen2-VL-2B-Instruct*, offering powerful visual-linguistic reasoning, OCR capability, and flexible prompt handling.
+
+ ---
+
+ ### How to Use
+
+ ```python
+ from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+
+ # Load the fine-tuned model
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "prithivMLmods/Inkscope-Captions-2B-0526", torch_dtype="auto", device_map="auto"
+ )
+
+ # Load processor
+ processor = AutoProcessor.from_pretrained("prithivMLmods/Inkscope-Captions-2B-0526")
+
+ # Sample input message with an image
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {
+                 "type": "image",
+                 "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
+             },
+             {"type": "text", "text": "Generate a detailed caption for this image."},
+         ],
+     }
+ ]
+
+ # Preprocess input
+ text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text],
+     images=image_inputs,
+     videos=video_inputs,
+     padding=True,
+     return_tensors="pt",
+ ).to("cuda")
+
+ # Generate output
+ generated_ids = model.generate(**inputs, max_new_tokens=128)
+ generated_ids_trimmed = [
+     out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+ ]
+ output_text = processor.batch_decode(
+     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+ )
+ print(output_text)
+ ```
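+
+ The trimming step (`out_ids[len(in_ids):]`) strips the echoed prompt tokens so that only newly generated text is decoded; raise `max_new_tokens` if captions come back truncated.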
+
+ ---
+
+ ### Buffering Output (Optional for streaming inference)
+
+ ```python
+ buffer = ""
+ for new_text in streamer:
+     buffer += new_text
+     buffer = buffer.replace("<|im_end|>", "")
+     yield buffer
+ ```
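+
+ The loop above assumes a `streamer` object already exists. A minimal sketch of how it could be wired up, reusing `model`, `processor`, and `inputs` from the How to Use example (the generation settings here are illustrative, not the released demo code; the same pattern appears in this repo's `app.py`):
+
+ ```python
+ from threading import Thread
+ from transformers import TextIteratorStreamer
+
+ # Stream decoded text as it is generated, skipping the prompt and special tokens
+ streamer = TextIteratorStreamer(
+     processor.tokenizer, skip_prompt=True, skip_special_tokens=True
+ )
+
+ # Run generation in a background thread so this thread can consume the stream
+ thread = Thread(
+     target=model.generate,
+     kwargs=dict(inputs, streamer=streamer, max_new_tokens=128),
+ )
+ thread.start()
+
+ for new_text in streamer:
+     print(new_text, end="", flush=True)
+ thread.join()
+ ```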
+
+ ---
+
+ ### **Demo Inference**
+
+ ![Screenshot 2025-05-27 at 03-59-36 Gradio.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/ykPB8Yxk0Z_1WDmSoCjKD.png)
+ ![Screenshot 2025-05-27 at 03-59-53 (anonymous) - output_8dc4ad31-403a-4f59-a483-be2aec11b756.pdf.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/tBPdM1iyRf8Fi12urNUbt.png)
+
+ ---
+
+ ### **Video Inference**
+
+ ![Screenshot 2025-05-27 at 20-35-30 Video Understanding with Inkscope-Captions-2B-0526.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/LrHNNYV1elysHjAmzOXw3.png)
+
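+ The video demo notebook samples frames with OpenCV and feeds them in as images. Alternatively, `qwen_vl_utils` can consume a video directly; a hedged sketch of that route follows, where the `file:///path/to/video.mp4` path and `fps` value are placeholders, and `model`/`processor` are loaded as in the How to Use example:
+
+ ```python
+ from qwen_vl_utils import process_vision_info
+
+ # A video entry instead of an image; process_vision_info extracts the frames.
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "video", "video": "file:///path/to/video.mp4", "fps": 1.0},
+             {"type": "text", "text": "Describe what happens in this video."},
+         ],
+     }
+ ]
+
+ text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ image_inputs, video_inputs = process_vision_info(messages)
+ inputs = processor(
+     text=[text], images=image_inputs, videos=video_inputs,
+     padding=True, return_tensors="pt",
+ ).to("cuda")
+
+ generated_ids = model.generate(**inputs, max_new_tokens=256)
+ trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
+ print(processor.batch_decode(trimmed, skip_special_tokens=True)[0])
+ ```
+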
+ ---
+
+ ### **Key Features**
+
+ 1. **Caption Generation from Images:**
+
+    * Transforms visual scenes into **detailed, human-like descriptions**.
+
+ 2. **Conceptual Reasoning:**
+
+    * Captures abstract or high-level elements of an image, including **emotion, action, or scene context**.
+
+ 3. **Multi-modal Prompting:**
+
+    * Accepts both **image and text** input for **instruction-tuned** caption generation.
+
+ 4. **Flexible Output Format:**
+
+    * Generates output in **natural language**, ideal for storytelling, accessibility tools, and educational applications.
+
+ 5. **Instruction-Tuned:**
+
+    * Fine-tuned with **LLaVA-Next-style prompts**, making it suitable for interactive use and vision-language agents.
+
+ ---
+
+ ## **Intended Use**
+
+ **Inkscope-Captions-2B-0526** is designed for the following applications:
+
+ * **Image Captioning** for web-scale datasets, social media analysis, and generative applications.
+ * **Accessibility Tools**: helping visually impaired users understand image content through text.
+ * **Content Tagging and Metadata Generation** for media, digital assets, and educational material.
+ * **AI Companions and Tutors** that need to explain or describe visuals in a conversational setting.
+ * **Instruction-Following Vision-Language Tasks**, such as zero-shot VQA, scene description, and multimodal storytelling.
Inkscope-Captions-2B-0526/app.py ADDED
@@ -0,0 +1,283 @@
+ import gradio as gr
+ import spaces
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
+ from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import os
+ import uuid
+ import io
+ from threading import Thread
+ from reportlab.lib.pagesizes import A4
+ from reportlab.lib.styles import getSampleStyleSheet
+ from reportlab.lib import colors
+ from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer
+ from reportlab.lib.units import inch
+ from reportlab.pdfbase import pdfmetrics
+ from reportlab.pdfbase.ttfonts import TTFont
+ import docx
+ from docx.enum.text import WD_ALIGN_PARAGRAPH
+
+ # Define model options
+ MODEL_OPTIONS = {
+     "Inkscope-Captions-2B-0526": "prithivMLmods/Inkscope-Captions-2B-0526",
+ }
+
+ # Preload models and processors into CUDA
+ models = {}
+ processors = {}
+ for name, model_id in MODEL_OPTIONS.items():
+     print(f"Loading {name}...")
+     models[name] = Qwen2VLForConditionalGeneration.from_pretrained(
+         model_id,
+         trust_remote_code=True,
+         torch_dtype=torch.float16
+     ).to("cuda").eval()
+     processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+
+ image_extensions = Image.registered_extensions()
+
+ def identify_and_save_blob(blob_path):
+     """Identifies if the blob is an image and saves it."""
+     try:
+         with open(blob_path, 'rb') as file:
+             blob_content = file.read()
+             try:
+                 Image.open(io.BytesIO(blob_content)).verify()  # Check if it's a valid image
+                 extension = ".png"  # Default to PNG for saving
+                 media_type = "image"
+             except (IOError, SyntaxError):
+                 raise ValueError("Unsupported media type. Please upload a valid image.")
+
+             filename = f"temp_{uuid.uuid4()}_media{extension}"
+             with open(filename, "wb") as f:
+                 f.write(blob_content)
+
+             return filename, media_type
+
+     except FileNotFoundError:
+         raise ValueError(f"The file {blob_path} was not found.")
+     except Exception as e:
+         raise ValueError(f"An error occurred while processing the file: {e}")
+
+ @spaces.GPU
+ def qwen_inference(model_name, media_input, text_input=None):
+     """Handles inference for the selected model."""
+     model = models[model_name]
+     processor = processors[model_name]
+
+     if isinstance(media_input, str):
+         media_path = media_input
+         if media_path.endswith(tuple([i for i in image_extensions.keys()])):
+             media_type = "image"
+         else:
+             try:
+                 media_path, media_type = identify_and_save_blob(media_input)
+             except Exception as e:
+                 raise ValueError("Unsupported media type. Please upload a valid image.")
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": media_type,
+                     media_type: media_path
+                 },
+                 {"type": "text", "text": text_input},
+             ],
+         }
+     ]
+
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, _ = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         padding=True,
+         return_tensors="pt",
+     ).to("cuda")
+
+     streamer = TextIteratorStreamer(
+         processor.tokenizer, skip_prompt=True, skip_special_tokens=True
+     )
+     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     buffer = ""
+     for new_text in streamer:
+         buffer += new_text
+         # Remove <|im_end|> or similar tokens from the output
+         buffer = buffer.replace("<|im_end|>", "")
+         yield buffer
+
+ def format_plain_text(output_text):
+     """Formats the output text as plain text without LaTeX delimiters."""
+     # Remove LaTeX delimiters and convert to plain text
+     plain_text = output_text.replace("\\(", "").replace("\\)", "").replace("\\[", "").replace("\\]", "")
+     return plain_text
+
+ def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):
+     """Generates a document with the input image and plain text output."""
+     plain_text = format_plain_text(output_text)
+     if file_format == "pdf":
+         return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)
+     elif file_format == "docx":
+         return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)
+
+ def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):
+     """Generates a PDF document."""
+     filename = f"output_{uuid.uuid4()}.pdf"
+     doc = SimpleDocTemplate(
+         filename,
+         pagesize=A4,
+         rightMargin=inch,
+         leftMargin=inch,
+         topMargin=inch,
+         bottomMargin=inch
+     )
+     styles = getSampleStyleSheet()
+     styles["Normal"].fontSize = int(font_size)
+     styles["Normal"].leading = int(font_size) * line_spacing
+     styles["Normal"].alignment = {
+         "Left": 0,
+         "Center": 1,
+         "Right": 2,
+         "Justified": 4
+     }[alignment]
+
+     story = []
+
+     # Add image with size adjustment
+     image_sizes = {
+         "Small": (200, 200),
+         "Medium": (400, 400),
+         "Large": (600, 600)
+     }
+     img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])
+     story.append(img)
+     story.append(Spacer(1, 12))
+
+     # Add plain text output
+     text = Paragraph(plain_text, styles["Normal"])
+     story.append(text)
+
+     doc.build(story)
+     return filename
+
+ def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):
+     """Generates a DOCX document."""
+     filename = f"output_{uuid.uuid4()}.docx"
+     doc = docx.Document()
+
+     # Add image with size adjustment
+     image_sizes = {
+         "Small": docx.shared.Inches(2),
+         "Medium": docx.shared.Inches(4),
+         "Large": docx.shared.Inches(6)
+     }
+     doc.add_picture(media_path, width=image_sizes[image_size])
+     doc.add_paragraph()
+
+     # Add plain text output
+     paragraph = doc.add_paragraph()
+     paragraph.paragraph_format.line_spacing = line_spacing
+     paragraph.paragraph_format.alignment = {
+         "Left": WD_ALIGN_PARAGRAPH.LEFT,
+         "Center": WD_ALIGN_PARAGRAPH.CENTER,
+         "Right": WD_ALIGN_PARAGRAPH.RIGHT,
+         "Justified": WD_ALIGN_PARAGRAPH.JUSTIFY
+     }[alignment]
+     run = paragraph.add_run(plain_text)
+     run.font.size = docx.shared.Pt(int(font_size))
+
+     doc.save(filename)
+     return filename
+
+ # CSS for output styling
+ css = """
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ .submit-btn {
+     background-color: #cf3434 !important;
+     color: white !important;
+ }
+ .submit-btn:hover {
+     background-color: #ff2323 !important;
+ }
+ .download-btn {
+     background-color: #35a6d6 !important;
+     color: white !important;
+ }
+ .download-btn:hover {
+     background-color: #22bcff !important;
+ }
+ """
+
+ # Gradio app setup
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown("# Inkscope-Captions-2B-0526 : Vision and Language Processing")
+
+     with gr.Tab(label="Image Input"):
+
+         with gr.Row():
+             with gr.Column():
+                 model_choice = gr.Dropdown(
+                     label="Model Selection",
+                     choices=list(MODEL_OPTIONS.keys()),
+                     value="Inkscope-Captions-2B-0526"
+                 )
+                 input_media = gr.File(
+                     label="Upload Image", type="filepath"
+                 )
+                 text_input = gr.Textbox(label="Question", placeholder="Ask a question about the image...")
+                 submit_btn = gr.Button(value="Submit", elem_classes="submit-btn")
+
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text", lines=10)
+                 plain_text_output = gr.Textbox(label="Standardized Plain Text", lines=10)
+
+         submit_btn.click(
+             qwen_inference, [model_choice, input_media, text_input], [output_text]
+         ).then(
+             lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]
+         )
+
+         # Document formatting options
+         with gr.Row():
+             with gr.Column():
+                 line_spacing = gr.Dropdown(
+                     choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],
+                     value=1.5,
+                     label="Line Spacing"
+                 )
+                 font_size = gr.Dropdown(
+                     choices=["8", "10", "12", "14", "16", "18", "20", "22", "24"],
+                     value="18",
+                     label="Font Size"
+                 )
+                 alignment = gr.Dropdown(
+                     choices=["Left", "Center", "Right", "Justified"],
+                     value="Justified",
+                     label="Text Alignment"
+                 )
+                 image_size = gr.Dropdown(
+                     choices=["Small", "Medium", "Large"],
+                     value="Small",
+                     label="Image Size"
+                 )
+                 file_format = gr.Radio(["pdf", "docx"], label="File Format", value="pdf")
+                 get_document_btn = gr.Button(value="Get Document", elem_classes="download-btn")
+
+         get_document_btn.click(
+             generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label="Download Document")
+         )
+
+ demo.launch(debug=True)
Inkscope-Captions-2B-0526/notebook/Inkscope-Captions-2B-0526.ipynb ADDED
@@ -0,0 +1,327 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "%%capture\n",
+ "!pip install gradio spaces transformers accelerate numpy requests\n",
+ "!pip install torch torchvision qwen-vl-utils av ipython reportlab\n",
+ "!pip install fpdf python-docx pillow huggingface_hub hf_xet"
+ ],
+ "metadata": {
+ "id": "oDmd1ZObGSel"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import gradio as gr\n",
+ "import spaces\n",
+ "from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer\n",
+ "from qwen_vl_utils import process_vision_info\n",
+ "import torch\n",
+ "from PIL import Image\n",
+ "import os\n",
+ "import uuid\n",
+ "import io\n",
+ "from threading import Thread\n",
+ "from reportlab.lib.pagesizes import A4\n",
+ "from reportlab.lib.styles import getSampleStyleSheet\n",
+ "from reportlab.lib import colors\n",
+ "from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer\n",
+ "from reportlab.lib.units import inch\n",
+ "from reportlab.pdfbase import pdfmetrics\n",
+ "from reportlab.pdfbase.ttfonts import TTFont\n",
+ "import docx\n",
+ "from docx.enum.text import WD_ALIGN_PARAGRAPH\n",
+ "\n",
+ "# Define model options\n",
+ "MODEL_OPTIONS = {\n",
+ "    \"Inkscope-Captions-2B-0526\": \"prithivMLmods/Inkscope-Captions-2B-0526\",\n",
+ "}\n",
+ "\n",
+ "# Preload models and processors into CUDA\n",
+ "models = {}\n",
+ "processors = {}\n",
+ "for name, model_id in MODEL_OPTIONS.items():\n",
+ "    print(f\"Loading {name}...\")\n",
+ "    models[name] = Qwen2VLForConditionalGeneration.from_pretrained(\n",
+ "        model_id,\n",
+ "        trust_remote_code=True,\n",
+ "        torch_dtype=torch.float16\n",
+ "    ).to(\"cuda\").eval()\n",
+ "    processors[name] = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)\n",
+ "\n",
+ "image_extensions = Image.registered_extensions()\n",
+ "\n",
+ "def identify_and_save_blob(blob_path):\n",
+ "    \"\"\"Identifies if the blob is an image and saves it.\"\"\"\n",
+ "    try:\n",
+ "        with open(blob_path, 'rb') as file:\n",
+ "            blob_content = file.read()\n",
+ "            try:\n",
+ "                Image.open(io.BytesIO(blob_content)).verify()  # Check if it's a valid image\n",
+ "                extension = \".png\"  # Default to PNG for saving\n",
+ "                media_type = \"image\"\n",
+ "            except (IOError, SyntaxError):\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "            filename = f\"temp_{uuid.uuid4()}_media{extension}\"\n",
+ "            with open(filename, \"wb\") as f:\n",
+ "                f.write(blob_content)\n",
+ "\n",
+ "            return filename, media_type\n",
+ "\n",
+ "    except FileNotFoundError:\n",
+ "        raise ValueError(f\"The file {blob_path} was not found.\")\n",
+ "    except Exception as e:\n",
+ "        raise ValueError(f\"An error occurred while processing the file: {e}\")\n",
+ "\n",
+ "@spaces.GPU\n",
+ "def qwen_inference(model_name, media_input, text_input=None):\n",
+ "    \"\"\"Handles inference for the selected model.\"\"\"\n",
+ "    model = models[model_name]\n",
+ "    processor = processors[model_name]\n",
+ "\n",
+ "    if isinstance(media_input, str):\n",
+ "        media_path = media_input\n",
+ "        if media_path.endswith(tuple([i for i in image_extensions.keys()])):\n",
+ "            media_type = \"image\"\n",
+ "        else:\n",
+ "            try:\n",
+ "                media_path, media_type = identify_and_save_blob(media_input)\n",
+ "            except Exception as e:\n",
+ "                raise ValueError(\"Unsupported media type. Please upload a valid image.\")\n",
+ "\n",
+ "    messages = [\n",
+ "        {\n",
+ "            \"role\": \"user\",\n",
+ "            \"content\": [\n",
+ "                {\n",
+ "                    \"type\": media_type,\n",
+ "                    media_type: media_path\n",
+ "                },\n",
+ "                {\"type\": \"text\", \"text\": text_input},\n",
+ "            ],\n",
+ "        }\n",
+ "    ]\n",
+ "\n",
+ "    text = processor.apply_chat_template(\n",
+ "        messages, tokenize=False, add_generation_prompt=True\n",
+ "    )\n",
+ "    image_inputs, _ = process_vision_info(messages)\n",
+ "    inputs = processor(\n",
+ "        text=[text],\n",
+ "        images=image_inputs,\n",
+ "        padding=True,\n",
+ "        return_tensors=\"pt\",\n",
+ "    ).to(\"cuda\")\n",
+ "\n",
+ "    streamer = TextIteratorStreamer(\n",
+ "        processor.tokenizer, skip_prompt=True, skip_special_tokens=True\n",
+ "    )\n",
+ "    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)\n",
+ "\n",
+ "    thread = Thread(target=model.generate, kwargs=generation_kwargs)\n",
+ "    thread.start()\n",
+ "\n",
+ "    buffer = \"\"\n",
+ "    for new_text in streamer:\n",
+ "        buffer += new_text\n",
+ "        # Remove <|im_end|> or similar tokens from the output\n",
+ "        buffer = buffer.replace(\"<|im_end|>\", \"\")\n",
+ "        yield buffer\n",
+ "\n",
+ "def format_plain_text(output_text):\n",
+ "    \"\"\"Formats the output text as plain text without LaTeX delimiters.\"\"\"\n",
+ "    # Remove LaTeX delimiters and convert to plain text\n",
+ "    plain_text = output_text.replace(\"\\\\(\", \"\").replace(\"\\\\)\", \"\").replace(\"\\\\[\", \"\").replace(\"\\\\]\", \"\")\n",
+ "    return plain_text\n",
+ "\n",
+ "def generate_document(media_path, output_text, file_format, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a document with the input image and plain text output.\"\"\"\n",
+ "    plain_text = format_plain_text(output_text)\n",
+ "    if file_format == \"pdf\":\n",
+ "        return generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "    elif file_format == \"docx\":\n",
+ "        return generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size)\n",
+ "\n",
+ "def generate_pdf(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a PDF document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.pdf\"\n",
+ "    doc = SimpleDocTemplate(\n",
+ "        filename,\n",
+ "        pagesize=A4,\n",
+ "        rightMargin=inch,\n",
+ "        leftMargin=inch,\n",
+ "        topMargin=inch,\n",
+ "        bottomMargin=inch\n",
+ "    )\n",
+ "    styles = getSampleStyleSheet()\n",
+ "    styles[\"Normal\"].fontSize = int(font_size)\n",
+ "    styles[\"Normal\"].leading = int(font_size) * line_spacing\n",
+ "    styles[\"Normal\"].alignment = {\n",
+ "        \"Left\": 0,\n",
+ "        \"Center\": 1,\n",
+ "        \"Right\": 2,\n",
+ "        \"Justified\": 4\n",
+ "    }[alignment]\n",
+ "\n",
+ "    story = []\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": (200, 200),\n",
+ "        \"Medium\": (400, 400),\n",
+ "        \"Large\": (600, 600)\n",
+ "    }\n",
+ "    img = RLImage(media_path, width=image_sizes[image_size][0], height=image_sizes[image_size][1])\n",
+ "    story.append(img)\n",
+ "    story.append(Spacer(1, 12))\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    text = Paragraph(plain_text, styles[\"Normal\"])\n",
+ "    story.append(text)\n",
+ "\n",
+ "    doc.build(story)\n",
+ "    return filename\n",
+ "\n",
+ "def generate_docx(media_path, plain_text, font_size, line_spacing, alignment, image_size):\n",
+ "    \"\"\"Generates a DOCX document.\"\"\"\n",
+ "    filename = f\"output_{uuid.uuid4()}.docx\"\n",
+ "    doc = docx.Document()\n",
+ "\n",
+ "    # Add image with size adjustment\n",
+ "    image_sizes = {\n",
+ "        \"Small\": docx.shared.Inches(2),\n",
+ "        \"Medium\": docx.shared.Inches(4),\n",
+ "        \"Large\": docx.shared.Inches(6)\n",
+ "    }\n",
+ "    doc.add_picture(media_path, width=image_sizes[image_size])\n",
+ "    doc.add_paragraph()\n",
+ "\n",
+ "    # Add plain text output\n",
+ "    paragraph = doc.add_paragraph()\n",
+ "    paragraph.paragraph_format.line_spacing = line_spacing\n",
+ "    paragraph.paragraph_format.alignment = {\n",
+ "        \"Left\": WD_ALIGN_PARAGRAPH.LEFT,\n",
+ "        \"Center\": WD_ALIGN_PARAGRAPH.CENTER,\n",
+ "        \"Right\": WD_ALIGN_PARAGRAPH.RIGHT,\n",
+ "        \"Justified\": WD_ALIGN_PARAGRAPH.JUSTIFY\n",
+ "    }[alignment]\n",
+ "    run = paragraph.add_run(plain_text)\n",
+ "    run.font.size = docx.shared.Pt(int(font_size))\n",
+ "\n",
+ "    doc.save(filename)\n",
+ "    return filename\n",
+ "\n",
+ "# CSS for output styling\n",
+ "css = \"\"\"\n",
+ "#output {\n",
+ "    height: 500px;\n",
+ "    overflow: auto;\n",
+ "    border: 1px solid #ccc;\n",
+ "}\n",
+ ".submit-btn {\n",
+ "    background-color: #cf3434 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".submit-btn:hover {\n",
+ "    background-color: #ff2323 !important;\n",
+ "}\n",
+ ".download-btn {\n",
+ "    background-color: #35a6d6 !important;\n",
+ "    color: white !important;\n",
+ "}\n",
+ ".download-btn:hover {\n",
+ "    background-color: #22bcff !important;\n",
+ "}\n",
+ "\"\"\"\n",
+ "\n",
+ "# Gradio app setup\n",
+ "with gr.Blocks(css=css) as demo:\n",
+ "    gr.Markdown(\"# Inkscope-Captions-2B-0526 : Vision and Language Processing\")\n",
+ "\n",
+ "    with gr.Tab(label=\"Image Input\"):\n",
+ "\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                model_choice = gr.Dropdown(\n",
+ "                    label=\"Model Selection\",\n",
+ "                    choices=list(MODEL_OPTIONS.keys()),\n",
+ "                    value=\"Inkscope-Captions-2B-0526\"\n",
+ "                )\n",
+ "                input_media = gr.File(\n",
+ "                    label=\"Upload Image\", type=\"filepath\"\n",
+ "                )\n",
+ "                text_input = gr.Textbox(label=\"Question\", placeholder=\"Ask a question about the image...\")\n",
+ "                submit_btn = gr.Button(value=\"Submit\", elem_classes=\"submit-btn\")\n",
+ "\n",
+ "            with gr.Column():\n",
+ "                output_text = gr.Textbox(label=\"Output Text\", lines=10)\n",
+ "                plain_text_output = gr.Textbox(label=\"Standardized Plain Text\", lines=10)\n",
+ "\n",
+ "        submit_btn.click(\n",
+ "            qwen_inference, [model_choice, input_media, text_input], [output_text]\n",
+ "        ).then(\n",
+ "            lambda output_text: format_plain_text(output_text), [output_text], [plain_text_output]\n",
+ "        )\n",
+ "\n",
+ "        # Document formatting options\n",
+ "        with gr.Row():\n",
+ "            with gr.Column():\n",
+ "                line_spacing = gr.Dropdown(\n",
+ "                    choices=[0.5, 1.0, 1.15, 1.5, 2.0, 2.5, 3.0],\n",
+ "                    value=1.5,\n",
+ "                    label=\"Line Spacing\"\n",
+ "                )\n",
+ "                font_size = gr.Dropdown(\n",
+ "                    choices=[\"8\", \"10\", \"12\", \"14\", \"16\", \"18\", \"20\", \"22\", \"24\"],\n",
+ "                    value=\"18\",\n",
+ "                    label=\"Font Size\"\n",
+ "                )\n",
+ "                alignment = gr.Dropdown(\n",
+ "                    choices=[\"Left\", \"Center\", \"Right\", \"Justified\"],\n",
+ "                    value=\"Justified\",\n",
+ "                    label=\"Text Alignment\"\n",
+ "                )\n",
+ "                image_size = gr.Dropdown(\n",
+ "                    choices=[\"Small\", \"Medium\", \"Large\"],\n",
+ "                    value=\"Small\",\n",
+ "                    label=\"Image Size\"\n",
+ "                )\n",
+ "                file_format = gr.Radio([\"pdf\", \"docx\"], label=\"File Format\", value=\"pdf\")\n",
+ "                get_document_btn = gr.Button(value=\"Get Document\", elem_classes=\"download-btn\")\n",
+ "\n",
+ "        get_document_btn.click(\n",
+ "            generate_document, [input_media, output_text, file_format, font_size, line_spacing, alignment, image_size], gr.File(label=\"Download Document\")\n",
+ "        )\n",
+ "\n",
+ "demo.launch(debug=True)"
+ ],
+ "metadata": {
+ "id": "ovBSsRFhGbs2"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
Inkscope-Captions-2B-0526/requirements.txt ADDED
@@ -0,0 +1,17 @@
+ gradio
+ spaces
+ transformers
+ accelerate
+ numpy
+ requests
+ torch
+ torchvision
+ qwen-vl-utils
+ av
+ ipython
+ reportlab
+ fpdf
+ python-docx
+ pillow
+ huggingface_hub
+ hf_xet