Update README.md
README.md (CHANGED)

@@ -4,16 +4,20 @@ base_model:
 - OpenGVLab/InternViT-300M-448px
 ---
 
+<p align="center">
+<h1 align="center">🐳 OmniCorpus: A Unified Multimodal Corpus of 10 Billion-Level Images Interleaved with Text</h1>
+</p>
+
 - Repository: https://github.com/OpenGVLab/OmniCorpus
-- Paper: https://arxiv.org/abs/2406.08418
+- Paper (ICLR 2025 Spotlight): https://arxiv.org/abs/2406.08418
 
 # Citation
 
 ```
-@…
+@inproceedings{li2024omnicorpus,
   title={OmniCorpus: A Unified Multimodal Corpus of 10 Billion-Level Images Interleaved with Text},
   author={Li, Qingyun and Chen, Zhe and Wang, Weiyun and Wang, Wenhai and Ye, Shenglong and Jin, Zhenjiang and others},
-…
-  year={…}
+  booktitle={The Thirteenth International Conference on Learning Representations},
+  year={2025}
 }
 ```
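
For readers landing on this card from the links above, here is a minimal sketch of streaming a few documents from one of the released OmniCorpus subsets with the Hugging Face `datasets` library. The repo ID `OpenGVLab/OmniCorpus-CC-210M` and the document schema are assumptions based on the project's release naming, not something this diff confirms; check the dataset cards linked from the repository before relying on either.

```python
# Minimal sketch, under the assumptions stated above: stream a handful of
# interleaved image-text documents from an OmniCorpus subset on the Hub.
from datasets import load_dataset

ds = load_dataset(
    "OpenGVLab/OmniCorpus-CC-210M",  # assumed repo ID -- verify on the Hub
    split="train",
    streaming=True,  # the corpus is far too large to download eagerly
)

for i, doc in enumerate(ds):
    # Inspect the schema rather than assuming field names.
    print(sorted(doc.keys()))
    if i == 2:
        break
```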