Update README.md
README.md CHANGED
@@ -7,9 +7,9 @@ base_model_relation: finetune
 
 # InternViT-6B-448px-V2_5
 
-[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL)  [\[…
+[\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL)  [\[📜 InternVL 1.0\]](https://huggingface.co/papers/2312.14238)  [\[📜 InternVL 1.5\]](https://huggingface.co/papers/2404.16821)  [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261)  [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271)
 
-[\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/)  [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL)  [\[🚀 Quick Start\]](#quick-start)  [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
+[\[🆕 Blog\]](https://internvl.github.io/blog/)  [\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/)  [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL)  [\[🚀 Quick Start\]](#quick-start)  [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
 
 <div align="center">
   <img width="500" alt="image" src="https://cdn-uploads.huggingface.co/production/uploads/64006c09330a45b03605bba3/zJsd2hqd3EevgXo6fNgC-.png">
@@ -115,6 +115,12 @@ This project is released under the MIT License.
 If you find this project useful in your research, please consider citing:
 
 ```BibTeX
+@article{chen2024expanding,
+  title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
+  author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
+  journal={arXiv preprint arXiv:2412.05271},
+  year={2024}
+}
 @article{gao2024mini,
   title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
   author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
@@ -127,10 +133,11 @@ If you find this project useful in your research, please consider citing:
   journal={arXiv preprint arXiv:2404.16821},
   year={2024}
 }
-@article{chen2023internvl,
-  title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
-  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
-  journal={arXiv preprint arXiv:2312.14238},
-  year={2023}
+@inproceedings{chen2024internvl,
+  title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
+  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
+  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
+  pages={24185--24198},
+  year={2024}
 }
 ```
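
For anyone pulling the updated citation block into a LaTeX manuscript, the entries are cited by their keys in the usual way. A minimal sketch, assuming the BibTeX block above is saved as `refs.bib` (an illustrative file name, not part of this repository):

```latex
% Minimal sketch: assumes the BibTeX entries above were saved as refs.bib
% (illustrative name) next to this .tex file; compile with latex + bibtex.
\documentclass{article}
\begin{document}
InternVL~2.5~\cite{chen2024expanding} extends the original
InternVL~\cite{chen2024internvl}, while Mini-InternVL~\cite{gao2024mini}
targets smaller deployments.
\bibliographystyle{plain}
\bibliography{refs}
\end{document}
```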
