update
README.md CHANGED
@@ -1,3 +1,34 @@
 ---
+language:
+- zh
 license: apache-2.0
+tasks:
+- text-generation
 ---
+
+<!-- markdownlint-disable first-line-h1 -->
+<!-- markdownlint-disable html -->
+<div align="center">
+<h1>
+HuatuoGPT2-34B-8bits
+</h1>
+</div>
+
+<div align="center">
+<a href="https://github.com/FreedomIntelligence/HuatuoGPT-II" target="_blank">GitHub</a> | <a href="https://arxiv.org/pdf/2311.09774.pdf" target="_blank">Our Paper</a>
+</div>
+
+# <span id="Start">Quick Start</span>
+
+```Python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers.generation.utils import GenerationConfig
+tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/HuatuoGPT2-34B-8bits", use_fast=True, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("FreedomIntelligence/HuatuoGPT2-34B-8bits", device_map="auto", torch_dtype="auto", trust_remote_code=True)
+model.generation_config = GenerationConfig.from_pretrained("FreedomIntelligence/HuatuoGPT2-34B-8bits")
+messages = []
+messages.append({"role": "user", "content": "肚子疼怎么办?"})
+response = model.HuatuoChat(tokenizer, messages)
+print(response)
+```
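The quick start added in this change covers a single turn (the sample prompt "肚子疼怎么办?" asks "What should I do about a stomach ache?"). For a multi-turn exchange with this 8-bit checkpoint, a continuation might look like the sketch below. It assumes `model.HuatuoChat` accepts the accumulated message history and that replies are tagged with an `assistant` role; neither detail appears in this diff, so treat it as illustrative rather than the model card's documented usage.

```python
# Hypothetical multi-turn sketch built on the Quick Start above. model.HuatuoChat
# and the checkpoint name come from the README; the "assistant" role key and the
# follow-up prompt are illustrative assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation.utils import GenerationConfig

repo = "FreedomIntelligence/HuatuoGPT2-34B-8bits"
tokenizer = AutoTokenizer.from_pretrained(repo, use_fast=True, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto", torch_dtype="auto", trust_remote_code=True)
model.generation_config = GenerationConfig.from_pretrained(repo)

# First turn, as in the README example.
messages = [{"role": "user", "content": "肚子疼怎么办?"}]  # "What should I do about a stomach ache?"
reply = model.HuatuoChat(tokenizer, messages)
print(reply)

# Feed the reply back into the history before asking a follow-up question
# (the "assistant" role name is an assumption, not taken from the model card).
messages.append({"role": "assistant", "content": reply})
messages.append({"role": "user", "content": "需要做哪些检查?"})  # "What tests should I get?"
print(model.HuatuoChat(tokenizer, messages))
```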