Telugu-LLM-Labs
committed on
Commit
•
375bb58
1
Parent(s):
bdf7ec3
Update README.md
Browse files
README.md
CHANGED
@@ -28,7 +28,7 @@ The model is finetuned only on native telugu SFT data from above datasets and we
|
|
28 |
## Response: {response}
|
29 |
```
|
30 |
|
31 |
-
#
|
32 |
|
33 |
```python3
|
34 |
from unsloth import FastLanguageModel
|
@@ -67,7 +67,7 @@ outputs = model.generate(**inputs, max_new_tokens = 300, use_cache = True)
|
|
67 |
response = tokenizer.batch_decode(outputs)
|
68 |
```
|
69 |
|
70 |
-
#
|
71 |
|
72 |
```python3
|
73 |
from peft import AutoPeftModelForCausalLM
|
|
|
28 |
## Response: {response}
|
29 |
```
|
30 |
|
31 |
+
# Inference with Unsloth
|
32 |
|
33 |
```python3
|
34 |
from unsloth import FastLanguageModel
|
|
|
67 |
response = tokenizer.batch_decode(outputs)
|
68 |
```
|
69 |
|
70 |
+
# Inference with HuggingFace
|
71 |
|
72 |
```python3
|
73 |
from peft import AutoPeftModelForCausalLM
|