---
license: openrail
datasets:
- newsmediabias/debiased_dataset
language:
- en
---
```python
from transformers import AutoTokenizer
import transformers
import torch

model = "newsmediabias/UnBIAS-LLama2-Debiaser-Chat-QLoRA"

# Load the tokenizer and a text-generation pipeline for the debiaser model.
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Fill these in with your task instruction and the text to debias.
sys_message = "Task: "
prompt = ""
input_text = ""

# Generate a debiased continuation for the input text.
sequences = pipeline(
    input_text,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=len(prompt) + 100,
)
res = sequences[0]["generated_text"]
```
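
The snippet above leaves `sys_message`, `prompt`, and `input_text` empty and never combines them. A minimal sketch of how the pieces could be assembled is shown below, assuming this adapter follows the standard Llama-2 chat prompt template; the `[INST]`/`<<SYS>>` wrapping and the instruction wording are assumptions, not documented by this card.

```python
# Hypothetical usage sketch: the instruction text and the Llama-2 chat
# template below are assumptions, not the card's documented prompt format.
sys_message = "Task: Debias the text below while preserving its meaning."
input_text = "Put the sentence you want debiased here."

prompt = f"<s>[INST] <<SYS>>\n{sys_message}\n<</SYS>>\n\n{input_text} [/INST]"

sequences = pipeline(
    prompt,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=100,  # cap new tokens rather than total length
)
print(sequences[0]["generated_text"])
```

By default the text-generation pipeline returns the prompt together with the generated continuation, so strip the leading prompt (or pass `return_full_text=False`) if you only want the debiased output.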