care-arabic-gemma2-9b is based on google/gemma-2-9b-it and further fine-tuned on our CARE dataset, enhancing cultural awareness, especially in Arabic.
>>> from vllm import LLM, SamplingParams
>>> from transformers import AutoTokenizer
>>> import torch
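>>> # Load the fine-tuned model with vLLM, sharding across all visible GPUs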
>>> model = LLM(model="geyang627/care-arabic-gemma2-9b", tensor_parallel_size=torch.cuda.device_count(), dtype="auto", trust_remote_code=True, max_model_len=2048)
>>> tokenizer = AutoTokenizer.from_pretrained("geyang627/care-arabic-gemma2-9b", use_fast=False, trust_remote_code=True)
>>> # Fall back to the EOS token if no padding token is defined
>>> if tokenizer.pad_token is None:
...     tokenizer.pad_token = tokenizer.eos_token
...     tokenizer.pad_token_id = tokenizer.eos_token_id
...
>>> sampling_params = SamplingParams(temperature=0.7, top_p=1.0, max_tokens=256)
>>> # Example prompt: "Why do Chinese people dislike the number 4?"
>>> outputs = model.generate(["为什么中国人不喜欢数字4?"], sampling_params)
>>> print(outputs[0].outputs[0].text)
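The snippet above passes the raw question string straight to generate. Because the base model is instruction-tuned, it may respond more reliably when the prompt is wrapped in the tokenizer's chat template. Below is a minimal sketch reusing the model, tokenizer, and sampling_params objects defined above; the English question is only an illustrative stand-in for the example prompt.

>>> # Wrap the question in the chat template shipped with the tokenizer
>>> messages = [{"role": "user", "content": "Why do Chinese people dislike the number 4?"}]
>>> prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
>>> outputs = model.generate([prompt], sampling_params)
>>> print(outputs[0].outputs[0].text)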