## Example

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "jaeyong2/QuerySense-Preview"
# torch_dtype="auto" uses the checkpoint's dtype; device_map="auto" needs accelerate installed
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
prompt = """
# Role
You are an AI that receives Questions and Context from users as input and preprocesses the Questions.
# Instruction
- If the user's Questions contains enough information to create an answer, use the user's Questions as is.
- If the information is insufficient or the Context is insufficient, please rephrase the Questions with the necessary information.
- If there is insufficient information to generate an answer and there is no Context, it will automatically fill in the appropriate information.
# input
- Context : Previous conversations or related Context or related information entered by the user (Optional)
- Question : User's Questions (Required)
""".strip()
content ="""
Context :
Question : name
""".strip()
system = {"role": "system", "content": prompt}
user = {"role": "user", "content": content}
messages = [system, user]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False,  # switches between thinking and non-thinking modes; default is True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
# conduct text completion
generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=32768,
)
# keep only the newly generated tokens, dropping the echoed prompt
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
content = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
print("content:", content)
```

## Result

```
content: what is the name of the product?
```
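For repeated calls, the steps above can be folded into a small helper. The sketch below is a hypothetical convenience wrapper, not an official API of this model card: the `preprocess_question` name, the example Context string, and the smaller `max_new_tokens` budget are all assumptions, and the rewritten question it returns depends on the model's actual generation.

```python
# Hypothetical helper; reuses `model`, `tokenizer`, and `prompt` from the example above.
def preprocess_question(question: str, context: str = "") -> str:
    content = f"Context : {context}\nQuestion : {question}".strip()
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": content},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    # 512 new tokens is an assumed budget for a one-line rewritten question
    generated_ids = model.generate(**model_inputs, max_new_tokens=512)
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    return tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")

# Example call with Context supplied (hypothetical input); output depends on the model
print(preprocess_question("name", context="The user is viewing a product page for a laptop."))
```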
## License
## Acknowledgement

This research is supported by the TPU Research Cloud program.