Beginner's Guide
def _create_chat_completion(llm, system_content,user_content, max_tokens, temperature, top_p, top_k):
messages = [
{"role": "system", "content": f"{system_content}"},
{"role": "user", "content": f"{user_content}"},
]
outputs = llm.create_chat_completion(messages, max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
top_k=top_k)
outputs = outputs["choices"][0]["message"]["content"]
return outputs
def _create_completion(llm, system_content,user_content, max_tokens, temperature, top_p, top_k):
messages = "<|system|>\n{system_content}</s>\n<|user|>\n{user_content}</s>\n<|assistant|>"
outputs = llm.create_completion(messages, max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
top_k=top_k)
outputs = outputs["choices"][0]["text"]
return outputs
How to use this model?
These are GGUF files for use with llama.cpp — this is not the original model, nor did I create it. Please follow the link to the original model in the model card and ask that model's creator. Good luck!