wasertech committed on
Commit
f6c46f3
·
verified ·
1 Parent(s): 8f34b7d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -2
README.md CHANGED
@@ -73,8 +73,16 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
73
  max_tokens = 8096
74
 
75
  print("Loading...")
76
- model = AutoModelForCausalLM.from_pretrained("wasertech/assistant-dolphin-2.2.1-mistral-7b-e1-qlora", quantization_config=BitsAndBytesConfig(load_in_4bit=True), torch_dtype="auto")
77
- tokenizer = AutoTokenizer.from_pretrained("wasertech/assistant-dolphin-2.2.1-mistral-7b-e1-qlora", torch_dtype="auto")
 
 
 
 
 
 
 
 
78
 
79
  pipe = pipeline(
80
  "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=max_tokens, trust_remote_code=True
 
73
  max_tokens = 8096
74
 
75
  print("Loading...")
76
+ model = AutoModelForCausalLM.from_pretrained(
77
+ "wasertech/assistant-dolphin-2.2.1-mistral-7b-e1-qlora",
78
+ quantization_config=BitsAndBytesConfig(load_in_4bit=True),
79
+ torch_dtype="auto"
80
+ )
81
+ tokenizer = AutoTokenizer.from_pretrained(
82
+ "wasertech/assistant-dolphin-2.2.1-mistral-7b-e1-qlora",
83
+ torch_dtype="auto"
84
+ )
85
+
86
 
87
  pipe = pipeline(
88
  "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=max_tokens, trust_remote_code=True