Update README.md
Browse files
README.md
CHANGED
@@ -39,16 +39,16 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
39 |
from peft import PeftModel
|
40 |
import torch
|
41 |
|
42 |
-
|
43 |
|
44 |
base_model_id = "llm-jp/llm-jp-3-13b"
|
45 |
adapter_model_path = "/path/to/adapter_model.safetensors"
|
46 |
|
47 |
-
|
48 |
|
49 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
50 |
|
51 |
-
|
52 |
|
53 |
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
|
54 |
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16).to(device)
|
|
|
39 |
from peft import PeftModel
|
40 |
import torch
|
41 |
|
42 |
+
#ベースモデル ID とアダプタファイルパス
|
43 |
|
44 |
base_model_id = "llm-jp/llm-jp-3-13b"
|
45 |
adapter_model_path = "/path/to/adapter_model.safetensors"
|
46 |
|
47 |
+
#デバイス設定
|
48 |
|
49 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
50 |
|
51 |
+
#トークナイザーとベースモデルのロード
|
52 |
|
53 |
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
|
54 |
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, torch_dtype=torch.float16).to(device)
|