Update README.md
README.md CHANGED
@@ -37,7 +37,6 @@ if torch.cuda.get_device_capability()[0] >= 8:
 !pip install --no-deps packaging ninja einops "flash-attn>=2.6.3"
 
 # Settings for HuggingFace
-
 from google.colab import userdata
 from huggingface_hub import login
 
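For readers following along: the guard in the hunk header, `torch.cuda.get_device_capability()[0] >= 8`, limits the flash-attn install to GPUs with compute capability 8.0 or newer (Ampere and later), which FlashAttention 2 requires. The two HuggingFace imports are typically wired up as below; a minimal sketch, assuming the token is stored in Colab's Secrets panel under the name `HF_TOKEN` (the variable name matches the `token=HF_TOKEN` usage later in this diff):

```python
from google.colab import userdata
from huggingface_hub import login

# Read the token from Colab's Secrets panel; the secret name "HF_TOKEN" is an assumption
HF_TOKEN = userdata.get("HF_TOKEN")
login(HF_TOKEN)  # authenticate this session with the Hugging Face Hub
```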
@@ -53,7 +52,6 @@ print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
 print(f"{start_gpu_memory} GB of memory reserved.")
 
 # Load a model
-
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 from unsloth import FastLanguageModel
 import torch
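The context lines above reference `gpu_stats`, `max_memory`, and `start_gpu_memory` without showing where they come from. A plausible reconstruction under those names, followed by a 4-bit load with unsloth's `FastLanguageModel` (the checkpoint name and `max_seq_length` are placeholder assumptions, not taken from this README):

```python
import torch
from unsloth import FastLanguageModel

# GPU bookkeeping feeding the print statements shown in the diff
gpu_stats = torch.cuda.get_device_properties(0)
max_memory = round(gpu_stats.total_memory / 1024**3, 3)                  # total VRAM in GB
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024**3, 3)  # reserved before training
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")

# Load a model in 4-bit; the checkpoint below is a placeholder
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-bnb-4bit",  # assumption: any 4-bit-ready checkpoint
    max_seq_length=2048,                       # assumption
    dtype=None,                                # auto-detect (bfloat16 on Ampere+)
    load_in_4bit=True,
)
```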
@@ -271,4 +269,7 @@ model.push_to_hub_merged(
 # model.push_to_hub(new_model_id, token=HF_TOKEN, private=True) # Online saving
 # tokenizer.push_to_hub(new_model_id, token=HF_TOKEN) # Online saving
 
-```
+```
+
+# Datasets
+- ichikara-instruction-003-001-1
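The last hunk sits inside an open call to `model.push_to_hub_merged(`. A sketch of how that unsloth call is typically completed, reusing `new_model_id` and `HF_TOKEN` from the commented-out lines above; `save_method="merged_16bit"` and `private=True` are assumptions here, not taken from this README:

```python
# Merge the LoRA adapters into the base weights and upload the result in one step
model.push_to_hub_merged(
    new_model_id,                # e.g. "username/model-name"
    tokenizer,
    save_method="merged_16bit",  # assumption: merge to 16-bit weights before upload
    token=HF_TOKEN,
    private=True,                # assumption, mirroring the commented push_to_hub call
)
```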