Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,14 +8,12 @@ import torch
|
|
| 8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
| 9 |
|
| 10 |
DESCRIPTION = """\
|
| 11 |
-
# Gemma 2
|
| 12 |
|
| 13 |
-
Fine-tuned version of
|
| 14 |
-
|
| 15 |
|
| 16 |
-
[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-
|
| 17 |
-
|
| 18 |
-
[📓 **Kaggle notebook**](https://www.kaggle.com/code/anakin87/post-training-gemma-for-italian-and-beyond) - Learn how this model was trained.
|
| 19 |
"""
|
| 20 |
|
| 21 |
MAX_MAX_NEW_TOKENS = 2048
|
|
@@ -24,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
|
|
| 24 |
|
| 25 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 26 |
|
| 27 |
-
model_id = "anakin87/gemma-2-
|
| 28 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 29 |
model = AutoModelForCausalLM.from_pretrained(
|
| 30 |
model_id,
|
|
|
|
| 8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
| 9 |
|
| 10 |
DESCRIPTION = """\
|
| 11 |
+
# Gemma 2 9B Neogenesis ITA 🇮🇹
|
| 12 |
|
| 13 |
+
Fine-tuned version of VAGOsolutions/SauerkrautLM-gemma-2-9b-it to improve the performance on the Italian language.
|
| 14 |
+
Good model with 9.24 billion parameters, with 8k context length.
|
| 15 |
|
| 16 |
+
[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-9b-neogenesis-ita)
|
|
|
|
|
|
|
| 17 |
"""
|
| 18 |
|
| 19 |
MAX_MAX_NEW_TOKENS = 2048
|
|
|
|
| 22 |
|
| 23 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 24 |
|
| 25 |
+
model_id = "anakin87/gemma-2-9b-neogenesis-ita"
|
| 26 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 27 |
model = AutoModelForCausalLM.from_pretrained(
|
| 28 |
model_id,
|