Update README.md
README.md CHANGED
@@ -30,7 +30,7 @@ Falcon-1B-Mental-Health-Advisor is a fine-tuned version of the tiiuae/falcon-rw-
 # Important Note
 Mental Health is a sensitive topic. Preferably, use the code snippet provided below in order to get optimal results.

-# Falcon-1B Fine-Tuned for Mental Health (LoRA)
+# Falcon-RW-1B Fine-Tuned for Mental Health (LoRA)

 This is a LoRA adapter for the Falcon-RW-1B model. It was fine-tuned on the 'marmikpandya/mental-health' dataset.

@@ -52,7 +52,7 @@ import re
 import language_tool_python

 base_model = "tiiuae/falcon-rw-1b"
-peft_model = "ShivomH/Falcon-1B-
+peft_model = "ShivomH/Falcon-1B-Mental-Health-v1"

 # Load the base model (without LoRA weights initially)
 model = AutoModelForCausalLM.from_pretrained(
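For context, here is a minimal sketch of how the renamed adapter would typically be loaded on top of the base model, assuming the standard `transformers` and `peft` APIs. The README's full snippet is truncated in this diff, so the dtype, prompt, and generation settings below are illustrative placeholders rather than the repository's exact code.

```python
# Minimal sketch (assumed usage): load the Falcon-RW-1B base model, then attach
# the LoRA adapter named in this commit. Settings below are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model = "tiiuae/falcon-rw-1b"
peft_model = "ShivomH/Falcon-1B-Mental-Health-v1"

# Load the base model (without LoRA weights initially)
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,  # assumption; the README may use a different dtype
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(base_model)

# Apply the fine-tuned LoRA adapter on top of the base weights
model = PeftModel.from_pretrained(model, peft_model)
model.eval()

# Example prompt (placeholder, not from the repository)
prompt = "I have been feeling anxious lately. What can I do?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```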