Update app.py
app.py CHANGED

@@ -863,10 +863,15 @@ hf_token = st.sidebar.text_input("Hugging Face API Token", type="password",
 st.sidebar.subheader("Models")
 available_models = [
     "meta-llama/Llama-3.1-8B-Instruct",
-    "meta-llama/
+    "meta-llama/Llama-3.3-70B-Instruct",
+    "meta-llama/Llama-3.1-70B-Instruct",
+    "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
     "google/gemma-2-2b-it",
     "mistralai/Mistral-7B-Instruct-v0.2",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "microsoft/Phi-3-mini-4k-instruct",
+    "Qwen/Qwen2.5-72B-Instruct",
+    "Qwen/QwQ-32B-Preview"
     # Add more models as needed
 ]
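For context, here is a minimal sketch of how a model list like this is typically wired into a Streamlit sidebar together with the API token field that appears in the hunk header. Only hf_token, the subheader, and available_models come from the diff above; the selectbox, prompt field, and chat_completion call are illustrative assumptions about how app.py might consume the list, not its actual code.

import streamlit as st
from huggingface_hub import InferenceClient

# Sidebar inputs mirroring the hunk shown above.
hf_token = st.sidebar.text_input("Hugging Face API Token", type="password")
st.sidebar.subheader("Models")
available_models = [
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.3-70B-Instruct",
    "google/gemma-2-2b-it",
]

# Assumed usage: let the user pick one of the models and send a prompt
# to it via the Hugging Face Inference API.
selected_model = st.sidebar.selectbox("Model", available_models)
prompt = st.text_input("Prompt")

if prompt and hf_token:
    client = InferenceClient(model=selected_model, token=hf_token)
    response = client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=512,
    )
    st.write(response.choices[0].message.content)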