Update app.py
app.py
CHANGED
@@ -865,14 +865,11 @@ hf_token = st.sidebar.text_input("Hugging Face API Token", type="password",
 # Model selection
 st.sidebar.subheader("Models")
 available_models = [
-    "meta-llama/Llama-3.1-8B-Instruct",
     "meta-llama/Llama-3.3-70B-Instruct",
     "meta-llama/Llama-3.1-70B-Instruct",
     "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
-    "google/gemma-2-2b-it",
     "mistralai/Mistral-7B-Instruct-v0.2",
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    "microsoft/Phi-3-mini-4k-instruct",
     "Qwen/Qwen2.5-72B-Instruct",
     "Qwen/QwQ-32B-Preview"
     # Add more models as needed
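The commit only touches the sidebar model list; the rest of app.py is not shown. Below is a minimal sketch of how such a list is typically wired into a Streamlit app against the Hugging Face Inference API. Only hf_token, st.sidebar.subheader("Models"), and available_models appear in the diff; the selectbox, prompt box, generate button, and InferenceClient call are illustrative assumptions, not code from this commit.

import streamlit as st
from huggingface_hub import InferenceClient

# Sidebar inputs shown in the diff: API token and model list.
hf_token = st.sidebar.text_input("Hugging Face API Token", type="password")
st.sidebar.subheader("Models")
available_models = [
    "meta-llama/Llama-3.3-70B-Instruct",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Qwen/Qwen2.5-72B-Instruct",
]

# Assumed wiring (not in the diff): pick a model and send a chat request.
selected_model = st.sidebar.selectbox("Model", available_models)
prompt = st.text_area("Prompt")

if st.button("Generate") and hf_token and prompt:
    client = InferenceClient(model=selected_model, token=hf_token)
    response = client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=512,
    )
    st.write(response.choices[0].message.content)

Under this reading, trimming the list to larger instruct models (and removing Llama-3.1-8B, Gemma-2-2b, and Phi-3-mini) only changes which options the dropdown offers; the request path stays the same.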