Translation model incorporates Meta-Llama-3.2-8B-Instruct.
- app.py +17 -17
- config.json5 +6 -0
app.py
CHANGED
@@ -1036,36 +1036,36 @@ def create_ui(app_config: ApplicationConfig):
    madlad400_models = list(filter(lambda madlad400: "ct2" in madlad400, madlad400_models))

    common_whisper_inputs = lambda : {
-       gr.Dropdown(label="Whisper - Model (for audio)", choices=whisper_models, value=app_config.default_model_name if app_config.default_model_name != None else (lambda : None)),
-       gr.Dropdown(label="Whisper - Language", choices=sorted(get_lang_whisper_names()), value=app_config.language if app_config.language != None else (lambda : None)),
+       gr.Dropdown(label="Whisper - Model (for audio)", choices=whisper_models, value=app_config.default_model_name if app_config.default_model_name != None else (lambda : None), elem_id="whisperModelName"),
+       gr.Dropdown(label="Whisper - Language", choices=sorted(get_lang_whisper_names()), value=app_config.language if app_config.language != None else (lambda : None), elem_id="whisperLangName"),
    }
    common_m2m100_inputs = lambda : {
-       gr.Dropdown(label="M2M100 - Model (for translate)", choices=m2m100_models, value=lambda : None),
-       gr.Dropdown(label="M2M100 - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None),
+       gr.Dropdown(label="M2M100 - Model (for translate)", choices=m2m100_models, value=lambda : None, elem_id="m2m100ModelName"),
+       gr.Dropdown(label="M2M100 - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None, elem_id="m2m100LangName"),
    }
    common_nllb_inputs = lambda : {
-       gr.Dropdown(label="NLLB - Model (for translate)", choices=nllb_models, value=lambda : None),
-       gr.Dropdown(label="NLLB - Language", choices=sorted(get_lang_nllb_names()), value=lambda : None),
+       gr.Dropdown(label="NLLB - Model (for translate)", choices=nllb_models, value=lambda : None, elem_id="nllbModelName"),
+       gr.Dropdown(label="NLLB - Language", choices=sorted(get_lang_nllb_names()), value=lambda : None, elem_id="nllbLangName"),
    }
    common_mt5_inputs = lambda : {
-       gr.Dropdown(label="MT5 - Model (for translate)", choices=mt5_models, value=lambda : None),
-       gr.Dropdown(label="MT5 - Language", choices=sorted(get_lang_m2m100_names(["en", "ja", "zh"])), value=lambda : None),
+       gr.Dropdown(label="MT5 - Model (for translate)", choices=mt5_models, value=lambda : None, elem_id="mt5ModelName"),
+       gr.Dropdown(label="MT5 - Language", choices=sorted(get_lang_m2m100_names(["en", "ja", "zh"])), value=lambda : None, elem_id="mt5LangName"),
    }
    common_ALMA_inputs = lambda : {
-       gr.Dropdown(label="ALMA - Model (for translate)", choices=ALMA_models, value=lambda : None),
-       gr.Dropdown(label="ALMA - Language", choices=sort_lang_by_whisper_codes(["en", "de", "cs", "is", "ru", "zh", "ja"]), value=lambda : None),
+       gr.Dropdown(label="ALMA - Model (for translate)", choices=ALMA_models, value=lambda : None, elem_id="ALMAModelName"),
+       gr.Dropdown(label="ALMA - Language", choices=sort_lang_by_whisper_codes(["en", "de", "cs", "is", "ru", "zh", "ja"]), value=lambda : None, elem_id="ALMALangName"),
    }
    common_madlad400_inputs = lambda : {
-       gr.Dropdown(label="madlad400 - Model (for translate)", choices=madlad400_models, value=lambda : None),
-       gr.Dropdown(label="madlad400 - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None),
+       gr.Dropdown(label="madlad400 - Model (for translate)", choices=madlad400_models, value=lambda : None, elem_id="madlad400ModelName"),
+       gr.Dropdown(label="madlad400 - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None, elem_id="madlad400LangName"),
    }
    common_seamless_inputs = lambda : {
-       gr.Dropdown(label="seamless - Model (for translate)", choices=seamless_models, value=lambda : None),
-       gr.Dropdown(label="seamless - Language", choices=sorted(get_lang_seamlessT_Tx_names()), value=lambda : None),
+       gr.Dropdown(label="seamless - Model (for translate)", choices=seamless_models, value=lambda : None, elem_id="seamlessModelName"),
+       gr.Dropdown(label="seamless - Language", choices=sorted(get_lang_seamlessT_Tx_names()), value=lambda : None, elem_id="seamlessLangName"),
    }
    common_Llama_inputs = lambda : {
-       gr.Dropdown(label="Llama - Model (for translate)", choices=Llama_models, value=lambda : None),
-       gr.Dropdown(label="Llama - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None),
+       gr.Dropdown(label="Llama - Model (for translate)", choices=Llama_models, value=lambda : None, elem_id="LlamaModelName"),
+       gr.Dropdown(label="Llama - Language", choices=sorted(get_lang_m2m100_names()), value=lambda : None, elem_id="LlamaLangName"),
    }

    common_translation_inputs = lambda : {
@@ -1280,7 +1280,7 @@ def create_ui(app_config: ApplicationConfig):
            llamaTab.select(fn=lambda: "Llama", inputs = [], outputs= [translateInput] )
            with gr.Column():
                inputDict.update({
-                   gr.Dropdown(label="Input - Language", choices=sorted(get_lang_whisper_names()), value=app_config.language if app_config.language != None else (lambda : None)),
+                   gr.Dropdown(label="Input - Language", choices=sorted(get_lang_whisper_names()), value=app_config.language if app_config.language != None else (lambda : None), elem_id="inputLangName"),
                    gr.Text(lines=5, label="Input - Text", elem_id="inputText", elem_classes="scroll-show"),
                })
            with gr.Column():
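The app.py change is uniform: every model and language dropdown gains an elem_id, which Gradio attaches as the HTML id of the component's wrapper so the element can be targeted from custom CSS or JavaScript. A minimal, self-contained sketch of that pattern follows; the CSS rules and the two-dropdown layout are illustrative assumptions, not code from app.py:

import gradio as gr

# Hypothetical CSS targeting two of the elem_id values introduced in this commit.
custom_css = """
#whisperModelName { max-width: 320px; }
#m2m100LangName label { font-weight: bold; }
"""

with gr.Blocks(css=custom_css) as demo:
    # Same pattern as the diff: passing a callable as `value` leaves the
    # dropdown unset (the callable is evaluated to produce the default).
    gr.Dropdown(label="Whisper - Model (for audio)", choices=["tiny", "base"],
                value=lambda: None, elem_id="whisperModelName")
    gr.Dropdown(label="M2M100 - Language", choices=["English", "Japanese"],
                value=lambda: None, elem_id="m2m100LangName")

if __name__ == "__main__":
    demo.launch()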
config.json5
CHANGED
@@ -298,6 +298,12 @@
        }
    ],
    "Llama": [
+       {
+           "name": "Meta-Llama-3.2-8B-Instruct-ct2-int8_float16/avan",
+           "url": "avans06/Meta-Llama-3.2-8B-Instruct-ct2-int8_float16",
+           "type": "huggingface",
+           "tokenizer_url": "avans06/Meta-Llama-3.2-8B-Instruct-ct2-int8_float16"
+       },
        {
            "name": "Meta-Llama-3.1-8B-Instruct-ct2-int8_float16/avan",
            "url": "avans06/Meta-Llama-3.1-8B-Instruct-ct2-int8_float16",