raghuprasadks committed
Commit 1ea598f · verified · 1 Parent(s): 5b30ece

Upload 5 files

Files changed (5)
  1. README.md +5 -5
  2. UNESCO_META_HF_BANNER.png +0 -0
  3. app.py +123 -0
  4. flores.py +206 -0
  5. requirements.txt +8 -0
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Aittranslator
- emoji: 😻
- colorFrom: blue
- colorTo: purple
+ title: Fdp
+ emoji: 🚀
+ colorFrom: red
+ colorTo: red
  sdk: gradio
- sdk_version: 5.34.2
+ sdk_version: 5.34.0
  app_file: app.py
  pinned: false
  ---
UNESCO_META_HF_BANNER.png ADDED
app.py ADDED
@@ -0,0 +1,123 @@
+ import spaces
+ import gradio as gr
+ from sacremoses import MosesPunctNormalizer
+ from stopes.pipelines.monolingual.utils.sentence_split import get_split_algo
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ from flores import code_mapping
+ import torch
+ import nltk
+ from functools import lru_cache
+
+ nltk.download("punkt_tab")
+
+ REMOVED_TARGET_LANGUAGES = {"Ligurian", "Lombard", "Sicilian"}
+
+ # ✅ Dynamic CUDA check - use GPU only if available
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {device}")
+
+ MODEL_NAME = "facebook/nllb-200-3.3B"
+
+ # Sort languages alphabetically by their display names
+ code_mapping = dict(sorted(code_mapping.items(), key=lambda item: item[0]))
+ flores_codes = list(code_mapping.keys())
+ target_languages = [language for language in flores_codes if language not in REMOVED_TARGET_LANGUAGES]
+
+ def load_model():
+     model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME).to(device)
+     print(f"Model loaded on {device}")
+     return model
+
+ model = load_model()
+
+ # Load tokenizer once
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+ punct_normalizer = MosesPunctNormalizer(lang="en")
+
+ @lru_cache(maxsize=202)
+ def get_language_specific_sentence_splitter(language_code):
+     # stopes keys its splitters on the three-letter ISO part of the FLORES code
+     short_code = language_code[:3]
+     splitter = get_split_algo(short_code, "default")
+     return splitter
+
+ @lru_cache(maxsize=100)
+ def translate(text: str, src_lang: str, tgt_lang: str):
+     if not src_lang:
+         raise gr.Error("The source language is empty! Please choose it in the dropdown list.")
+     if not tgt_lang:
+         raise gr.Error("The target language is empty! Please choose it in the dropdown list.")
+     return _translate(text, src_lang, tgt_lang)
+
+ @spaces.GPU
+ def _translate(text: str, src_lang: str, tgt_lang: str):
+     src_code = code_mapping[src_lang]
+     tgt_code = code_mapping[tgt_lang]
+     tokenizer.src_lang = src_code
+     tokenizer.tgt_lang = tgt_code
+
+     # Normalize punctuation, then translate paragraph by paragraph and
+     # sentence by sentence so long inputs stay within the model's context.
+     text = punct_normalizer.normalize(text)
+     paragraphs = text.split("\n")
+     translated_paragraphs = []
+
+     for paragraph in paragraphs:
+         splitter = get_language_specific_sentence_splitter(src_code)
+         sentences = list(splitter(paragraph))
+         translated_sentences = []
+
+         for sentence in sentences:
+             input_tokens = (
+                 tokenizer(sentence, return_tensors="pt")
+                 .input_ids[0]
+                 .cpu()
+                 .numpy()
+                 .tolist()
+             )
+             # NLLB expects the decoder to start with the target-language tag,
+             # hence the forced BOS token below.
+             translated_chunk = model.generate(
+                 input_ids=torch.tensor([input_tokens]).to(device),
+                 forced_bos_token_id=tokenizer.convert_tokens_to_ids(tgt_code),
+                 max_length=len(input_tokens) + 50,
+                 num_return_sequences=1,
+                 num_beams=5,
+                 no_repeat_ngram_size=4,
+                 renormalize_logits=True,
+             )
+             translated_chunk = tokenizer.decode(
+                 translated_chunk[0], skip_special_tokens=True
+             )
+             translated_sentences.append(translated_chunk)
+
+         translated_paragraph = " ".join(translated_sentences)
+         translated_paragraphs.append(translated_paragraph)
+
+     return "\n".join(translated_paragraphs)
+
+ description = """<div style="text-align: center;">
+ <img src="https://huggingface.co/spaces/UNESCO/nllb/resolve/main/UNESCO_META_HF_BANNER.png" alt="UNESCO Meta Hugging Face Banner" style="max-width: 800px; width: 100%; margin: 0 auto;">
+ <h1 style="color: #0077be;">UNESCO Language Translator, powered by Meta and Hugging Face</h1></div>
+ UNESCO, Meta, and Hugging Face have come together to create an accessible, high-quality translation experience in 200 languages."""
+ disclaimer = """## Disclaimer
+ (This section remains unchanged)
+ """
+
+ examples_inputs = [["The United Nations Educational, Scientific and Cultural Organization is a specialized agency of the United Nations with the aim of promoting world peace and security through international cooperation in education, arts, sciences and culture.", "English", "Ayacucho Quechua"]]
+
+ with gr.Blocks() as demo:
+     gr.Markdown(description)
+     with gr.Row():
+         src_lang = gr.Dropdown(label="Source Language", choices=flores_codes)
+         target_lang = gr.Dropdown(label="Target Language", choices=target_languages)
+     with gr.Row():
+         input_text = gr.Textbox(label="Input Text", lines=6)
+     with gr.Row():
+         btn = gr.Button("Translate text")
+     with gr.Row():
+         output = gr.Textbox(label="Output Text", lines=6)
+     btn.click(
+         translate,
+         inputs=[input_text, src_lang, target_lang],
+         outputs=output,
+     )
+     examples = gr.Examples(examples=examples_inputs, inputs=[input_text, src_lang, target_lang], fn=translate, outputs=output, cache_examples=True)
+     with gr.Row():
+         gr.Markdown(disclaimer)
+ demo.launch()
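
As an aside for reviewers: the NLLB call that `_translate` wraps can be smoke-tested outside Gradio. A minimal standalone sketch, not part of this commit; it assumes only `transformers` and `torch`, and reuses the model name and FLORES codes from the files in this upload (the "Hello, world!" input is illustrative):

    # Hypothetical smoke test for the generate() call used in app.py
    from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    model_name = "facebook/nllb-200-3.3B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    tokenizer.src_lang = "eng_Latn"  # FLORES code for English (see flores.py)
    inputs = tokenizer("Hello, world!", return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        # NLLB expects the decoder to start with the target-language tag
        forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
        max_length=inputs.input_ids.shape[1] + 50,
    )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))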
flores.py ADDED
@@ -0,0 +1,206 @@
+ code_mapping = {
+     "Acehnese (Arabic script)": "ace_Arab",
+     "Acehnese (Latin script)": "ace_Latn",
+     "Mesopotamian Arabic": "acm_Arab",
+     "Ta’izzi-Adeni Arabic": "acq_Arab",
+     "Tunisian Arabic": "aeb_Arab",
+     "Afrikaans": "afr_Latn",
+     "South Levantine Arabic": "ajp_Arab",
+     "Akan": "aka_Latn",
+     "Amharic": "amh_Ethi",
+     "North Levantine Arabic": "apc_Arab",
+     "Modern Standard Arabic": "arb_Arab",
+     # "Modern Standard Arabic (Romanized)": "arb_Latn",  # it is in FLORES, but not in NLLB
+     "Najdi Arabic": "ars_Arab",
+     "Moroccan Arabic": "ary_Arab",
+     "Egyptian Arabic": "arz_Arab",
+     "Assamese": "asm_Beng",
+     "Asturian": "ast_Latn",
+     "Awadhi": "awa_Deva",
+     "Central Aymara": "ayr_Latn",
+     "South Azerbaijani": "azb_Arab",
+     "North Azerbaijani": "azj_Latn",
+     "Bashkir": "bak_Cyrl",
+     "Bambara": "bam_Latn",
+     "Balinese": "ban_Latn",
+     "Belarusian": "bel_Cyrl",
+     "Bemba": "bem_Latn",
+     "Bengali": "ben_Beng",
+     "Bhojpuri": "bho_Deva",
+     "Banjar (Arabic script)": "bjn_Arab",
+     "Banjar (Latin script)": "bjn_Latn",
+     "Standard Tibetan": "bod_Tibt",
+     "Bosnian": "bos_Latn",
+     "Buginese": "bug_Latn",
+     "Bulgarian": "bul_Cyrl",
+     "Catalan": "cat_Latn",
+     "Cebuano": "ceb_Latn",
+     "Czech": "ces_Latn",
+     "Chokwe": "cjk_Latn",
+     "Central Kurdish": "ckb_Arab",
+     "Crimean Tatar": "crh_Latn",
+     "Welsh": "cym_Latn",
+     "Danish": "dan_Latn",
+     "German": "deu_Latn",
+     "Southwestern Dinka": "dik_Latn",
+     "Dyula": "dyu_Latn",
+     "Dzongkha": "dzo_Tibt",
+     "Greek": "ell_Grek",
+     "English": "eng_Latn",
+     "Esperanto": "epo_Latn",
+     "Estonian": "est_Latn",
+     "Basque": "eus_Latn",
+     "Ewe": "ewe_Latn",
+     "Faroese": "fao_Latn",
+     "Fijian": "fij_Latn",
+     "Finnish": "fin_Latn",
+     "Fon": "fon_Latn",
+     "French": "fra_Latn",
+     "Friulian": "fur_Latn",
+     "Nigerian Fulfulde": "fuv_Latn",
+     "Scottish Gaelic": "gla_Latn",
+     "Irish": "gle_Latn",
+     "Galician": "glg_Latn",
+     "Guarani": "grn_Latn",
+     "Gujarati": "guj_Gujr",
+     "Haitian Creole": "hat_Latn",
+     "Hausa": "hau_Latn",
+     "Hebrew": "heb_Hebr",
+     "Hindi": "hin_Deva",
+     "Chhattisgarhi": "hne_Deva",
+     "Croatian": "hrv_Latn",
+     "Hungarian": "hun_Latn",
+     "Armenian": "hye_Armn",
+     "Igbo": "ibo_Latn",
+     "Ilocano": "ilo_Latn",
+     "Indonesian": "ind_Latn",
+     "Icelandic": "isl_Latn",
+     "Italian": "ita_Latn",
+     "Javanese": "jav_Latn",
+     "Japanese": "jpn_Jpan",
+     "Kabyle": "kab_Latn",
+     "Jingpho": "kac_Latn",
+     "Kamba": "kam_Latn",
+     "Kannada": "kan_Knda",
+     "Kashmiri (Arabic script)": "kas_Arab",
+     "Kashmiri (Devanagari script)": "kas_Deva",
+     "Georgian": "kat_Geor",
+     "Central Kanuri (Arabic script)": "knc_Arab",
+     "Central Kanuri (Latin script)": "knc_Latn",
+     "Kazakh": "kaz_Cyrl",
+     "Kabiyè": "kbp_Latn",
+     "Kabuverdianu": "kea_Latn",
+     "Khmer": "khm_Khmr",
+     "Kikuyu": "kik_Latn",
+     "Kinyarwanda": "kin_Latn",
+     "Kyrgyz": "kir_Cyrl",
+     "Kimbundu": "kmb_Latn",
+     "Northern Kurdish": "kmr_Latn",
+     "Kikongo": "kon_Latn",
+     "Korean": "kor_Hang",
+     "Lao": "lao_Laoo",
+     "Ligurian": "lij_Latn",
+     "Limburgish": "lim_Latn",
+     "Lingala": "lin_Latn",
+     "Lithuanian": "lit_Latn",
+     "Lombard": "lmo_Latn",
+     "Latgalian": "ltg_Latn",
+     "Luxembourgish": "ltz_Latn",
+     "Luba-Kasai": "lua_Latn",
+     "Ganda": "lug_Latn",
+     "Luo": "luo_Latn",
+     "Mizo": "lus_Latn",
+     "Standard Latvian": "lvs_Latn",
+     "Magahi": "mag_Deva",
+     "Maithili": "mai_Deva",
+     "Malayalam": "mal_Mlym",
+     "Marathi": "mar_Deva",
+     # "Minangkabau (Arabic script)": "min_Arab",  # it is in FLORES, but not in NLLB
+     "Minangkabau (Latin script)": "min_Latn",
+     "Macedonian": "mkd_Cyrl",
+     "Plateau Malagasy": "plt_Latn",
+     "Maltese": "mlt_Latn",
+     "Meitei (Bengali script)": "mni_Beng",
+     "Halh Mongolian": "khk_Cyrl",
+     "Mossi": "mos_Latn",
+     "Maori": "mri_Latn",
+     "Burmese": "mya_Mymr",
+     "Dutch": "nld_Latn",
+     "Norwegian Nynorsk": "nno_Latn",
+     "Norwegian Bokmål": "nob_Latn",
+     "Nepali": "npi_Deva",
+     "Northern Sotho": "nso_Latn",
+     "Nuer": "nus_Latn",
+     "Nyanja": "nya_Latn",
+     "Occitan": "oci_Latn",
+     "West Central Oromo": "gaz_Latn",
+     "Odia": "ory_Orya",
+     "Pangasinan": "pag_Latn",
+     "Eastern Panjabi": "pan_Guru",
+     "Papiamento": "pap_Latn",
+     "Western Persian": "pes_Arab",
+     "Polish": "pol_Latn",
+     "Portuguese": "por_Latn",
+     "Dari": "prs_Arab",
+     "Southern Pashto": "pbt_Arab",
+     "Ayacucho Quechua": "quy_Latn",
+     "Romanian": "ron_Latn",
+     "Rundi": "run_Latn",
+     "Russian": "rus_Cyrl",
+     "Sango": "sag_Latn",
+     "Sanskrit": "san_Deva",
+     "Santali": "sat_Beng",  # it is called sat_Olck in FLORES, but (less correctly) sat_Beng in NLLB
+     "Sicilian": "scn_Latn",
+     "Shan": "shn_Mymr",
+     "Sinhala": "sin_Sinh",
+     "Slovak": "slk_Latn",
+     "Slovenian": "slv_Latn",
+     "Samoan": "smo_Latn",
+     "Shona": "sna_Latn",
+     "Sindhi": "snd_Arab",
+     "Somali": "som_Latn",
+     "Southern Sotho": "sot_Latn",
+     "Spanish": "spa_Latn",
+     "Tosk Albanian": "als_Latn",
+     "Sardinian": "srd_Latn",
+     "Serbian": "srp_Cyrl",
+     "Swati": "ssw_Latn",
+     "Sundanese": "sun_Latn",
+     "Swedish": "swe_Latn",
+     "Swahili": "swh_Latn",
+     "Silesian": "szl_Latn",
+     "Tamil": "tam_Taml",
+     "Tatar": "tat_Cyrl",
+     "Telugu": "tel_Telu",
+     "Tajik": "tgk_Cyrl",
+     "Tagalog": "tgl_Latn",
+     "Thai": "tha_Thai",
+     "Tigrinya": "tir_Ethi",
+     "Tamasheq (Latin script)": "taq_Latn",
+     "Tamasheq (Tifinagh script)": "taq_Tfng",
+     "Tok Pisin": "tpi_Latn",
+     "Tswana": "tsn_Latn",
+     "Tsonga": "tso_Latn",
+     "Turkmen": "tuk_Latn",
+     "Tumbuka": "tum_Latn",
+     "Turkish": "tur_Latn",
+     "Twi": "twi_Latn",
+     "Central Atlas Tamazight": "tzm_Tfng",
+     "Uyghur": "uig_Arab",
+     "Ukrainian": "ukr_Cyrl",
+     "Umbundu": "umb_Latn",
+     "Urdu": "urd_Arab",
+     "Northern Uzbek": "uzn_Latn",
+     "Venetian": "vec_Latn",
+     "Vietnamese": "vie_Latn",
+     "Waray": "war_Latn",
+     "Wolof": "wol_Latn",
+     "Xhosa": "xho_Latn",
+     "Eastern Yiddish": "ydd_Hebr",
+     "Yoruba": "yor_Latn",
+     "Yue Chinese": "yue_Hant",
+     "Chinese (Simplified)": "zho_Hans",
+     "Chinese (Traditional)": "zho_Hant",
+     "Standard Malay": "zsm_Latn",
+     "Zulu": "zul_Latn",
+ }
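
The keys of `code_mapping` are exactly the display names offered in the app.py dropdowns; the values are the FLORES-200 codes fed to the tokenizer. A quick illustrative check (hypothetical, assuming `flores.py` is importable):

    from flores import code_mapping

    assert code_mapping["English"] == "eng_Latn"
    # Codes are unique, so a reverse lookup table can be built safely:
    name_for_code = {code: name for name, code in code_mapping.items()}
    assert name_for_code["quy_Latn"] == "Ayacucho Quechua"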
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ transformers
+ torch
+ gradio==4.32.2
+ spaces
+ nltk
+ sacremoses
+ stopes[mono] @ git+https://github.com/facebookresearch/stopes@better-sentence-splitters
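
On Spaces these dependencies are installed automatically from this file; for a local run, `pip install -r requirements.txt` followed by `python app.py` should suffice, though the `cu113` extra index and the `gradio==4.32.2` pin may need adjusting to match the local CUDA toolkit and the `sdk_version` declared in README.md.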