---
license: cc-by-nc-4.0
language:
- ru
- tyv
pipeline_tag: translation
datasets:
- Agisight/tyvan-russian-parallel-50k
---

This is a [NLLB-200-600M](https://huggingface.co/facebook/nllb-200-distilled-600M) model
fine-tuned to translate between Tyvan and Russian, using the dataset from https://tyvan.ru.
|
|
Here is [a post](https://cointegrated.medium.com/a37fc706b865) about how the model was trained.
|
|
How to use the model:
|
|
```python
# The version of transformers is important: `fix_tokenizer` below relies on
# NllbTokenizer internals that changed in later releases.
!pip install sentencepiece transformers==4.33
import torch
from transformers import NllbTokenizer, AutoModelForSeq2SeqLM

def fix_tokenizer(tokenizer, new_lang='tyv_Cyrl'):
    """Add a new language token to the tokenizer vocabulary
    (run this each time after the tokenizer is initialized)."""
    old_len = len(tokenizer) - int(new_lang in tokenizer.added_tokens_encoder)
    tokenizer.lang_code_to_id[new_lang] = old_len - 1
    tokenizer.id_to_lang_code[old_len - 1] = new_lang
    # always move "mask" to the last position
    tokenizer.fairseq_tokens_to_ids["<mask>"] = len(tokenizer.sp_model) + len(tokenizer.lang_code_to_id) + tokenizer.fairseq_offset

    tokenizer.fairseq_tokens_to_ids.update(tokenizer.lang_code_to_id)
    tokenizer.fairseq_ids_to_tokens = {v: k for k, v in tokenizer.fairseq_tokens_to_ids.items()}
    if new_lang not in tokenizer._additional_special_tokens:
        tokenizer._additional_special_tokens.append(new_lang)
    # clear the added token encoders; otherwise a new token may end up there by mistake
    tokenizer.added_tokens_encoder = {}
    tokenizer.added_tokens_decoder = {}

MODEL_URL = "slone/nllb-rus-tyv-v1"
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_URL)
tokenizer = NllbTokenizer.from_pretrained(MODEL_URL)
fix_tokenizer(tokenizer)

def translate(
    text,
    model,
    tokenizer,
    src_lang='rus_Cyrl',
    tgt_lang='tyv_Cyrl',
    max_length='auto',
    num_beams=4,
    n_out=None,
    **kwargs
):
    tokenizer.src_lang = src_lang
    encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    if max_length == 'auto':
        # scale the output length budget with the input length
        max_length = int(32 + 2.0 * encoded.input_ids.shape[1])
    model.eval()
    generated_tokens = model.generate(
        **encoded.to(model.device),
        forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang],
        max_length=max_length,
        num_beams=num_beams,
        num_return_sequences=n_out or 1,
        **kwargs
    )
    out = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    if isinstance(text, str) and n_out is None:
        return out[0]
    return out

translate("красная птица", model=model, tokenizer=tokenizer)  # "red bird"
# 'кызыл куш'
```
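
The same `translate` function can be used in the opposite direction by swapping the language codes. A minimal sketch (the output shown in the comment is illustrative, not a verified run):

```python
# Tyvan -> Russian: swap the source and target language codes
translate("кызыл куш", model=model, tokenizer=tokenizer,
          src_lang='tyv_Cyrl', tgt_lang='rus_Cyrl')
# e.g. 'красная птица'
```

Passing a list of strings instead of a single string returns a list of translations, since `translate` unwraps the output only for single-string inputs.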