# Convert the UTS_Text corpus into CoNLL-style word segmentation data
# (one syllable per line, tagged B-W / I-W, blank line between sentences).
import os
import shutil
from os.path import dirname, join

from datasets import load_dataset
from underthesea import word_tokenize
def create_wtk_dataset(text_dataset, output_folder):
    # Start from a clean output folder.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    os.makedirs(output_folder)

    for split in ["train", "validation", "test"]:
        sentences = text_dataset[split]["text"]
        with open(join(output_folder, f"{split}.txt"), "w", encoding="utf-8") as f:
            for sentence in sentences:
                # word_tokenize returns a list of words; multi-syllable words
                # keep their syllables joined by spaces within a single item.
                items = word_tokenize(sentence)
                for item in items:
                    tokens = item.split()
                    for i, token in enumerate(tokens):
                        # First syllable of a word is B-W, the rest are I-W.
                        if i > 0:
                            f.write(f"{token}\tI-W\n")
                        else:
                            f.write(f"{token}\tB-W\n")
                # Blank line separates sentences.
                f.write("\n")
if __name__ == "__main__":
    pwd = dirname(__file__)
    data_folder = join(pwd, "data")

    for config in ["small", "base", "large"]:
        text_dataset = load_dataset("undertheseanlp/UTS_Text", config)
        create_wtk_dataset(text_dataset, join(data_folder, config))
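    # Note: load_dataset fetches UTS_Text from the Hugging Face Hub on first
    # use, so the initial run needs network access; subsequent runs reuse the
    # local cache.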