"""Fine-tune google/mt5-base for abstractive news summarization
(clean_body -> clean_summary) and evaluate it with ROUGE."""

import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin the run to a single GPU

import argparse
import json

import pandas as pd
import torch
from datasets import Dataset
from evaluate import load
from sklearn.model_selection import train_test_split
from torch.optim import AdamW
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, get_scheduler

parser = argparse.ArgumentParser()
parser.add_argument("--learning_rate", default=5e-5, type=float)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--max_length_body", default=512, type=int)
parser.add_argument("--max_length_summ", default=64, type=int)
parser.add_argument("--num_epochs", default=512, type=int)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument("--debug", action="store_true", help="Run on a 1,000-row subset for debugging.")
args = parser.parse_args()

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("--- Preparing the dataset... ---")
df = pd.read_parquet("news_sapo_processed.parquet")
if args.debug:
    df = df[0:1000].reset_index(drop=True)

# Hold out a small validation/test pool, then split it in half.
train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(
    train_df[["clean_body", "clean_summary"]].rename(
        columns={"clean_body": "text", "clean_summary": "summary"}
    )
)
val_dataset = Dataset.from_pandas(
    val_df[["clean_body", "clean_summary"]].rename(
        columns={"clean_body": "text", "clean_summary": "summary"}
    )
)
test_dataset = Dataset.from_pandas(
    test_df[["clean_body", "clean_summary"]].rename(
        columns={"clean_body": "text", "clean_summary": "summary"}
    )
)

tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")


def preprocess_function(examples):
    """Tokenize article bodies as encoder inputs and summaries as labels."""
    model_inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True,
    )
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            examples["summary"],
            max_length=args.max_length_summ,
            padding="max_length",
            truncation=True,
        )
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs


tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
tokenized_val.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
tokenized_test.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

# Sanity check: print the tensor shapes of the first training batch.
for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")


def evaluate_cus(model, dataloader, tokenizer):
    """Generate summaries for every batch and report ROUGE-1/2/L."""
    model.eval()
    preds = []
    labels = []
    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ,
            )
            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(batch["labels"], skip_special_tokens=True)
            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]
    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")
    return rougeL


if args.do_train:
    print("--- Training ---")
    model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )

    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch + 1}")
        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch + 1}: {avg_loss:.4f}")

        # Validate after every epoch; keep the checkpoint with the best ROUGE-L.
        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({"epoch": epoch + 1, "loss": avg_loss, "rougeL": rougeL})
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")

        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
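# Example invocation, shown as a sketch: the file name "train_mt5_sapo.py" is an
# assumption (use whatever this script is saved as); the flags correspond to the
# argparse options defined at the top of the script.
#
#   python train_mt5_sapo.py --debug --do_train --do_eval                 # quick smoke test on 1,000 rows
#   python train_mt5_sapo.py --do_train --do_eval --batch_size 16 --num_epochs 5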