Upload 5 files
- BART_news_sapo_summarization.py +193 -0
- BartPho_new_sapo_summaiation.py +193 -0
- PhoBart_new_sapo_summaiation.py +198 -0
- ViT5_news_sapo_summarization.py +193 -0
- mt5_news_sapo_summarization.py +193 -0
BART_news_sapo_summarization.py
ADDED
@@ -0,0 +1,193 @@
import numpy as np
import pandas as pd
import re, os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import json

from sklearn.model_selection import train_test_split
from datasets import Dataset
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
from transformers import get_scheduler
from evaluate import load

parser = argparse.ArgumentParser()

parser.add_argument("--learning_rate",
                    default=5e-5,
                    type=float)

parser.add_argument("--batch_size",
                    default=32,
                    type=int)

parser.add_argument("--max_length_body",
                    default=512,
                    type=int)

parser.add_argument("--max_length_summ",
                    default=64,
                    type=int)

parser.add_argument("--num_epochs",
                    default=512,
                    type=int)

parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")

parser.add_argument("--do_eval",
                    action='store_true',
                    help="Whether to run eval on the test set.")

parser.add_argument("--debug",
                    action='store_true',
                    help="debug")


args = parser.parse_args()

print("--- Preparing the dataset... ---")
df = pd.read_parquet('news_sapo_processed.parquet')
if args.debug:
    df = df[0:1000].reset_index(drop=True)

# Hold out a small slice (~0.42%) and split it evenly into validation and test sets.
train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(train_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
val_dataset = Dataset.from_pandas(val_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
test_dataset = Dataset.from_pandas(test_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))

tokenizer = AutoTokenizer.from_pretrained("VietAI/vibart2-cased")

def preprocess_function(examples):
    # Tokenize article bodies as encoder inputs.
    inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True
    )

    # Tokenize the reference sapo summaries as decoder targets.
    targets = tokenizer(
        examples["summary"],
        max_length=args.max_length_summ,
        padding="max_length",
        truncation=True
    )

    inputs["labels"] = targets["input_ids"]
    return inputs


tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_val.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_test.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

# Sanity check: print the tensor shapes of one training batch.
for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")

def evaluate_cus(model, dataloader, tokenizer):
    model.eval()
    preds = []
    labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ
            )

            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(batch['labels'].to(device), skip_special_tokens=True)

            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]

    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")

    return rougeL


if args.do_train:
    print("--- Training ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vibart2-cased").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}")

        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch+1}: {avg_loss:.4f}")

        # Validate after every epoch and keep only the best checkpoint (by ROUGE-L).
        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({
            "epoch": epoch + 1,
            "loss": avg_loss,
            "rougeL": rougeL
        })
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")
        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
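The script only writes the best checkpoint to the best_model/ directory; nothing in the upload shows how that checkpoint is used afterwards. A minimal inference sketch is given below; the file name, example text, and beam-search setting are assumptions for illustration, not part of the uploaded scripts.

# inference_sketch.py -- hypothetical helper, not part of the uploaded files
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("best_model")      # directory written by the training loop
model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
model.eval()

article = "..."  # one cleaned article body (clean_body)
inputs = tokenizer(article, max_length=512, truncation=True, return_tensors="pt").to(device)
with torch.no_grad():
    summary_ids = model.generate(**inputs, max_length=64, num_beams=4)  # beam search is an assumption
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])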
BartPho_new_sapo_summaiation.py
ADDED
@@ -0,0 +1,193 @@
import numpy as np
import pandas as pd
import re, os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import json

from sklearn.model_selection import train_test_split
from datasets import Dataset
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
from transformers import get_scheduler
from evaluate import load

parser = argparse.ArgumentParser()

parser.add_argument("--learning_rate",
                    default=5e-5,
                    type=float)

parser.add_argument("--batch_size",
                    default=32,
                    type=int)

parser.add_argument("--max_length_body",
                    default=512,
                    type=int)

parser.add_argument("--max_length_summ",
                    default=64,
                    type=int)

parser.add_argument("--num_epochs",
                    default=512,
                    type=int)

parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")

parser.add_argument("--do_eval",
                    action='store_true',
                    help="Whether to run eval on the test set.")

parser.add_argument("--debug",
                    action='store_true',
                    help="debug")


args = parser.parse_args()

print("--- Preparing the dataset... ---")
df = pd.read_parquet('news_sapo_processed.parquet')
if args.debug:
    df = df[0:1000].reset_index(drop=True)

train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(train_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
val_dataset = Dataset.from_pandas(val_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
test_dataset = Dataset.from_pandas(test_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))

tokenizer = AutoTokenizer.from_pretrained("vinai/phobart-base")

def preprocess_function(examples):
    model_inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True
    )

    # with tokenizer.as_target_tokenizer():
    labels = tokenizer(
        examples["summary"],
        max_length=args.max_length_summ,
        padding="max_length",
        truncation=True
    )

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_val.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_test.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")

def evaluate_cus(model, dataloader, tokenizer):
    model.eval()
    preds = []
    labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ
            )

            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(batch['labels'].to(device), skip_special_tokens=True)

            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]

    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")

    return rougeL


if args.do_train:
    print("--- Training ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = AutoModelForSeq2SeqLM.from_pretrained("vinai/phobart-base").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}")

        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch+1}: {avg_loss:.4f}")

        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({
            "epoch": epoch + 1,
            "loss": avg_loss,
            "rougeL": rougeL
        })
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")
        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
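In this variant the as_target_tokenizer() call is commented out and the raw padded summary input_ids are stored as labels, so padding tokens also contribute to the cross-entropy loss. The sketch below shows the usual fix of masking pad positions to -100; it mirrors what PhoBart_new_sapo_summaiation.py in this upload already does and is not applied in this file. The example summary string is illustrative only.

# Sketch: mask padding positions in the labels so the loss ignores them
# (mirrors PhoBart_new_sapo_summaiation.py; assumes `tokenizer` is the one loaded above).
label_ids = tokenizer(
    ["một câu tóm tắt ví dụ"],  # example summary text, illustrative only
    max_length=64, padding="max_length", truncation=True
)["input_ids"]
masked_labels = [
    [(tok if tok != tokenizer.pad_token_id else -100) for tok in seq]
    for seq in label_ids
]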
PhoBart_new_sapo_summaiation.py
ADDED
@@ -0,0 +1,198 @@
import numpy as np
import pandas as pd
import re, os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import json

from sklearn.model_selection import train_test_split
from datasets import Dataset
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
from transformers import get_scheduler
from evaluate import load

parser = argparse.ArgumentParser()

parser.add_argument("--learning_rate",
                    default=5e-5,
                    type=float)

parser.add_argument("--batch_size",
                    default=32,
                    type=int)

parser.add_argument("--max_length_body",
                    default=512,
                    type=int)

parser.add_argument("--max_length_summ",
                    default=64,
                    type=int)

parser.add_argument("--num_epochs",
                    default=512,
                    type=int)

parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")

parser.add_argument("--do_eval",
                    action='store_true',
                    help="Whether to run eval on the test set.")

parser.add_argument("--debug",
                    action='store_true',
                    help="debug")


args = parser.parse_args()

print("--- Preparing the dataset... ---")
df = pd.read_parquet('news_sapo_processed.parquet')
if args.debug:
    df = df[0:1000].reset_index(drop=True)

train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(train_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
val_dataset = Dataset.from_pandas(val_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
test_dataset = Dataset.from_pandas(test_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))

tokenizer = AutoTokenizer.from_pretrained("vinai/phobart-base")

def preprocess_function(examples):
    inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True
    )

    targets = tokenizer(
        examples["summary"],
        max_length=args.max_length_summ,
        padding="max_length",
        truncation=True
    )

    # Replace padding token ids in the labels with -100 so the loss ignores them.
    inputs["labels"] = [
        [(label if label != tokenizer.pad_token_id else -100) for label in labels]
        for labels in targets["input_ids"]
    ]

    return inputs


tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_val.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_test.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")

def evaluate_cus(model, dataloader, tokenizer):
    model.eval()
    preds = []
    labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ
            )

            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            # Labels were padded with -100 for the loss; map them back to pad ids before decoding.
            label_ids = batch['labels'].to(device)
            label_ids = label_ids.masked_fill(label_ids == -100, tokenizer.pad_token_id)
            decoded_labels = tokenizer.batch_decode(label_ids, skip_special_tokens=True)

            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]

    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")

    return rougeL


if args.do_train:
    print("--- Training ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = AutoModelForSeq2SeqLM.from_pretrained("vinai/phobart-base").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}")

        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch+1}: {avg_loss:.4f}")

        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({
            "epoch": epoch + 1,
            "loss": avg_loss,
            "rougeL": rougeL
        })
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")
        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
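All five scripts pad every example to the fixed max_length up front, and only this one masks label padding by hand. A common alternative, sketched below under the assumption that the model and tokenized datasets from above are already loaded, is dynamic per-batch padding with transformers' DataCollatorForSeq2Seq, which sets label padding to -100 automatically; this is not what the uploaded scripts do.

# Sketch of an alternative setup (not used in the uploaded scripts):
# dynamic padding with DataCollatorForSeq2Seq instead of padding="max_length" everywhere.
from transformers import DataCollatorForSeq2Seq
from torch.utils.data import DataLoader

collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=-100)
train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size,
                              shuffle=True, collate_fn=collator)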
ViT5_news_sapo_summarization.py
ADDED
@@ -0,0 +1,193 @@
import numpy as np
import pandas as pd
import re, os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import json

from sklearn.model_selection import train_test_split
from datasets import Dataset
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
from transformers import get_scheduler
from evaluate import load

parser = argparse.ArgumentParser()

parser.add_argument("--learning_rate",
                    default=5e-5,
                    type=float)

parser.add_argument("--batch_size",
                    default=32,
                    type=int)

parser.add_argument("--max_length_body",
                    default=512,
                    type=int)

parser.add_argument("--max_length_summ",
                    default=64,
                    type=int)

parser.add_argument("--num_epochs",
                    default=512,
                    type=int)

parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")

parser.add_argument("--do_eval",
                    action='store_true',
                    help="Whether to run eval on the test set.")

parser.add_argument("--debug",
                    action='store_true',
                    help="debug")


args = parser.parse_args()

print("--- Preparing the dataset... ---")
df = pd.read_parquet('news_sapo_processed.parquet')
if args.debug:
    df = df[0:1000].reset_index(drop=True)

train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(train_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
val_dataset = Dataset.from_pandas(val_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
test_dataset = Dataset.from_pandas(test_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))

tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base")

def preprocess_function(examples):
    model_inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True
    )

    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            examples["summary"],
            max_length=args.max_length_summ,
            padding="max_length",
            truncation=True
        )

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_val.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_test.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")

def evaluate_cus(model, dataloader, tokenizer):
    model.eval()
    preds = []
    labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ
            )

            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(batch['labels'].to(device), skip_special_tokens=True)

            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]

    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")

    return rougeL


if args.do_train:
    print("--- Training ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}")

        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch+1}: {avg_loss:.4f}")

        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({
            "epoch": epoch + 1,
            "loss": avg_loss,
            "rougeL": rougeL
        })
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")
        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
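Every script scores with the evaluate library's "rouge" metric and use_stemmer=True (the stemmer is English-oriented, so it has little effect on Vietnamese text). The toy call below shows the shape of what rouge.compute returns and why the scripts can format result["rouge1"] as a float; the example strings are illustrative only, not results from the dataset.

# Toy check of the metric object used above; scores are illustrative, not reported results.
from evaluate import load

rouge = load("rouge")
scores = rouge.compute(
    predictions=["thủ tướng thăm nhật bản"],
    references=["thủ tướng việt nam thăm nhật bản"],
    use_stemmer=True,
)
print(scores["rouge1"], scores["rouge2"], scores["rougeL"])  # aggregated F-measure floats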
mt5_news_sapo_summarization.py
ADDED
@@ -0,0 +1,193 @@
import numpy as np
import pandas as pd
import re, os, sys
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import json

from sklearn.model_selection import train_test_split
from datasets import Dataset
import torch
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
from transformers import get_scheduler
from evaluate import load

parser = argparse.ArgumentParser()

parser.add_argument("--learning_rate",
                    default=5e-5,
                    type=float)

parser.add_argument("--batch_size",
                    default=32,
                    type=int)

parser.add_argument("--max_length_body",
                    default=512,
                    type=int)

parser.add_argument("--max_length_summ",
                    default=64,
                    type=int)

parser.add_argument("--num_epochs",
                    default=512,
                    type=int)

parser.add_argument("--do_train",
                    action='store_true',
                    help="Whether to run training.")

parser.add_argument("--do_eval",
                    action='store_true',
                    help="Whether to run eval on the test set.")

parser.add_argument("--debug",
                    action='store_true',
                    help="debug")


args = parser.parse_args()

print("--- Preparing the dataset... ---")
df = pd.read_parquet('news_sapo_processed.parquet')
if args.debug:
    df = df[0:1000].reset_index(drop=True)

train_df, temp_df = train_test_split(df, test_size=0.0042, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

train_dataset = Dataset.from_pandas(train_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
val_dataset = Dataset.from_pandas(val_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))
test_dataset = Dataset.from_pandas(test_df[["clean_body", "clean_summary"]].rename(
    columns={"clean_body": "text", "clean_summary": "summary"}))

tokenizer = AutoTokenizer.from_pretrained("google/mt5-base")

def preprocess_function(examples):
    model_inputs = tokenizer(
        examples["text"],
        max_length=args.max_length_body,
        padding="max_length",
        truncation=True
    )

    with tokenizer.as_target_tokenizer():
        labels = tokenizer(
            examples["summary"],
            max_length=args.max_length_summ,
            padding="max_length",
            truncation=True
        )

    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

tokenized_train = train_dataset.map(preprocess_function, batched=True)
tokenized_val = val_dataset.map(preprocess_function, batched=True)
tokenized_test = test_dataset.map(preprocess_function, batched=True)

tokenized_train.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_val.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])
tokenized_test.set_format(type='torch', columns=['input_ids', 'attention_mask', 'labels'])

train_dataloader = DataLoader(tokenized_train, batch_size=args.batch_size, shuffle=True)
val_dataloader = DataLoader(tokenized_val, batch_size=args.batch_size)
test_dataloader = DataLoader(tokenized_test, batch_size=args.batch_size)

for batch in train_dataloader:
    print({k: v.shape for k, v in batch.items()})
    break

rouge = load("rouge")

def evaluate_cus(model, dataloader, tokenizer):
    model.eval()
    preds = []
    labels = []

    with torch.no_grad():
        for batch in tqdm(dataloader, desc="Evaluating"):
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)

            generated_tokens = model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_length=args.max_length_summ
            )

            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(batch['labels'].to(device), skip_special_tokens=True)

            preds.extend(decoded_preds)
            labels.extend(decoded_labels)

    result = rouge.compute(predictions=preds, references=labels, use_stemmer=True)
    rouge1 = result["rouge1"]
    rouge2 = result["rouge2"]
    rougeL = result["rougeL"]

    print(f"ROUGE-1: {rouge1:.4f}")
    print(f"ROUGE-2: {rouge2:.4f}")
    print(f"ROUGE-L: {rougeL:.4f}")

    return rougeL


if args.do_train:
    print("--- Training ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-base").to(device)
    optimizer = AdamW(model.parameters(), lr=args.learning_rate)

    best_rougeL = 0
    num_training_steps = len(train_dataloader) * args.num_epochs
    lr_scheduler = get_scheduler(
        "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps
    )
    results_history = []
    for epoch in range(args.num_epochs):
        model.train()
        total_loss = 0
        progress_bar = tqdm(train_dataloader, desc=f"Epoch {epoch+1}")

        for batch in progress_bar:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        avg_loss = total_loss / len(train_dataloader)
        print(f">>> Avg loss for epoch {epoch+1}: {avg_loss:.4f}")

        rougeL = evaluate_cus(model, val_dataloader, tokenizer)
        results_history.append({
            "epoch": epoch + 1,
            "loss": avg_loss,
            "rougeL": rougeL
        })
        if rougeL > best_rougeL:
            best_rougeL = rougeL
            print(f">>> New best ROUGE-L: {best_rougeL:.4f}. Saving model...")
            model.save_pretrained("best_model")
            tokenizer.save_pretrained("best_model")
        with open("training_log.json", "w", encoding="utf-8") as f:
            json.dump(results_history, f, indent=4, ensure_ascii=False)

if args.do_eval:
    print("--- Evaluation on test set ---")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = AutoModelForSeq2SeqLM.from_pretrained("best_model").to(device)
    tokenizer = AutoTokenizer.from_pretrained("best_model")
    evaluate_cus(model, test_dataloader, tokenizer)
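All five scripts share the same split: test_size=0.0042 holds out roughly 0.42% of the rows, which is then divided evenly between validation and test. The snippet below just works out the resulting split sizes; the corpus size of 1,000,000 rows is an assumption for illustration, since the real row count of news_sapo_processed.parquet is not part of the upload.

# Rough split sizes implied by test_size=0.0042 followed by a 50/50 split
# (assumed corpus of 1,000,000 rows; the real row count is not in the upload).
n_rows = 1_000_000
n_temp = round(n_rows * 0.0042)   # ~4,200 rows held out
n_val = n_temp // 2               # ~2,100 validation rows
n_test = n_temp - n_val           # ~2,100 test rows
n_train = n_rows - n_temp         # ~995,800 training rows
print(n_train, n_val, n_test)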