|
import os |
|
import pandas as pd |
|
from sklearn.model_selection import train_test_split |
|
from datasets import Dataset, DatasetDict |
|
import pyarrow as pa |
|
import pyarrow.parquet as pq |
|
|
|
|
|
# Split a tab-separated dataset into train/validation/test Parquet files.

# Directory where the split Parquet files are written.
parquet_dir = "./dataset_parquet"

os.makedirs(parquet_dir, exist_ok=True)

# NOTE: data-final.csv is tab-delimited despite the .csv extension.
df = pd.read_csv("data-final.csv", delimiter='\t')

# 60% train, then split the remaining 40% evenly into validation and test
# (60/20/20 overall). A fixed random_state keeps the splits reproducible.
train_df, temp_df = train_test_split(df, test_size=0.4, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

# preserve_index=False: train_test_split keeps the original (shuffled) row
# labels on each frame, and Dataset.from_pandas would otherwise materialize
# them as a spurious "__index_level_0__" column in the Parquet output.
train_dataset = Dataset.from_pandas(train_df, preserve_index=False)
val_dataset = Dataset.from_pandas(val_df, preserve_index=False)
test_dataset = Dataset.from_pandas(test_df, preserve_index=False)

dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": val_dataset,
    "test": test_dataset
})

# Write each split as a standalone Parquet file (train.parquet,
# validation.parquet, test.parquet) under parquet_dir. preserve_index=False
# here too, so no index column or index metadata leaks into the files.
for split_name, dataset in dataset_dict.items():
    table = pa.Table.from_pandas(dataset.to_pandas(), preserve_index=False)
    pq.write_table(table, os.path.join(parquet_dir, f"{split_name}.parquet"))

print("Dataset splits saved as Parquet files.")
|
|