# prepare_parquet_files.py
# Dataset: 2018-11-08-OpenPsychometrics-IPIP-FFM
# Splits the raw IPIP-FFM responses into train/validation/test Parquet files.
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from datasets import Dataset, DatasetDict
import pyarrow as pa
import pyarrow.parquet as pq
# Directory where the Parquet splits will be written.
parquet_dir = "./dataset_parquet"
os.makedirs(parquet_dir, exist_ok=True)

# Load the raw IPIP-FFM responses. Despite the .csv extension, the
# OpenPsychometrics dump is tab-separated.
df = pd.read_csv("data-final.csv", delimiter='\t')

# 60/20/20 split: first carve off 40% as a temporary pool, then halve it
# into validation and test. Fixed random_state keeps the splits reproducible.
train_df, temp_df = train_test_split(df, test_size=0.4, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)

# Wrap the splits as Hugging Face Datasets.
# preserve_index=False: train_test_split leaves a shuffled integer index on
# each DataFrame; without this flag it would be serialized into the Parquet
# files as a spurious "__index_level_0__" column.
train_dataset = Dataset.from_pandas(train_df, preserve_index=False)
val_dataset = Dataset.from_pandas(val_df, preserve_index=False)
test_dataset = Dataset.from_pandas(test_df, preserve_index=False)

# Group the splits under their canonical names.
dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": val_dataset,
    "test": test_dataset
})

# Write one Parquet file per split (same preserve_index guard on the
# Arrow conversion).
for split_name, dataset in dataset_dict.items():
    table = pa.Table.from_pandas(dataset.to_pandas(), preserve_index=False)
    pq.write_table(table, os.path.join(parquet_dir, f"{split_name}.parquet"))

print("Dataset splits saved as Parquet files.")