Racoci committed
Commit 1038c33 · 1 parent: 53160cb

Update README with accurate tests of the dataset

Files changed (6):
  1. README.md +2 -1
  2. data-final.csv +0 -3
  3. ipip_ffm.py +0 -49
  4. prepare_parket_files.py +38 -0
  5. tests.ipynb +117 -0
  6. upload_dataset.py +27 -0
README.md CHANGED
@@ -63,5 +63,6 @@ To load the dataset:
 ```python
 from datasets import load_dataset
 
-dataset = load_dataset("ipip_ffm", split="train")
+dataset = load_dataset("Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM")
+df = pd.DataFrame(dataset["train"])
 ```
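The new README snippet builds a DataFrame with `pd`, so a reader will also need `import pandas as pd`. A minimal sketch of the documented usage, assuming the train/validation/test Parquet files produced by prepare_parket_files.py are what the Hub serves for this repo:

```python
import pandas as pd
from datasets import load_dataset

# Load one split directly (split names assumed from prepare_parket_files.py).
dataset = load_dataset("Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM", split="train")

# Convert to pandas for inspection, as the README does.
df = pd.DataFrame(dataset)
print(df.shape)
```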
data-final.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dfbd5253f3f21f0569b34f2d1f47fbb71f5324ed26c3debbe29e84d42ce6d563
-size 416273844
ipip_ffm.py DELETED
@@ -1,49 +0,0 @@
-from datasets import DatasetBuilder, DatasetInfo, SplitGenerator, Split, Value, Features
-import pandas as pd
-import os
-
-class IPIPFFMDataset(DatasetBuilder):
-    def _info(self):
-        return DatasetInfo(
-            description="IPIP-FFM dataset with personality traits and metadata.",
-            features=Features({
-                "EXT1": Value("int32"),
-                "EXT2": Value("int32"),
-                "EXT3": Value("int32"),
-                # Add all of the other items here
-                "dateload": Value("string"),
-                "screenw": Value("int32"),
-                "screenh": Value("int32"),
-                "introelapse": Value("int32"),
-                "testelapse": Value("int32"),
-                "endelapse": Value("int32"),
-                "IPC": Value("int32"),
-                "country": Value("string"),
-                "lat_appx_lots_of_err": Value("float32"),
-                "long_appx_lots_of_err": Value("float32"),
-            }),
-            homepage="https://openpsychometrics.org/_rawdata/",
-            citation="@misc{openpsychometrics, author = {OpenPsychometrics}, title = {IPIP-FFM Dataset}, year = {2018}, url = {https://openpsychometrics.org/_rawdata/}}",
-        )
-
-    def _split_generators(self, dl_manager):
-        return [
-            SplitGenerator(
-                name=Split.TRAIN,
-                gen_kwargs={"filepath": "data-final.csv"}
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        df = pd.read_csv(filepath)
-        for i, row in df.iterrows():
-            yield i, row.to_dict()
-
-    def _prepare_split(self, split_generator, **kwargs):
-        # Prepare the data for the split
-        filepath = split_generator.gen_kwargs["filepath"]
-        df = pd.read_csv(filepath)
-        # Here you can add any preprocessing you need,
-        # for example splitting the data into train and validation.
-        # In this case we just return the dataframe as is.
-        return df
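For reference, the deleted script subclasses DatasetBuilder directly, which leaves methods such as _prepare_split for the subclass to implement correctly and is likely why it never loaded cleanly; loading scripts normally extend GeneratorBasedBuilder, which handles split preparation itself. A minimal sketch of that conventional shape (abridged feature list, not part of this commit):

```python
import csv
import datasets

class IPIPFFMDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description="IPIP-FFM dataset with personality traits and metadata.",
            features=datasets.Features({
                "EXT1": datasets.Value("int32"),   # remaining item columns omitted here
                "country": datasets.Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        # Resolve the raw CSV shipped alongside the script.
        path = dl_manager.download("data-final.csv")
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": path})]

    def _generate_examples(self, filepath):
        # The raw OpenPsychometrics file is tab-separated.
        with open(filepath, newline="", encoding="utf-8") as f:
            for i, row in enumerate(csv.DictReader(f, delimiter="\t")):
                yield i, row
```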
prepare_parket_files.py ADDED
@@ -0,0 +1,38 @@
+import os
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from datasets import Dataset, DatasetDict
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+# Define the directory to save Parquet files
+parquet_dir = "./dataset_parquet"
+
+# Create the directory if it doesn't exist
+os.makedirs(parquet_dir, exist_ok=True)
+
+# Load your CSV file into a pandas DataFrame
+df = pd.read_csv("data-final.csv", delimiter='\t')
+
+# Split the DataFrame into train, validation, and test sets
+train_df, temp_df = train_test_split(df, test_size=0.4, random_state=42)
+val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)
+
+# Convert the pandas DataFrames to Hugging Face Datasets
+train_dataset = Dataset.from_pandas(train_df)
+val_dataset = Dataset.from_pandas(val_df)
+test_dataset = Dataset.from_pandas(test_df)
+
+# Create a DatasetDict
+dataset_dict = DatasetDict({
+    "train": train_dataset,
+    "validation": val_dataset,
+    "test": test_dataset
+})
+
+# Convert each split to Parquet format and save
+for split_name, dataset in dataset_dict.items():
+    table = pa.Table.from_pandas(dataset.to_pandas())
+    pq.write_table(table, os.path.join(parquet_dir, f"{split_name}.parquet"))
+
+print("Dataset splits saved as Parquet files.")
tests.ipynb ADDED
@@ -0,0 +1,117 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "provenance": []
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "cells": [
+    {
+      "cell_type": "code",
+      "source": [
+        "!pip install -U datasets huggingface_hub fsspec"
+      ],
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "OSWN0xQn6z8u",
+        "outputId": "bb05c540-e196-49c3-cf7a-d2f4942abe52"
+      },
+      "execution_count": null,
+      "outputs": [
+        {
+          "output_type": "stream",
+          "name": "stdout",
+          "text": [
+            "Requirement already satisfied: datasets in /usr/local/lib/python3.11/dist-packages (2.14.4)\n",
+            "Collecting datasets\n",
+            " Downloading datasets-3.6.0-py3-none-any.whl.metadata (19 kB)\n",
+            "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.11/dist-packages (0.32.4)\n",
+            "Collecting huggingface_hub\n",
+            " Downloading huggingface_hub-0.33.0-py3-none-any.whl.metadata (14 kB)\n",
+            "Requirement already satisfied: fsspec in /usr/local/lib/python3.11/dist-packages (2025.3.2)\n",
+            "Collecting fsspec\n",
+            " Downloading fsspec-2025.5.1-py3-none-any.whl.metadata (11 kB)\n",
+            "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from datasets) (3.18.0)\n",
+            "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.11/dist-packages (from datasets) (2.0.2)\n",
+            "Requirement already satisfied: pyarrow>=15.0.0 in /usr/local/lib/python3.11/dist-packages (from datasets) (18.1.0)\n",
+            "Requirement already satisfied: dill<0.3.9,>=0.3.0 in /usr/local/lib/python3.11/dist-packages (from datasets) (0.3.7)\n",
+            "Requirement already satisfied: pandas in /usr/local/lib/python3.11/dist-packages (from datasets) (2.2.2)\n",
+            "Requirement already satisfied: requests>=2.32.2 in /usr/local/lib/python3.11/dist-packages (from datasets) (2.32.3)\n",
+            "Requirement already satisfied: tqdm>=4.66.3 in /usr/local/lib/python3.11/dist-packages (from datasets) (4.67.1)\n",
+            "Requirement already satisfied: xxhash in /usr/local/lib/python3.11/dist-packages (from datasets) (3.5.0)\n",
+            "Requirement already satisfied: multiprocess<0.70.17 in /usr/local/lib/python3.11/dist-packages (from datasets) (0.70.15)\n",
+            " Downloading fsspec-2025.3.0-py3-none-any.whl.metadata (11 kB)\n",
+            "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from datasets) (24.2)\n",
+            "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/dist-packages (from datasets) (6.0.2)\n",
+            "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.11/dist-packages (from huggingface_hub) (4.14.0)\n",
+            "Requirement already satisfied: hf-xet<2.0.0,>=1.1.2 in /usr/local/lib/python3.11/dist-packages (from huggingface_hub) (1.1.2)\n",
+            "Requirement already satisfied: aiohttp!=4.0.0a0,!=4.0.0a1 in /usr/local/lib/python3.11/dist-packages (from fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (3.11.15)\n",
+            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests>=2.32.2->datasets) (3.4.2)\n",
+            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests>=2.32.2->datasets) (3.10)\n",
+            "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests>=2.32.2->datasets) (2.4.0)\n",
+            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests>=2.32.2->datasets) (2025.4.26)\n",
+            "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas->datasets) (2.9.0.post0)\n",
+            "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas->datasets) (2025.2)\n",
+            "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas->datasets) (2025.2)\n",
+            "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (2.6.1)\n",
+            "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (1.3.2)\n",
+            "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (25.3.0)\n",
+            "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (1.6.0)\n",
+            "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (6.4.4)\n",
+            "Requirement already satisfied: propcache>=0.2.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (0.3.1)\n",
+            "Requirement already satisfied: yarl<2.0,>=1.17.0 in /usr/local/lib/python3.11/dist-packages (from aiohttp!=4.0.0a0,!=4.0.0a1->fsspec[http]<=2025.3.0,>=2023.1.0->datasets) (1.20.0)\n",
+            "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.8.2->pandas->datasets) (1.17.0)\n",
+            "Downloading datasets-3.6.0-py3-none-any.whl (491 kB)\n",
+            "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m491.5/491.5 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+            "\u001b[?25hDownloading huggingface_hub-0.33.0-py3-none-any.whl (514 kB)\n",
+            "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m514.8/514.8 kB\u001b[0m \u001b[31m14.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+            "\u001b[?25hDownloading fsspec-2025.3.0-py3-none-any.whl (193 kB)\n",
+            "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m193.6/193.6 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+            "\u001b[?25h"
+          ]
+        }
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "### 📥 Step 1: Preprocess the Data\n",
+        "\n",
+        "Since the dataset is hosted on Hugging Face, you can load it directly using the `load_dataset` function."
+      ],
+      "metadata": {
+        "id": "edF1DuNE6nSg"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "import numpy as np\n",
+        "import pandas as pd\n",
+        "from datasets import load_dataset\n",
+        "\n",
+        "# Load the dataset\n",
+        "dataset = load_dataset(\"Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM\")\n",
+        "\n",
+        "# Convert to a pandas DataFrame for easier manipulation\n",
+        "df = pd.DataFrame(dataset[\"train\"])\n",
+        "df"
+      ],
+      "metadata": {
+        "id": "HmHDx6cU7WRG"
+      },
+      "execution_count": null,
+      "outputs": []
+    }
+  ]
+}
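Given the commit message, a natural follow-up test in the notebook would be to confirm that all three splits uploaded by upload_dataset.py resolve and keep the 60/20/20 proportions chosen in prepare_parket_files.py. A hedged sketch (split names and ratios assumed from those scripts):

```python
from datasets import load_dataset

dataset = load_dataset("Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM")
total = sum(dataset[name].num_rows for name in dataset)

# Expect roughly 60% / 20% / 20% for train / validation / test.
for name in ("train", "validation", "test"):
    print(name, dataset[name].num_rows, f"{dataset[name].num_rows / total:.0%}")
```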
upload_dataset.py ADDED
@@ -0,0 +1,27 @@
+
+from huggingface_hub import HfApi
+import os
+
+# Define the repository name
+repo_name = "Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM"
+
+# Initialize the Hugging Face API
+api = HfApi()
+
+# Create the repository on Hugging Face
+api.create_repo(repo_id=repo_name, repo_type="dataset")
+
+# Upload each Parquet file to the repository
+for split_name in ["train", "validation", "test"]:
+    file_path = f"{parquet_dir}/{split_name}.parquet"
+    if os.path.exists(file_path):
+        api.upload_file(
+            path_or_fileobj=file_path,
+            path_in_repo=f"{split_name}.parquet",
+            repo_id=repo_name,
+            repo_type="dataset"
+        )
+        print(f"Uploaded {split_name}.parquet to {repo_name}")
+    else:
+        print(f"{split_name}.parquet not found. Skipping upload.")
+
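Note that upload_dataset.py references parquet_dir without defining it, so as a standalone script it needs the same `parquet_dir = "./dataset_parquet"` line used in prepare_parket_files.py (or an import of it). A possible alternative, sketched here rather than taken from the commit, is to push the DatasetDict in one call, which creates the dataset repo and writes the Parquet shards plus split metadata itself:

```python
# Assumes dataset_dict from prepare_parket_files.py and an authenticated
# Hugging Face token (e.g. via `huggingface-cli login`).
dataset_dict.push_to_hub("Tetratics/2018-11-08-OpenPsychometrics-IPIP-FFM")
```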