fix random split names
ppb_affinity.py  CHANGED  (+7, -12)
@@ -39,23 +39,19 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
         ]
     elif self.config.name == "filtered_random":
         filepath = dl_manager.download_and_extract("filtered.csv")
-            # Read all rows to determine splits
         with open(filepath, encoding="utf-8") as f:
             reader = csv.DictReader(f)
             rows = list(reader)
         n_total = len(rows)
-            # Generate shuffled indices with fixed seed
         indices = list(range(n_total))
-            rng = random.Random(42)
+            rng = random.Random(42)
         rng.shuffle(indices)
-            # Calculate split sizes
         n_train = int(0.8 * n_total)
         n_val = int(0.1 * n_total)
-            n_test = n_total - n_train - n_val
-            # Split indices into ranges
+            n_test = n_total - n_train - n_val
         return [
             datasets.SplitGenerator(
-                    name=datasets.
+                    name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepath": filepath,
                     "shuffled_indices": indices,
@@ -64,7 +60,7 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
                 },
             ),
             datasets.SplitGenerator(
-                    name=datasets.
+                    name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepath": filepath,
                     "shuffled_indices": indices,
@@ -73,7 +69,7 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
                 },
             ),
             datasets.SplitGenerator(
-                    name=datasets.
+                    name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepath": filepath,
                     "shuffled_indices": indices,
@@ -96,9 +92,8 @@ class ppb_affinity(datasets.GeneratorBasedBuilder):
             del row["split"]
             yield idx, row
     elif self.config.name == "filtered_random":
-            # Iterate over the range [split_start, split_end) in shuffled_indices
         for global_idx in range(split_start, split_end):
             original_idx = shuffled_indices[global_idx]
             row = rows[original_idx]
-                del row["split"]
-                yield global_idx, row
+                del row["split"]
+                yield global_idx, row
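With the split names now set to datasets.Split.TRAIN, datasets.Split.VALIDATION, and datasets.Split.TEST, the filtered_random config resolves to the standard train/validation/test splits, cut 80/10/10 from the rows shuffled with a fixed seed of 42. Below is a minimal sketch of loading the config and checking the split sizes; the repo id "user/ppb_affinity" is a placeholder for the dataset's actual Hub path, and trust_remote_code=True is assumed to be needed because this is a script-based dataset.

    from datasets import load_dataset

    # Hypothetical Hub repo id -- substitute the dataset's actual path.
    ds = load_dataset("user/ppb_affinity", "filtered_random", trust_remote_code=True)

    # Recompute the split sizes the loading script uses: 80% train, 10% validation,
    # and the remainder as test, so integer rounding never drops a row.
    n_total = sum(ds.num_rows.values())
    n_train = int(0.8 * n_total)
    n_val = int(0.1 * n_total)
    n_test = n_total - n_train - n_val

    assert ds["train"].num_rows == n_train
    assert ds["validation"].num_rows == n_val
    assert ds["test"].num_rows == n_test

Giving the test split the remainder, rather than another int(0.1 * n_total), guarantees the three shuffled index ranges cover every row exactly once regardless of how the train and validation counts round.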