Delete dataset.py

dataset.py  (DELETED, +0 -136)
@@ -1,136 +0,0 @@
from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator, Split, Features, Value, ClassLabel, Image, Sequence
import csv
import datasets
import ast

class CAFOSatConfig(datasets.BuilderConfig):
    def __init__(self, split_column=None, data_filter=None, **kwargs):
        super().__init__(**kwargs)
        self.split_column = split_column  # e.g., 'cafosat_set1_training_train'
        self.data_filter = data_filter    # e.g., 'verified', 'augmented', 'neg', 'cafo'

class CAFOSat(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = CAFOSatConfig
    BUILDER_CONFIGS = [
        # Train/val/test configs
        CAFOSatConfig(name="verified_train", split_column="cafosat_verified_training_train", description="Verified training split"),
        CAFOSatConfig(name="verified_val", split_column="cafosat_verified_training_val", description="Verified validation split"),
        CAFOSatConfig(name="verified_test", split_column="cafosat_verified_training_test", description="Verified test split"),

        CAFOSatConfig(name="all_train", split_column="cafosat_all_training_train", description="All training split"),
        CAFOSatConfig(name="all_val", split_column="cafosat_all_training_val", description="All validation split"),
        CAFOSatConfig(name="all_test", split_column="cafosat_all_training_test", description="All test split"),

        CAFOSatConfig(name="set1_train", split_column="cafosat_training_set1_train", description="Set1 training split"),
        CAFOSatConfig(name="set1_val", split_column="cafosat_training_set1_val", description="Set1 validation split"),
        CAFOSatConfig(name="set1_test", split_column="cafosat_training_set1_test", description="Set1 test split"),

        CAFOSatConfig(name="set2_train", split_column="cafosat_training_set2_train", description="Set2 training split"),
        CAFOSatConfig(name="set2_val", split_column="cafosat_training_set2_val", description="Set2 validation split"),
        CAFOSatConfig(name="set2_test", split_column="cafosat_training_set2_test", description="Set2 test split"),

        CAFOSatConfig(name="merged_train", split_column="cafosat_merged_training_train", description="Merged training split"),
        CAFOSatConfig(name="merged_val", split_column="cafosat_merged_training_val", description="Merged validation split"),
        CAFOSatConfig(name="merged_test", split_column="cafosat_merged_training_test", description="Merged test split"),

        CAFOSatConfig(name="augmented_train", split_column="cafosat_augmented_training_train", description="Augmented training split"),
        CAFOSatConfig(name="augmented_val", split_column="cafosat_augmented_training_val", description="Augmented validation split"),
        CAFOSatConfig(name="augmented_test", split_column="cafosat_augmented_training_test", description="Augmented test split"),

        # Data type filters
        CAFOSatConfig(name="verified_only", data_filter="verified", description="Only verified patches"),
        CAFOSatConfig(name="verified_cafo_only", data_filter="verified_cafo", description="Only verified CAFO patches"),
        CAFOSatConfig(name="augmented_only", data_filter="augmented", description="Only augmented patches"),
        CAFOSatConfig(name="negatives_only", data_filter="neg", description="Only negative examples"),
        CAFOSatConfig(name="cafo_only", data_filter="cafo", description="Only positive CAFOs (label > 0)"),
        CAFOSatConfig(name="all", data_filter="all", description="All patches"),
    ]
    # DEFAULT_CONFIG_NAME = "all_train"

    def _info(self):
        return DatasetInfo(
            description="CAFOSat: Remote sensing CAFO dataset with bounding boxes and infrastructure tags.",
            features=Features({
                "patch_file": Image(),
                "label": ClassLabel(
                    names=["Negative", "Swine", "Dairy", "Beef", "Poultry", "Horses", "Sheep/Goats"]
                ),
                "barn": Value("float32"),
                "manure_pond": Value("float32"),
                "grazing_area": Value("float32"),
                "others": Value("float32"),
                "geom_bbox": Sequence(Value("float32")),
                "category": Value("string"),
                "state": Value("string"),
                "image_type": Value("string"),
                "CAFO_UNIQUE_ID": Value("string"),
                "verified_label": Value("string"),
                "patch_res": Value("string"),
                "refine_x": Value("string"),
                "refine_y": Value("string")
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/oishee3003/CAFOSat",
            license="cc-by-4.0"
        )

    def _split_generators(self, dl_manager):
        csv_path = dl_manager.download_and_extract("cafosat.csv")
        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"csv_path": csv_path})
        ]

    def _generate_examples(self, csv_path):
        split_col = self.config.split_column
        data_filter = self.config.data_filter

        with open(csv_path, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                include = True

                # Apply split filtering: keep only rows flagged "1" in the split column
                if split_col:
                    if row.get(split_col, "0") != "1":
                        continue

                # Apply type-based filtering
                if data_filter == "augmented":
                    include = "augmented" in row.get("image_type", "").lower()
                elif data_filter == "verified":
                    include = bool(row.get("verified_label", "").strip())
                elif data_filter == "verified_cafo":
                    # Verified patches that are also positive CAFOs
                    include = bool(row.get("verified_label", "").strip()) and int(row.get("label", 0)) > 0
                elif data_filter == "neg":
                    include = int(row.get("label", 0)) < 0
                elif data_filter == "cafo":
                    include = int(row.get("label", 0)) > 0
                elif data_filter == "all":
                    include = True

                if not include:
                    continue

                # Parse the bounding box string; fall back to a default box on malformed rows
                try:
                    bbox = ast.literal_eval(row.get("geom_bbox", "[5.0, 5.0, 700.0, 700.0]"))
                except (ValueError, SyntaxError):
                    bbox = [5.0, 5.0, 700.0, 700.0]

                yield idx, {
                    "patch_file": row["patch_file"],
                    "label": int(row["label"]),
                    "barn": float(row.get("barn", 0)),
                    "manure_pond": float(row.get("manure_pond", 0)),
                    "grazing_area": float(row.get("grazing_area", 0)),
                    "others": float(row.get("others", 0)),
                    "geom_bbox": bbox,
                    "category": row.get("category", ""),
                    "state": row.get("state", ""),
                    "image_type": row.get("image_type", ""),
                    "CAFO_UNIQUE_ID": row.get("CAFO_UNIQUE_ID", ""),
                    "verified_label": row.get("verified_label", ""),
                    "patch_res": row.get("patch_res", ""),
                    "refine_x": row.get("refine_x", ""),
                    "refine_y": row.get("refine_y", "")
                }
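For context, a script-based builder like the one deleted above would normally be consumed through the standard load_dataset API. The snippet below is an assumed usage sketch, not taken from the repo: the repo id "oishee3003/CAFOSat" comes from the homepage field above, "verified_train" is one of the BUILDER_CONFIGS names, and trust_remote_code=True is what recent versions of the datasets library require to execute a repo's loading script.

from datasets import load_dataset

# Minimal usage sketch (assumption: dataset.py lived at the repo root and
# script-based loading was enabled for oishee3003/CAFOSat).
ds = load_dataset(
    "oishee3003/CAFOSat",
    name="verified_train",    # any name from BUILDER_CONFIGS, e.g. "all_train" or "cafo_only"
    trust_remote_code=True,   # required to run a dataset defined by a loading script
)

# _split_generators defines a single TRAIN split, so every filtered row lands in "train".
example = ds["train"][0]
print(example["label"], example["geom_bbox"])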