Meehai committed
Commit 6d72a48 · Parent: 4266ad9

added m2f metrics analysis and moved data analysis nb into a dir

data/test_set_annotated_only/.task_statistics.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36d9eaa9eb0c01c32f03fea8fb2046a9ce51b39d04c8ca202a3a13c9fa7835a0
+ size 5620
scripts/dronescapes_viewer.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
scripts/m2f_metrics_analysis/m2f_main.ipynb ADDED
@@ -0,0 +1,243 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from mask2former import Mask2Former\n",
+ "import torch as tr\n",
+ "import os\n",
+ "from datetime import datetime\n",
+ "import numpy as np\n",
+ "from PIL import Image\n",
+ "from vre.utils import (FFmpegVideo, collage_fn, semantic_mapper, FakeVideo,\n",
+ " colorize_semantic_segmentation, image_resize, image_write)\n",
+ "from pathlib import Path\n",
+ "import pandas as pd\n",
+ "from torchmetrics.functional.classification import multiclass_stat_scores\n",
+ "\n",
+ "%load_ext autoreload\n",
+ "%autoreload 2\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mapi_mapping = {\n",
+ " \"land\": [\"Terrain\", \"Sand\", \"Snow\"],\n",
+ " \"forest\": [\"Vegetation\"],\n",
+ " \"residential\": [\"Building\", \"Utility Pole\", \"Pole\", \"Fence\", \"Wall\", \"Manhole\", \"Street Light\", \"Curb\",\n",
+ " \"Guard Rail\", \"Caravan\", \"Junction Box\", \"Traffic Sign (Front)\", \"Billboard\", \"Banner\",\n",
+ " \"Mailbox\", \"Traffic Sign (Back)\", \"Bench\", \"Fire Hydrant\", \"Trash Can\", \"CCTV Camera\",\n",
+ " \"Traffic Light\", \"Barrier\", \"Rail Track\", \"Phone Booth\", \"Curb Cut\", \"Traffic Sign Frame\",\n",
+ " \"Bike Rack\"],\n",
+ " \"road\": [\"Road\", \"Lane Marking - General\", \"Sidewalk\", \"Bridge\", \"Other Vehicle\", \"Motorcyclist\", \"Pothole\",\n",
+ " \"Catch Basin\", \"Car Mount\", \"Tunnel\", \"Parking\", \"Service Lane\", \"Lane Marking - Crosswalk\",\n",
+ " \"Pedestrian Area\", \"On Rails\", \"Bike Lane\", \"Crosswalk - Plain\"],\n",
+ " \"little-objects\": [\"Car\", \"Person\", \"Truck\", \"Boat\", \"Wheeled Slow\", \"Trailer\", \"Ground Animal\", \"Bicycle\",\n",
+ " \"Motorcycle\", \"Bird\", \"Bus\", \"Ego Vehicle\", \"Bicyclist\", \"Other Rider\"],\n",
+ " \"water\": [\"Water\"],\n",
+ " \"sky\": [\"Sky\"],\n",
+ " \"hill\": [\"Mountain\"]\n",
+ "}\n",
+ "\n",
+ "coco_mapping = {\n",
+ " \"land\": [\"grass-merged\", \"dirt-merged\", \"sand\", \"gravel\", \"flower\", \"playingfield\", \"snow\", \"platform\"],\n",
+ " \"forest\": [\"tree-merged\"],\n",
+ " \"residential\": [\"building-other-merged\", \"house\", \"roof\", \"fence-merged\", \"wall-other-merged\", \"wall-brick\",\n",
+ " \"rock-merged\", \"tent\", \"bridge\", \"bench\", \"window-other\", \"fire hydrant\", \"traffic light\",\n",
+ " \"umbrella\", \"wall-stone\", \"clock\", \"chair\", \"sports ball\", \"floor-other-merged\",\n",
+ " \"floor-wood\", \"stop sign\", \"door-stuff\", \"banner\", \"light\", \"net\", \"surfboard\", \"frisbee\",\n",
+ " \"rug-merged\", \"potted plant\", \"parking meter\", \"tennis racket\", \"sink\", \"hair drier\",\n",
+ " \"food-other-merged\", \"curtain\", \"mirror-stuff\", \"baseball glove\", \"baseball bat\", \"zebra\",\n",
+ " \"spoon\", \"towel\", \"donut\", \"apple\", \"handbag\", \"couch\", \"orange\", \"wall-wood\",\n",
+ " \"window-blind\", \"pizza\", \"cabinet-merged\", \"skateboard\", \"remote\", \"bottle\", \"bed\",\n",
+ " \"table-merged\", \"backpack\", \"bear\", \"wall-tile\", \"cup\", \"scissors\", \"ceiling-merged\",\n",
+ " \"oven\", \"cell phone\", \"microwave\", \"toaster\", \"carrot\", \"fork\", \"giraffe\", \"paper-merged\",\n",
+ " \"cat\", \"book\", \"sandwich\", \"wine glass\", \"pillow\", \"blanket\", \"tie\", \"bowl\", \"snowboard\",\n",
+ " \"vase\", \"toothbrush\", \"toilet\", \"dining table\", \"laptop\", \"tv\", \"cardboard\", \"keyboard\",\n",
+ " \"hot dog\", \"cake\", \"knife\", \"suitcase\", \"refrigerator\", \"fruit\", \"shelf\", \"counter\", \"skis\",\n",
+ " \"banana\", \"teddy bear\", \"broccoli\", \"mouse\"],\n",
+ " \"road\": [\"road\", \"railroad\", \"pavement-merged\", \"stairs\"],\n",
+ " \"little-objects\": [\"truck\", \"car\", \"boat\", \"horse\", \"person\", \"train\", \"elephant\", \"bus\", \"bird\", \"sheep\",\n",
+ " \"cow\", \"motorcycle\", \"dog\", \"bicycle\", \"airplane\", \"kite\"],\n",
+ " \"water\": [\"river\", \"water-other\", \"sea\"],\n",
+ " \"sky\": [\"sky-other-merged\"],\n",
+ " \"hill\": [\"mountain-merged\"]\n",
+ "}\n",
+ "\n",
+ "color_map = [[0, 255, 0], [0, 127, 0], [255, 255, 0], [255, 255, 255],\n",
+ " [255, 0, 0], [0, 0, 255], [0, 255, 255], [127, 127, 63]]\n",
+ "\n",
+ "def eval_iou(y: np.ndarray, gt: np.ndarray) -> float:\n",
+ " # per-class IoU = tp / (tp + fp + fn), reduced with fixed class-frequency weights\n",
+ " tp, fp, _, fn = multiclass_stat_scores(tr.from_numpy(y), tr.from_numpy(gt), num_classes=8, average=None)[:, 0:4].T\n",
+ " iou = (tp / (tp + fp + fn)).nan_to_num(0, 0, 0)\n",
+ " weights = tr.FloatTensor([0.28172092, 0.30589653, 0.13341699, 0.05937348,\n",
+ " 0.00474491, 0.05987466, 0.08660721, 0.06836531])\n",
+ " iou_avg = (iou * weights).sum().item()\n",
+ " return iou_avg\n",
+ "\n",
+ "def collage_fn2(images: list[np.ndarray], size: tuple[int, int], **kwargs):\n",
+ " images_rsz = [image_resize(image, *size) for image in images]\n",
+ " return collage_fn(images_rsz, **kwargs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "video = FFmpegVideo((\"/export/home/proiecte/aux/mihai_cristian.pirvu/datasets/dronescapes/raw_data/videos\"\n",
+ " \"/norway_210821_DJI_0015_full/DJI_0015.MP4\"))\n",
+ "gt_dir = (\"/export/home/proiecte/aux/mihai_cristian.pirvu/datasets/dronescapes/data/\"\n",
+ " \"test_set_annotated_only/semantic_segprop8/norway_210821_DJI_0015_full_\")\n",
+ "print(video)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model_id = \"49189528_0\" # \"49189528_1\" (r50/mapillary), \"47429163_0\" (swin/coco), \"49189528_0\" (swin/mapillary)\n",
+ "os.environ[\"VRE_DEVICE\"] = device = \"cuda\" #\"cpu\"\n",
+ "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\n",
+ "\n",
+ "m2f_1 = Mask2Former(model_id, semantic_argmax_only=False, name=\"m2f\", dependencies=[])\n",
+ "m2f_2 = Mask2Former(\"47429163_0\", semantic_argmax_only=False, name=\"m2f\", dependencies=[])\n",
+ "m2f_3 = Mask2Former(\"49189528_1\", semantic_argmax_only=False, name=\"m2f\", dependencies=[])\n",
+ "\n",
+ "m2f_1.device = \"cuda\" if tr.cuda.is_available() else \"cpu\"\n",
+ "m2f_2.device = \"cuda\" if tr.cuda.is_available() else \"cpu\"\n",
+ "m2f_3.device = \"cuda\" if tr.cuda.is_available() else \"cpu\"\n",
+ "\n",
+ "metrics = {}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "frame_ix = 900\n",
+ "def load_gt(ix: int) -> np.ndarray:\n",
+ " gt_path = f\"{gt_dir}{ix}.npz\"\n",
+ " assert Path(gt_path).exists(), gt_path\n",
+ " gt_data = np.load(gt_path)[\"arr_0\"]\n",
+ " return gt_data\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def m2f_do_one(m2f: Mask2Former, frame: np.ndarray, gt_data_shape, mapping: dict) -> tuple[np.ndarray, np.ndarray]:\n",
+ " # free whichever other models are set up so only one occupies GPU memory at a time\n",
+ " for other in (m2f_1, m2f_2, m2f_3):\n",
+ " if other.setup_called and id(other) != id(m2f):\n",
+ " other.vre_free()\n",
+ " if not m2f.setup_called:\n",
+ " m2f.vre_setup()\n",
+ "\n",
+ " now = datetime.now()\n",
+ " m2f.data = None\n",
+ " m2f.compute(FakeVideo(frame[None], fps=1), [0])\n",
+ " print(f\"Pred took: {datetime.now() - now}\"); now = datetime.now()\n",
+ " m2f_mapped = semantic_mapper(m2f.data.output.argmax(-1)[0], mapping, m2f.classes)\n",
+ " m2f_mapped = image_resize(m2f_mapped, *gt_data_shape, interpolation=\"nearest\")\n",
+ " print(f\"semantic_mapper took: {datetime.now() - now}\"); now = datetime.now()\n",
+ " m2f_colorized = colorize_semantic_segmentation(m2f_mapped[None], list(mapping), color_map, rgb=rgb_rsz[None])[0]\n",
+ " print(f\"colorize took: {datetime.now() - now}\"); now = datetime.now()\n",
+ " return m2f_mapped, m2f_colorized\n",
+ "\n",
+ "def eval_and_store(frame, frame_ix, res_all: list[tuple[np.ndarray, np.ndarray]], gt_color: np.ndarray,\n",
+ " columns: list[str]):\n",
+ " collage_data = []\n",
+ " for item in res_all:\n",
+ " collage_data.extend([frame, item[1], gt_color])\n",
+ " clg = collage_fn2(collage_data, size=gt_color.shape[0:2], rows_cols=(-1, 3))\n",
+ " image_write(clg, f\"collage_{frame_ix}.png\")\n",
+ " display(Image.fromarray(clg))\n",
+ " evals = [eval_iou(item[0], gt_data) for item in res_all]\n",
+ "\n",
+ " try:\n",
+ " metrics = pd.read_csv(\"metrics.csv\", index_col=0)\n",
+ " except FileNotFoundError:\n",
+ " metrics = pd.DataFrame(None, columns=columns)\n",
+ "\n",
+ " metrics.loc[frame_ix] = evals\n",
+ " display(metrics.sort_index())\n",
+ " metrics.to_csv(\"metrics.csv\")\n",
+ "\n",
+ "for frame_ix in [60, 120, 300, 600, 900, 1200, 1500]:\n",
+ " frame, gt_data = video[frame_ix], load_gt(frame_ix)\n",
+ " rgb_rsz = image_resize(frame, *gt_data.shape)\n",
+ " gt_color = colorize_semantic_segmentation(gt_data[None], classes=list(mapi_mapping), color_map=color_map,\n",
+ " rgb=rgb_rsz[None])[0]\n",
+ " mapped1, colorized1 = m2f_do_one(m2f_1, frame, gt_data.shape, mapi_mapping)\n",
+ " mapped2, colorized2 = m2f_do_one(m2f_2, frame, gt_data.shape, coco_mapping)\n",
+ " mapped3, colorized3 = m2f_do_one(m2f_3, frame, gt_data.shape, mapi_mapping)\n",
+ "\n",
+ " mapped1_rsz, colorized1_rsz = m2f_do_one(m2f_1, rgb_rsz, gt_data.shape, mapi_mapping)\n",
+ " mapped2_rsz, colorized2_rsz = m2f_do_one(m2f_2, rgb_rsz, gt_data.shape, coco_mapping)\n",
+ " mapped3_rsz, colorized3_rsz = m2f_do_one(m2f_3, rgb_rsz, gt_data.shape, mapi_mapping)\n",
+ "\n",
+ " all_res = [\n",
+ " (mapped1, colorized1), (mapped2, colorized2), (mapped3, colorized3),\n",
+ " (mapped1_rsz, colorized1_rsz), (mapped2_rsz, colorized2_rsz), (mapped3_rsz, colorized3_rsz),\n",
+ " ]\n",
+ " columns = [\"swin_mapillary\", \"swin_coco\", \"r50_mapillary\",\n",
+ " \"swin_mapillary_rsz\", \"swin_coco_rsz\", \"r50_mapillary_rsz\"]\n",
+ "\n",
+ " eval_and_store(frame, frame_ix, all_res, gt_color, columns)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ngc",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
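
Note: the weighted mIoU computed by eval_iou in the notebook above can be sanity-checked in isolation. A minimal sketch, assuming only torch and torchmetrics are installed (the toy tensors and the equal weights are illustrative, not project data):

import torch as tr
from torchmetrics.functional.classification import multiclass_stat_scores

def weighted_miou(pred: tr.Tensor, gt: tr.Tensor, weights: tr.Tensor) -> float:
    # average=None yields one row per class: [tp, fp, tn, fn, support]
    tp, fp, _, fn = multiclass_stat_scores(pred, gt, num_classes=len(weights), average=None)[:, 0:4].T
    iou = (tp / (tp + fp + fn)).nan_to_num(0)  # classes absent from pred and gt contribute 0
    return (iou * weights).sum().item()

pred = gt = tr.tensor([0, 0, 1, 1])
print(weighted_miou(pred, gt, tr.tensor([0.5, 0.5])))  # perfect prediction -> 1.0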
scripts/m2f_metrics_analysis/metrics.csv ADDED
@@ -0,0 +1,8 @@
+ ,swin_mapillary,swin_coco,r50_mapillary,swin_mapillary_rsz,swin_coco_rsz,r50_mapillary_rsz
+ 900,0.1421419978141784,0.2064806669950485,0.3687707483768463,0.2772172391414642,0.206145167350769,0.4239617884159088
+ 60,0.2167781591415405,0.2062840312719345,0.4351741373538971,0.3048166632652282,0.1999642997980117,0.4761459827423095
+ 120,0.2345813512802124,0.2026010006666183,0.4289666712284088,0.3349316716194153,0.199358657002449,0.45726078748703
+ 300,0.1704004108905792,0.2161678075790405,0.4017727971076965,0.2839266061782837,0.2174815833568573,0.4474448561668396
+ 600,0.1249909698963165,0.2117721140384674,0.3536447584629059,0.2163735926151275,0.2082659006118774,0.4042723476886749
+ 1200,0.2162990868091583,0.2044363170862198,0.3678789138793945,0.2966309785842895,0.2034457772970199,0.3900764584541321
+ 1500,0.2148686647415161,0.22754918038845062,0.35765504837036133,0.27770155668258667,0.22701863944530487,0.36417320370674133
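
The committed CSV can be read back directly to rank the six variants; a minimal sketch (path as committed):

import pandas as pd
df = pd.read_csv("scripts/m2f_metrics_analysis/metrics.csv", index_col=0)
print(df.mean().sort_values(ascending=False))

On these seven frames, r50_mapillary_rsz scores highest in every row, and resizing the input to GT resolution helps both Mapillary-trained models while leaving swin_coco essentially unchanged.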
scripts/vre_data_analysis.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
scripts/vre_data_analysis/vre_data_analysis.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
scripts/{vre_data_analysis.py → vre_data_analysis/vre_data_analysis.py} RENAMED
@@ -10,6 +10,7 @@ import io
  import base64
  import bs4
  from PIL import Image
+ import seaborn as sns
 
  def extract_pil_from_b64_image(base64_buf: str) -> Image:
      return Image.open(io.BytesIO(base64.b64decode(base64_buf)))
@@ -43,6 +44,7 @@ def save_html(html_imgs: list[str], description: str, out_path: str):
      open(out_path, "w").write(str(html))
      print(f"Written html at '{out_path}'")
 
+
  def histogram_from_classification_task(reader: MultiTaskDataset, classif: SemanticRepresentation,
                                         n: int | None = None, mode: str = "sequential", **figkwargs) -> plt.Figure:
      fig = plt.Figure(**figkwargs)
@@ -59,10 +61,17 @@ def histogram_from_classification_task(reader: MultiTaskDataset, classif: Semant
      df = pd.DataFrame({"Labels": classif.classes, "Values": counts})
      df["Values"] = df["Values"] / df["Values"].sum()
      df = df.sort_values("Values", ascending=True)
-     df = df[df["Values"] > 0.01]
-     df.plot(x="Labels", y="Values", kind="barh", legend=False, color="skyblue", ax=fig.gca(), title=classif.name)
+     df = df[df["Values"] > 0.005]
+
+     ax = fig.gca()
+     sns.barplot(data=df, y="Labels", x="Values", palette="viridis", legend=True, ax=ax, width=1)
+
+     # Adjust y-axis tick positions and spacing
+     ax.set_title(classif.name, fontsize=14, fontweight='bold')
+     ax.set_ylabel("Labels", fontsize=12)
+
+     fig.set_size_inches(8, 2 if len(df) <= 2 else len(df) * 0.5)
      fig.gca().set_xlim(0, 1)
-     # fig.gca().set_ylabel("Values")
      fig.tight_layout()
      plt.close()
      return fig
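
The seaborn rewrite also makes the figure height scale with the number of bars that survive the 0.005 cutoff (roughly half an inch per class, with a 2-inch floor), so plots stay readable for tasks with many classes. A minimal standalone sketch of that sizing rule, using made-up data rather than project labels:

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.DataFrame({"Labels": ["sky", "land", "water"], "Values": [0.5, 0.3, 0.2]})
fig, ax = plt.subplots(figsize=(8, 2 if len(df) <= 2 else len(df) * 0.5))
sns.barplot(data=df, y="Labels", x="Values", ax=ax)  # horizontal bars, one per label
ax.set_xlim(0, 1)
fig.tight_layout()
fig.savefig("histogram_demo.png")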