wangerniu committed on
Commit
b7f7c83
·
1 Parent(s): 629144d
Files changed (1)
  1. app.py +95 -4
app.py CHANGED
@@ -1,7 +1,98 @@
  import gradio as gr

- def greet(name):
-     return "Hello " + name + "!!"

- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
+ import cv2
+ import torch
+ from torchvision import transforms
+ import requests
+ from PIL import Image
+ from demo import Demo, read_input_image_test, show_result, vis_image_feature
+ from osm.tiling import TileManager
+ from osm.viz import Colormap, plot_nodes
+ from utils.viz_2d import plot_images
+ import numpy as np
+ from utils.viz_2d import features_to_RGB
+ from utils.viz_localization import (
+     likelihood_overlay,
+     plot_dense_rotations,
+     add_circle_inset,
+ )
+ from osm.viz import GeoPlotter
+ import matplotlib.pyplot as plt
+ import random
+ from geopy.distance import geodesic
+
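+ # Model set-up: load the MapLocNet checkpoint once at start-up and run inference on CPU;
+ # num_rotations is the number of orientation hypotheses the model evaluates.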
+ experiment_or_path = "weight/last-step-checkpointing.ckpt"
+ # experiment_or_path = "experiments/maplocanet_0906_diffhight/last-step-checkpointing.ckpt"
+ image_path = 'images/00000.jpg'
+
+ # prior_latlon = (37.75704325989902, -122.435941445631)
+ # tile_size_meters = 128
+ model = Demo(experiment_or_path=experiment_or_path, num_rotations=128, device='cpu')
+
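+ # demo_localize: given a UAV image, a prior (longitude, latitude) and a search radius,
+ # build the OSM tile around the prior, run the model, and return a summary figure,
+ # an interactive map figure, and the predicted latitude/longitude as text.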
+ def demo_localize(image, long, lat, tile_size_meters):
+     # inp = Image.fromarray(inp.astype('uint8'), 'RGB')
+     # inp = transforms.ToTensor()(inp).unsqueeze(0)
+     prior_latlon = (lat, long)
+     image, camera, gravity, proj, bbox, true_prior_latlon = read_input_image_test(
+         image,
+         prior_latlon=prior_latlon,
+         tile_size_meters=tile_size_meters,  # try 64, 256, etc.
+     )
+     tiler = TileManager.from_bbox(projection=proj, bbox=bbox, ppm=1, tile_size=tile_size_meters)
+     # tiler = TileManager.from_bbox(projection=proj, bbox=bbox + 10, ppm=1, path=root/city/'{}.osm'.format(city), tile_size=1)
+     canvas = tiler.query(bbox)
+     uv, yaw, prob, neural_map, image_rectified, data_, pred = model.localize(
+         image, camera, canvas)
+     prior_latlon_pred = proj.unproject(canvas.to_xy(uv))
+
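+     # Rasterise the map tile and draw the predicted position/heading, the likelihood
+     # overlay and the feature visualisations that go into the summary figure.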
+     map_viz = Colormap.apply(canvas.raster)
+     map_vis_image_result = map_viz * 255
+     map_vis_image_result = show_result(map_vis_image_result.astype(np.uint8), uv, yaw)
+     # map_vis_image_result = show_result(map_vis_image_result.astype(np.uint8), True_uv,
+     #                                     uv,
+     #                                     90.0 - yaw_T,
+     #                                     yaw)
+     # return prior_latlon_pred
+     uab_feature_rgb = vis_image_feature(pred['features_image'][0].cpu().numpy())
+     # cv2.resize expects the target size as (width, height)
+     map_viz = cv2.resize(map_viz, (prob.numpy().shape[1], prob.numpy().shape[0]))
+     overlay = likelihood_overlay(prob.numpy().max(-1), map_viz.mean(-1, keepdims=True))
+     (neural_map_rgb,) = features_to_RGB(neural_map.numpy())
+     fig = plot_images([image, map_vis_image_result / 255, overlay, uab_feature_rgb, neural_map_rgb],
+                       titles=["UAV image", "map", "likelihood", "UAV feature", "map feature"])
+     # plot_images([overlay, neural_map_rgb], titles=["prediction", "neural map"])
+     # ax = plt.gcf().axes[2]
+     # ax.scatter(*canvas.to_uv(bbox.center), s=5, c="red")
+     # plot_dense_rotations(ax, prob, w=0.005, s=1 / 25)
+     # add_circle_inset(ax, uv)
+
+     # Plot as interactive figure
+     bbox_latlon = proj.unproject(canvas.bbox)
+     plot2 = GeoPlotter(zoom=16.5)
+     plot2.raster(map_viz, bbox_latlon, opacity=0.5)
+     plot2.raster(likelihood_overlay(prob.numpy().max(-1)), proj.unproject(bbox))
+     plot2.points(prior_latlon[:2], "red", name="location prior", size=10)
+     plot2.points(proj.unproject(canvas.to_xy(uv)), "black", name="argmax", size=10)
+     plot2.bbox(bbox_latlon, "blue", name="map tile")
+     # plot2.fig.show()
+     return fig, plot2.fig, str(prior_latlon_pred)
+ # model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+ # Title
+ title = "MapLocNet"
+ # Description shown under the title (Markdown is supported)
+ description = "UAV Vision-based Geo-Localization Using Vectorized Maps"
+
+ # outputs = gr.outputs.Label(num_top_classes=3)
+ outputs = gr.Plot()
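+ # Gradio UI: an image upload, two numeric fields for the longitude/latitude prior, and a
+ # radio selector for the tile size; outputs are the two figures plus the predicted coordinates.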
+ interface = gr.Interface(fn=demo_localize,
+                          inputs=["image",
+                                  gr.Number(label="Prior location - longitude"),
+                                  gr.Number(label="Prior location - latitude"),
+                                  gr.Radio([64, 128, 256], label="Search radius (meters)", info="vectorized map size"),
+                                  # gr.inputs.RadioGroup(label="Search radius (meters)", ["English", "French", "Spanish"]),
+                                  # gr.Slider(64, 512, label='Search radius (meters)')
+                                  ],
+                          outputs=["plot", "plot", "text"],
+                          title=title,
+                          description=description,
+                          examples=[['images/00000.jpg', -122.435941445631, 37.75704325989902, 128]])
+ interface.launch()
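+ # launch() serves the demo locally; launch(share=True) would also create a temporary public link.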