# MapLocNetGradio / main.py
import gradio as gr
import cv2
import torch
from torchvision import transforms
import requests
from PIL import Image
from demo import Demo, read_input_image_test, show_result, vis_image_feature
from osm.tiling import TileManager
from osm.viz import Colormap, plot_nodes
from utils.viz_2d import plot_images
import numpy as np
from utils.viz_2d import features_to_RGB
from utils.viz_localization import (
likelihood_overlay,
plot_dense_rotations,
add_circle_inset,
)
from osm.viz import GeoPlotter
import matplotlib.pyplot as plt
import random
from geopy.distance import geodesic
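
# Path to the pretrained MapLocNet checkpoint loaded below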
experiment_or_path = "weight/last-step-checkpointing.ckpt"
# experiment_or_path="experiments/maplocanet_0906_diffhight/last-step-checkpointing.ckpt"
image_path = 'images/00000.jpg'
# prior_latlon = (37.75704325989902, -122.435941445631)
# tile_size_meters = 128
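# Load the demo model on CPU; heading is discretized into 128 rotation bins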
model = Demo(experiment_or_path=experiment_or_path, num_rotations=128, device='cpu')
def demo_localize(image, long, lat, tile_size_meters):
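    """Localize a UAV image against an OSM map tile centered on the prior (lat, lon)."""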
# inp = Image.fromarray(inp.astype('uint8'), 'RGB')
# inp = transforms.ToTensor()(inp).unsqueeze(0)
    prior_latlon = (lat, long)
image, camera, gravity, proj, bbox, true_prior_latlon = read_input_image_test(
image,
prior_latlon=prior_latlon,
tile_size_meters=tile_size_meters, # try 64, 256, etc.
)
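    # Rasterize OSM data for the tile around the prior location (1 pixel per meter)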
tiler = TileManager.from_bbox(projection=proj, bbox=bbox, ppm=1, tile_size=tile_size_meters)
# tiler = TileManager.from_bbox(projection=proj, bbox=bbox + 10,ppm=1,path=root/city/'{}.osm'.format(city), tile_size=1)
canvas = tiler.query(bbox)
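    # Run MapLocNet: returns the predicted map cell (uv), heading (yaw), the
    # localization probability volume, and intermediate features for visualization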
uv, yaw, prob, neural_map, image_rectified, data_, pred = model.localize(
image, camera, canvas)
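    # Convert the predicted pixel location back to latitude/longitude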
prior_latlon_pred = proj.unproject(canvas.to_xy(uv))
map_viz = Colormap.apply(canvas.raster)
map_vis_image_result = map_viz * 255
    map_vis_image_result = show_result(map_vis_image_result.astype(np.uint8), uv, yaw)
# map_vis_image_result = show_result(map_vis_image_result.astype(np.uint8), True_uv,
# uv,
# 90.0 - yaw_T,
# yaw)
# return prior_latlon_pred
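    # Visualize the image/map features and the localization likelihood as a static figure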
uab_feature_rgb = vis_image_feature(pred['features_image'][0].cpu().numpy())
map_viz = cv2.resize(map_viz, (prob.numpy().shape[0], prob.numpy().shape[1]))
overlay = likelihood_overlay(prob.numpy().max(-1), map_viz.mean(-1, keepdims=True))
(neural_map_rgb,) = features_to_RGB(neural_map.numpy())
    fig = plot_images([image, map_vis_image_result / 255, overlay, uab_feature_rgb, neural_map_rgb],
                      titles=["UAV image", "map", "likelihood", "UAV feature", "map feature"])
# plot_images([overlay, neural_map_rgb], titles=["prediction", "neural map"])
# ax = plt.gcf().axes[2]
# ax.scatter(*canvas.to_uv(bbox.center), s=5, c="red")
# plot_dense_rotations(ax, prob, w=0.005, s=1 / 25)
# add_circle_inset(ax, uv)
# Plot as interactive figure
bbox_latlon = proj.unproject(canvas.bbox)
plot2 = GeoPlotter(zoom=16.5)
plot2.raster(map_viz, bbox_latlon, opacity=0.5)
plot2.raster(likelihood_overlay(prob.numpy().max(-1)), proj.unproject(bbox))
plot2.points(prior_latlon[:2], "red", name="location prior", size=10)
plot2.points(proj.unproject(canvas.to_xy(uv)), "black", name="argmax", size=10)
plot2.bbox(bbox_latlon, "blue", name="map tile")
# plot2.fig.show()
    return fig, plot2.fig, str(prior_latlon_pred)
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
# Title
title = "MapLocNet"
# Description shown below the title; Markdown is supported
description = "UAV Vision-based Geo-Localization Using Vectorized Maps"
# outputs = gr.outputs.Label(num_top_classes=3)
outputs = gr.Plot()
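# Wire the localization function into a Gradio interface:
# inputs are the UAV image, the prior longitude/latitude, and the map tile size;
# outputs are the static figure, the interactive map, and the predicted lat/lon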
interface = gr.Interface(fn=demo_localize,
inputs=["image",
gr.Number(label="Prior location-longitude)"),
gr.Number(label="Prior location-longitude)"),
gr.Radio([64, 128, 256], label="Search radius (meters)", info="vectorized map size"),
# gr.inputs.RadioGroup(label="Search radius (meters)",["English", "French", "Spanish"]),
# gr.Slider(64, 512,label='Search radius (meters)')
],
outputs=["plot","plot","text"],
title=title,
description=description,
examples=[['images/00000.jpg',-122.435941445631,37.75704325989902,128]])
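# Launch the demo with a public share link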
interface.launch(share=True)