"""Vanishing point detection: use Canny edge detection and the Hough transform to
find candidate lines, then RANSAC (or grid-based voting) to estimate the vanishing
point of an image.

**Author:** Kuoyuan Li
"""

import itertools
import math
import os
import random

import cv2
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm


def show_images(image):
    """Display a single image in grayscale with matplotlib."""
    plt.figure()
    plt.imshow(image, cmap='gray')
    plt.show()


def load_images(filename):
    """Load an image and convert it from BGR to RGB.

    cv2.imread returns None instead of raising when the file cannot be read,
    so that case is checked explicitly.
    """
    img = cv2.imread(filename)
    if img is None:
        print("File is not an image\n")
        exit()
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


def show_lines(image, lines):
    """Draw the detected Hough lines on the image and save the result."""
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        # (x0, y0) is the point on the line closest to the origin; extend the
        # line 1000 pixels in both directions so it spans the whole image
        x0 = a * rho
        y0 = b * rho
        pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
        pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
        cv2.line(image, pt1, pt2, (255, 0, 0), 1)
    cv2.imwrite("/root/data2/joonsu0109/project/naive_vp/vanishing_lines.png", image)


def show_point(image, point, save_paths):
    """Draw the estimated vanishing point on the image and save it to save_paths."""
    cv2.circle(image, point, 3, (255, 0, 0), thickness=3)
    cv2.imwrite(save_paths, image)


def detect_lines(image):
    """
    Use Canny edge detection and the Hough transform to get the lines that are
    useful for locating the vanishing point.

    Args:   image: the original image

    Return: blur_image: the blurred image (for the report)
            edge_image: the edge image (for the report)
            valid_lines: the detected lines that survive the angle filter
    """
    # Blur with a 2D Gaussian kernel to suppress noise before edge detection
    gau_kernel = cv2.getGaussianKernel(70, 4)
    gau_kern2d = np.outer(gau_kernel, gau_kernel)
    gau_kern2d = gau_kern2d / gau_kern2d.sum()
    blur_image = cv2.filter2D(image, -1, gau_kern2d)

    # Canny edge map followed by the standard (rho, theta) Hough transform
    edge_image = cv2.Canny(blur_image, 40, 70, apertureSize=3, L2gradient=True)
    lines = cv2.HoughLines(edge_image, 1, np.pi / 120, 55)

    # Keep only lines that are neither near-horizontal nor near-vertical,
    # since those rarely point towards the vanishing point
    valid_lines = []
    if lines is not None:
        for line in lines:
            rho, theta = line[0]
            if (0.4 < theta < 1.47) or (1.67 < theta < 2.74):
                valid_lines.append(line)

    return blur_image, edge_image, valid_lines


def find_intersection_point(line1, line2):
    """Implementation is based on code from https://stackoverflow.com/questions/46565975
    Original author: StackOverflow contributor alkasm
    Find the intersection point of two lines.

    Args:   line1, line2: two lines represented by rho and theta (polar coordinates)

    Return: x0, y0: x and y of the intersection point, or None if the lines are parallel
    """
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]

    # Each line satisfies x*cos(theta) + y*sin(theta) = rho, so the intersection
    # is the solution of the 2x2 linear system A [x, y]^T = b
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    det_A = np.linalg.det(A)
    if det_A != 0:
        x0, y0 = np.linalg.solve(A, b)
        x0, y0 = int(np.round(x0)), int(np.round(y0))
        return x0, y0
    else:
        return None
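

# A small worked example (not part of the original pipeline, and never called by
# it): a vertical line x = 3 is (rho=3, theta=0) and a horizontal line y = 5 is
# (rho=5, theta=pi/2), so their intersection should come back as (3, 5).
def _example_intersection():
    """Hedged sketch illustrating find_intersection_point on two known lines."""
    vertical = np.array([[3.0, 0.0]])          # x*cos(0) + y*sin(0) = 3  ->  x = 3
    horizontal = np.array([[5.0, np.pi / 2]])  # x*cos(pi/2) + y*sin(pi/2) = 5  ->  y = 5
    assert find_intersection_point(vertical, horizontal) == (3, 5)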


def find_dist_to_line(point, line):
    """Implementation is based on Computer Vision material, owned by the University of Melbourne
    Find the intercept point of the line model with a normal from the point to it,
    then calculate the distance between the point and that intercept.

    Args:   point: the point represented by x and y
            line: the line represented by rho and theta (polar coordinates)

    Return: dist: the distance from the point to the line
    """
    x0, y0 = point
    rho, theta = line[0]

    # Rewrite the line as y = m*x + c (assumes sin(theta) != 0, i.e. the line is
    # not vertical; the angle filter in detect_lines already excludes such lines)
    m = (-1 * (np.cos(theta))) / np.sin(theta)
    c = rho / np.sin(theta)

    # Foot of the perpendicular from (x0, y0) onto the line
    x = (x0 + m * y0 - m * c) / (1 + m ** 2)
    y = (m * x0 + (m ** 2) * y0 - (m ** 2) * c) / (1 + m ** 2) + c
    dist = math.sqrt((x - x0) ** 2 + (y - y0) ** 2)
    return dist
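

# Another small worked example (not from the original code): the distance from
# the origin to the horizontal line y = 5, i.e. (rho=5, theta=pi/2), should be 5.
def _example_distance():
    """Hedged sketch illustrating find_dist_to_line on a known configuration."""
    horizontal = np.array([[5.0, np.pi / 2]])
    assert abs(find_dist_to_line((0, 0), horizontal) - 5.0) < 1e-6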


def RANSAC(lines, ransac_iterations, ransac_threshold, ransac_ratio):
    """Implementation is based on code from Computer Vision material, owned by the University of Melbourne
    Use RANSAC to identify the vanishing point of an image.

    Args:   lines: the lines detected in the image
            ransac_iterations, ransac_threshold, ransac_ratio: RANSAC hyperparameters

    Return: vanishing_point: the estimated vanishing point
    """
    inlier_count_ratio = 0.
    vanishing_point = (0, 0)

    for iteration in range(ransac_iterations):

        # Randomly sample 2 lines and use their intersection as the candidate point
        n = 2
        selected_lines = random.sample(lines, n)
        line1 = selected_lines[0]
        line2 = selected_lines[1]
        intersection_point = find_intersection_point(line1, line2)

        if intersection_point is not None:
            # Count how many lines pass close enough to the candidate point
            inlier_count = 0
            for line in lines:
                dist = find_dist_to_line(intersection_point, line)
                if dist < ransac_threshold:
                    inlier_count += 1

            # Keep the candidate with the best inlier ratio so far
            if inlier_count / float(len(lines)) > inlier_count_ratio:
                inlier_count_ratio = inlier_count / float(len(lines))
                vanishing_point = intersection_point

            # Stop early once enough lines agree with the candidate
            if inlier_count > len(lines) * ransac_ratio:
                break

    return vanishing_point
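

# Minimal usage sketch for RANSAC (synthetic data, not part of the original
# pipeline): build three lines that all pass through the point (100, 50) using
# rho = x*cos(theta) + y*sin(theta); the estimate should land on that point.
def _example_ransac():
    """Hedged sketch showing RANSAC recovering a known intersection point."""
    point_x, point_y = 100, 50
    thetas = [0.6, 1.0, 1.4]
    synthetic_lines = [
        np.array([[point_x * np.cos(t) + point_y * np.sin(t), t]]) for t in thetas
    ]
    vp = RANSAC(synthetic_lines, ransac_iterations=20, ransac_threshold=5, ransac_ratio=0.9)
    print("Estimated vanishing point:", vp)   # expected: (100, 50)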


def find_vanishing_point(img, grid_size, intersections):
    """Grid-based voting: split the image into grid_size x grid_size cells, count
    the line intersections falling inside each cell, and return the centre of the
    cell with the most intersections as the vanishing point estimate.
    """
    print("img.shape: ", img.shape)
    image_height = img.shape[0]
    image_width = img.shape[1]

    grid_rows = (image_height // grid_size) + 1
    grid_columns = (image_width // grid_size) + 1

    max_intersections = 0
    best_cell = (0.0, 0.0)

    for i, j in itertools.product(range(grid_columns), range(grid_rows)):
        cell_left = i * grid_size
        cell_right = (i + 1) * grid_size
        cell_bottom = j * grid_size
        cell_top = (j + 1) * grid_size

        # Draw the grid cell (debug visualisation)
        cv2.rectangle(img, (cell_left, cell_bottom), (cell_right, cell_top), (0, 0, 255), 5)

        # Count the intersections that fall inside this cell
        current_intersections = 0
        for x, y in intersections:
            if cell_left < x < cell_right and cell_bottom < y < cell_top:
                current_intersections += 1

        if current_intersections > max_intersections:
            max_intersections = current_intersections
            best_cell = ((cell_left + cell_right) / 2, (cell_bottom + cell_top) / 2)
            print("Best Cell:", best_cell)

    if best_cell[0] is not None and best_cell[1] is not None:
        # Highlight the winning cell and save the visualisation
        rx1 = int(best_cell[0] - grid_size / 2)
        ry1 = int(best_cell[1] - grid_size / 2)
        rx2 = int(best_cell[0] + grid_size / 2)
        ry2 = int(best_cell[1] + grid_size / 2)
        cv2.rectangle(img, (rx1, ry1), (rx2, ry2), (0, 255, 0), 10)
        cv2.imwrite('/root/data2/joonsu0109/project/naive_vp/vanishing-point-detection/outputs/result.png', img)

    return best_cell
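

# Minimal sketch of the grid-voting step (hypothetical values, not from the
# original pipeline): with a 500x500 canvas, grid_size=100 and intersections
# clustered around (120, 80), the densest cell spans x in [100, 200) and
# y in [0, 100), so the returned centre is (150.0, 50.0). Note that
# find_vanishing_point also writes a debug image to its hard-coded output path.
def _example_grid_vote():
    """Hedged sketch showing find_vanishing_point picking the densest grid cell."""
    canvas = np.zeros((500, 500, 3), dtype=np.uint8)
    intersections = [(118, 75), (125, 82), (122, 79), (410, 350)]
    best_cell = find_vanishing_point(canvas, 100, intersections)
    print("Best cell centre:", best_cell)   # expected: (150.0, 50.0)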


def line_intersection(line1, line2):
    """
    Computes the intersection point of two lines in polar coordinates (rho, theta).

    Args:
        line1 (np.ndarray): First line, represented as (rho, theta).
        line2 (np.ndarray): Second line, represented as (rho, theta).

    Returns:
        tuple or None: Intersection point (x, y), or None if lines are parallel.
    """
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]

    a1, b1 = np.cos(theta1), np.sin(theta1)
    c1 = rho1
    a2, b2 = np.cos(theta2), np.sin(theta2)
    c2 = rho2

    A = np.array([[a1, b1], [a2, b2]])
    C = np.array([c1, c2])

    det = np.linalg.det(A)
    if abs(det) < 1e-6:
        return None

    x, y = np.linalg.solve(A, C)
    return x, y


def find_intersections(lines):
    """
    Finds intersections between pairs of lines.

    Args:
        lines (np.ndarray): Array of lines in the format (n, 1, 2),
                            where each line is represented as (rho, theta).

    Returns:
        list: List of intersection points [(x, y), ...].
    """
    intersections = []
    for i, line_1 in enumerate(lines):
        for line_2 in lines[i + 1:]:
            intersection = line_intersection(line_1, line_2)
            if intersection is not None:
                intersections.append(intersection)
    return intersections


def sample_lines(lines, size):
    """Randomly sample at most `size` lines from the detected lines."""
    if size > len(lines):
        size = len(lines)
    return random.sample(lines, size)
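

# Hedged sketch of how the grid-voting helpers above could be chained as an
# alternative to the RANSAC pipeline used in __main__ below. The image path is
# hypothetical and the grid size of 100 pixels is an arbitrary illustrative choice.
def _example_grid_pipeline(image_path="example.png"):
    """Hedged sketch: detect lines, intersect them pairwise, then grid-vote."""
    img = cv2.imread(image_path)
    _, _, lines = detect_lines(img)
    subset = sample_lines(lines, 100)        # cap the O(n^2) pairwise intersection step
    intersections = find_intersections(subset)
    return find_vanishing_point(img, 100, intersections)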


if __name__ == "__main__":
    # For each image: use Canny + Hough to detect lines, then RANSAC to estimate
    # the vanishing point and save the annotated result.
    input_folder = "/root/data2/joonsu0109/dataset/SemanticKITTI/dataset/sequences/08/image_2"
    output_folder = "/root/data2/joonsu0109/project/naive_vp/outputs_ransac"
    os.makedirs(output_folder, exist_ok=True)
    file_list = os.listdir(input_folder)
    detected_list = os.listdir(output_folder)

    for filename in tqdm(file_list):
        # Skip images that already have a saved result
        if filename not in detected_list:
            try:
                print("Processing image: ", filename)
                file_path = os.path.join(input_folder, filename)
                save_paths = os.path.join(output_folder, filename)
                image = cv2.imread(file_path)

                blur_image, edge_image, lines = detect_lines(image)
                print("Number of lines detected: ", len(lines))

                ransac_iterations, ransac_threshold, ransac_ratio = 50, 10, 0.93
                vanishing_point = RANSAC(lines, ransac_iterations, ransac_threshold, ransac_ratio)

                show_point(image, vanishing_point, save_paths)
            except Exception:
                print("Error processing image: ", filename)
                continue