diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..80b0781a888b9a1e86a91fd0abb77f6b44b4ceaf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/DCN.so filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/DCN.so filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o filter=lfs diff=lfs merge=lfs -text +vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o filter=lfs diff=lfs merge=lfs -text diff --git a/vanishing_point_extraction/neurvps/TMM17/checkpoint_latest.pth.tar b/vanishing_point_extraction/neurvps/TMM17/checkpoint_latest.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..06dc2f7fe25993bb061965d8c1d65a4852b58325 --- /dev/null +++ b/vanishing_point_extraction/neurvps/TMM17/checkpoint_latest.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:951f12bfe2a3afdef5b95d6a1cb9bbe51e73913c70212c8e628b696bd39a74e7 +size 358844104 diff --git a/vanishing_point_extraction/neurvps/TMM17/config.yaml b/vanishing_point_extraction/neurvps/TMM17/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb07155b2eff57702cef67cec53ececff4122060 --- /dev/null +++ b/vanishing_point_extraction/neurvps/TMM17/config.yaml @@ -0,0 +1,39 @@ +io: + augmentation_level: 2 + datadir: data/tmm17/ + dataset: TMM17 
+ focal_length: 1.0 + logdir: logs/ + num_vpts: 1 + num_workers: 4 + resume_from: logs/200107-013044-14545f5-tmm17-bugfix-lr1e-4-long + tensorboard_port: 0 + validation_debug: 0 + validation_interval: 8000 +model: + backbone: stacked_hourglass + batch_size: 8 + conic_6x: false + depth: 4 + fc_channel: 1024 + im2col_step: 11 + multires: + - 0.0051941870036646 + - 0.02004838034795 + - 0.0774278195486317 + - 0.299564810864565 + num_blocks: 1 + num_stacks: 1 + output_stride: 4 + smp_multiplier: 2 + smp_neg: 1 + smp_pos: 1 + smp_rnd: 3 + upsample_scale: 1 +optim: + amsgrad: true + lr: 0.0001 + lr_decay_epoch: 60 + max_epoch: 100 + name: Adam + weight_decay: 0.0006 diff --git a/vanishing_point_extraction/neurvps/neurvps/__init__.py b/vanishing_point_extraction/neurvps/neurvps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..abeceda76588a7081ffcb7f3658f00287eb4b260 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/__init__.py @@ -0,0 +1,4 @@ +import neurvps.models +import neurvps.trainer +import neurvps.datasets +import neurvps.config diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0a820426cca8f81e1acb4eaeb1e0415f551aca7 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/box.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/__pycache__/box.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd0fb7520458a2fadca19a4c3d932f5082bb2f98 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/box.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/config.cpython-38.pyc 
b/vanishing_point_extraction/neurvps/neurvps/__pycache__/config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31b61227e0ebcb94fc78770b8854a3f0130ef46c Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/config.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aacf11f9d849f650648b624171eeb6488503689 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92b778b119b62d38dbf6eb76af24c31a3b66aa5f Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/__pycache__/utils.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62441d88a8458a8df8f40f35eb402b1a2a8af41d Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/__pycache__/utils.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/box.py b/vanishing_point_extraction/neurvps/neurvps/box.py new file mode 100644 index 0000000000000000000000000000000000000000..cb44cf81c25de08cc90dba273ab2ebcf41f3961b --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/box.py @@ -0,0 +1,1110 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +# +# Copyright (c) 2017-2019 - Chris Griffith - MIT License +""" +Improved dictionary access through dot notation with additional tools. 
+""" +import string +import sys +import json +import re +import copy +from keyword import kwlist +import warnings + +try: + from collections.abc import Iterable, Mapping, Callable +except ImportError: + from collections import Iterable, Mapping, Callable + +yaml_support = True + +try: + import yaml +except ImportError: + try: + import ruamel.yaml as yaml + except ImportError: + yaml = None + yaml_support = False + +if sys.version_info >= (3, 0): + basestring = str +else: + from io import open + +__all__ = ['Box', 'ConfigBox', 'BoxList', 'SBox', + 'BoxError', 'BoxKeyError'] +__author__ = 'Chris Griffith' +__version__ = '3.2.4' + +BOX_PARAMETERS = ('default_box', 'default_box_attr', 'conversion_box', + 'frozen_box', 'camel_killer_box', 'box_it_up', + 'box_safe_prefix', 'box_duplicates', 'ordered_box') + +_first_cap_re = re.compile('(.)([A-Z][a-z]+)') +_all_cap_re = re.compile('([a-z0-9])([A-Z])') + + +class BoxError(Exception): + """Non standard dictionary exceptions""" + + +class BoxKeyError(BoxError, KeyError, AttributeError): + """Key does not exist""" + + +# Abstract converter functions for use in any Box class + + +def _to_json(obj, filename=None, + encoding="utf-8", errors="strict", **json_kwargs): + json_dump = json.dumps(obj, + ensure_ascii=False, **json_kwargs) + if filename: + with open(filename, 'w', encoding=encoding, errors=errors) as f: + f.write(json_dump if sys.version_info >= (3, 0) else + json_dump.decode("utf-8")) + else: + return json_dump + + +def _from_json(json_string=None, filename=None, + encoding="utf-8", errors="strict", multiline=False, **kwargs): + if filename: + with open(filename, 'r', encoding=encoding, errors=errors) as f: + if multiline: + data = [json.loads(line.strip(), **kwargs) for line in f + if line.strip() and not line.strip().startswith("#")] + else: + data = json.load(f, **kwargs) + elif json_string: + data = json.loads(json_string, **kwargs) + else: + raise BoxError('from_json requires a string or filename') + return data + 
+ +def _to_yaml(obj, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + if filename: + with open(filename, 'w', + encoding=encoding, errors=errors) as f: + yaml.dump(obj, stream=f, + default_flow_style=default_flow_style, + **yaml_kwargs) + else: + return yaml.dump(obj, + default_flow_style=default_flow_style, + **yaml_kwargs) + + +def _from_yaml(yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + **kwargs): + if filename: + with open(filename, 'r', + encoding=encoding, errors=errors) as f: + data = yaml.load(f, **kwargs) + elif yaml_string: + data = yaml.load(yaml_string, **kwargs) + else: + raise BoxError('from_yaml requires a string or filename') + return data + + +# Helper functions + + +def _safe_key(key): + try: + return str(key) + except UnicodeEncodeError: + return key.encode("utf-8", "ignore") + + +def _safe_attr(attr, camel_killer=False, replacement_char='x'): + """Convert a key into something that is accessible as an attribute""" + allowed = string.ascii_letters + string.digits + '_' + + attr = _safe_key(attr) + + if camel_killer: + attr = _camel_killer(attr) + + attr = attr.replace(' ', '_') + + out = '' + for character in attr: + out += character if character in allowed else "_" + out = out.strip("_") + + try: + int(out[0]) + except (ValueError, IndexError): + pass + else: + out = '{0}{1}'.format(replacement_char, out) + + if out in kwlist: + out = '{0}{1}'.format(replacement_char, out) + + return re.sub('_+', '_', out) + + +def _camel_killer(attr): + """ + CamelKiller, qu'est-ce que c'est? 
+ + Taken from http://stackoverflow.com/a/1176023/3244542 + """ + try: + attr = str(attr) + except UnicodeEncodeError: + attr = attr.encode("utf-8", "ignore") + + s1 = _first_cap_re.sub(r'\1_\2', attr) + s2 = _all_cap_re.sub(r'\1_\2', s1) + return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else + s2.lower()) + + +def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs): + out_list = [] + for i in iterable: + if isinstance(i, dict): + out_list.append(box_class(i, **kwargs)) + elif isinstance(i, list) or (recreate_tuples and isinstance(i, tuple)): + out_list.append(_recursive_tuples(i, box_class, + recreate_tuples, **kwargs)) + else: + out_list.append(i) + return tuple(out_list) + + +def _conversion_checks(item, keys, box_config, check_only=False, + pre_check=False): + """ + Internal use for checking if a duplicate safe attribute already exists + + :param item: Item to see if a dup exists + :param keys: Keys to check against + :param box_config: Easier to pass in than ask for specfic items + :param check_only: Don't bother doing the conversion work + :param pre_check: Need to add the item to the list of keys to check + :return: the original unmodified key, if exists and not check_only + """ + if box_config['box_duplicates'] != 'ignore': + if pre_check: + keys = list(keys) + [item] + + key_list = [(k, + _safe_attr(k, camel_killer=box_config['camel_killer_box'], + replacement_char=box_config['box_safe_prefix'] + )) for k in keys] + if len(key_list) > len(set(x[1] for x in key_list)): + seen = set() + dups = set() + for x in key_list: + if x[1] in seen: + dups.add("{0}({1})".format(x[0], x[1])) + seen.add(x[1]) + if box_config['box_duplicates'].startswith("warn"): + warnings.warn('Duplicate conversion attributes exist: ' + '{0}'.format(dups)) + else: + raise BoxError('Duplicate conversion attributes exist: ' + '{0}'.format(dups)) + if check_only: + return + # This way will be slower for warnings, as it will have double work + # But 
faster for the default 'ignore' + for k in keys: + if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], + replacement_char=box_config['box_safe_prefix']): + return k + + +def _get_box_config(cls, kwargs): + return { + # Internal use only + '__converted': set(), + '__box_heritage': kwargs.pop('__box_heritage', None), + '__created': False, + '__ordered_box_values': [], + # Can be changed by user after box creation + 'default_box': kwargs.pop('default_box', False), + 'default_box_attr': kwargs.pop('default_box_attr', cls), + 'conversion_box': kwargs.pop('conversion_box', True), + 'box_safe_prefix': kwargs.pop('box_safe_prefix', 'x'), + 'frozen_box': kwargs.pop('frozen_box', False), + 'camel_killer_box': kwargs.pop('camel_killer_box', False), + 'modify_tuples_box': kwargs.pop('modify_tuples_box', False), + 'box_duplicates': kwargs.pop('box_duplicates', 'ignore'), + 'ordered_box': kwargs.pop('ordered_box', False) + } + + +class Box(dict): + """ + Improved dictionary access through dot notation with additional tools. + + :param default_box: Similar to defaultdict, return a default value + :param default_box_attr: Specify the default replacement. 
+ WARNING: If this is not the default 'Box', it will not be recursive + :param frozen_box: After creation, the box cannot be modified + :param camel_killer_box: Convert CamelCase to snake_case + :param conversion_box: Check for near matching keys as attributes + :param modify_tuples_box: Recreate incoming tuples with dicts into Boxes + :param box_it_up: Recursively create all Boxes from the start + :param box_safe_prefix: Conversion box prefix for unsafe attributes + :param box_duplicates: "ignore", "error" or "warn" when duplicates exists + in a conversion_box + :param ordered_box: Preserve the order of keys entered into the box + """ + + _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml', + 'from_yaml', 'from_json'] + + def __new__(cls, *args, **kwargs): + """ + Due to the way pickling works in python 3, we need to make sure + the box config is created as early as possible. + """ + obj = super(Box, cls).__new__(cls, *args, **kwargs) + obj._box_config = _get_box_config(cls, kwargs) + return obj + + def __init__(self, *args, **kwargs): + self._box_config = _get_box_config(self.__class__, kwargs) + if self._box_config['ordered_box']: + self._box_config['__ordered_box_values'] = [] + if (not self._box_config['conversion_box'] and + self._box_config['box_duplicates'] != "ignore"): + raise BoxError('box_duplicates are only for conversion_boxes') + if len(args) == 1: + if isinstance(args[0], basestring): + raise ValueError('Cannot extrapolate Box from string') + if isinstance(args[0], Mapping): + for k, v in args[0].items(): + if v is args[0]: + v = self + self[k] = v + self.__add_ordered(k) + elif isinstance(args[0], Iterable): + for k, v in args[0]: + self[k] = v + self.__add_ordered(k) + + else: + raise ValueError('First argument must be mapping or iterable') + elif args: + raise TypeError('Box expected at most 1 argument, ' + 'got {0}'.format(len(args))) + + box_it = kwargs.pop('box_it_up', False) + for k, v in kwargs.items(): + if args and 
isinstance(args[0], Mapping) and v is args[0]: + v = self + self[k] = v + self.__add_ordered(k) + + if (self._box_config['frozen_box'] or box_it or + self._box_config['box_duplicates'] != 'ignore'): + self.box_it_up() + + self._box_config['__created'] = True + + def __add_ordered(self, key): + if (self._box_config['ordered_box'] and + key not in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].append(key) + + def box_it_up(self): + """ + Perform value lookup for all items in current dictionary, + generating all sub Box objects, while also running `box_it_up` on + any of those sub box objects. + """ + for k in self: + _conversion_checks(k, self.keys(), self._box_config, + check_only=True) + if self[k] is not self and hasattr(self[k], 'box_it_up'): + self[k].box_it_up() + + def __hash__(self): + if self._box_config['frozen_box']: + hashing = 54321 + for item in self.items(): + hashing ^= hash(item) + return hashing + raise TypeError("unhashable type: 'Box'") + + def __dir__(self): + allowed = string.ascii_letters + string.digits + '_' + kill_camel = self._box_config['camel_killer_box'] + items = set(dir(dict) + ['to_dict', 'to_json', + 'from_json', 'box_it_up']) + # Only show items accessible by dot notation + for key in self.keys(): + key = _safe_key(key) + if (' ' not in key and key[0] not in string.digits and + key not in kwlist): + for letter in key: + if letter not in allowed: + break + else: + items.add(key) + + for key in self.keys(): + key = _safe_key(key) + if key not in items: + if self._box_config['conversion_box']: + key = _safe_attr(key, camel_killer=kill_camel, + replacement_char=self._box_config[ + 'box_safe_prefix']) + if key: + items.add(key) + if kill_camel: + snake_key = _camel_killer(key) + if snake_key: + items.remove(key) + items.add(snake_key) + + if yaml_support: + items.add('to_yaml') + items.add('from_yaml') + + return list(items) + + def get(self, key, default=None): + try: + return self[key] + except 
KeyError: + if isinstance(default, dict) and not isinstance(default, Box): + return Box(default) + if isinstance(default, list) and not isinstance(default, BoxList): + return BoxList(default) + return default + + def copy(self): + return self.__class__(super(self.__class__, self).copy()) + + def __copy__(self): + return self.__class__(super(self.__class__, self).copy()) + + def __deepcopy__(self, memodict=None): + out = self.__class__() + memodict = memodict or {} + memodict[id(self)] = out + for k, v in self.items(): + out[copy.deepcopy(k, memodict)] = copy.deepcopy(v, memodict) + return out + + def __setstate__(self, state): + self._box_config = state['_box_config'] + self.__dict__.update(state) + + def __getitem__(self, item, _ignore_default=False): + try: + value = super(Box, self).__getitem__(item) + except KeyError as err: + if item == '_box_config': + raise BoxKeyError('_box_config should only exist as an ' + 'attribute and is never defaulted') + if self._box_config['default_box'] and not _ignore_default: + return self.__get_default(item) + raise BoxKeyError(str(err)) + else: + return self.__convert_and_store(item, value) + + def keys(self): + if self._box_config['ordered_box']: + return self._box_config['__ordered_box_values'] + return super(Box, self).keys() + + def values(self): + return [self[x] for x in self.keys()] + + def items(self): + return [(x, self[x]) for x in self.keys()] + + def __get_default(self, item): + default_value = self._box_config['default_box_attr'] + if default_value is self.__class__: + return self.__class__(__box_heritage=(self, item), + **self.__box_config()) + elif isinstance(default_value, Callable): + return default_value() + elif hasattr(default_value, 'copy'): + return default_value.copy() + return default_value + + def __box_config(self): + out = {} + for k, v in self._box_config.copy().items(): + if not k.startswith("__"): + out[k] = v + return out + + def __convert_and_store(self, item, value): + if item in 
self._box_config['__converted']: + return value + if isinstance(value, dict) and not isinstance(value, Box): + value = self.__class__(value, __box_heritage=(self, item), + **self.__box_config()) + self[item] = value + elif isinstance(value, list) and not isinstance(value, BoxList): + if self._box_config['frozen_box']: + value = _recursive_tuples(value, self.__class__, + recreate_tuples=self._box_config[ + 'modify_tuples_box'], + __box_heritage=(self, item), + **self.__box_config()) + else: + value = BoxList(value, __box_heritage=(self, item), + box_class=self.__class__, + **self.__box_config()) + self[item] = value + elif (self._box_config['modify_tuples_box'] and + isinstance(value, tuple)): + value = _recursive_tuples(value, self.__class__, + recreate_tuples=True, + __box_heritage=(self, item), + **self.__box_config()) + self[item] = value + self._box_config['__converted'].add(item) + return value + + def __create_lineage(self): + if (self._box_config['__box_heritage'] and + self._box_config['__created']): + past, item = self._box_config['__box_heritage'] + if not past[item]: + past[item] = self + self._box_config['__box_heritage'] = None + + def __getattr__(self, item): + try: + try: + value = self.__getitem__(item, _ignore_default=True) + except KeyError: + value = object.__getattribute__(self, item) + except AttributeError as err: + if item == "__getstate__": + raise AttributeError(item) + if item == '_box_config': + raise BoxError('_box_config key must exist') + kill_camel = self._box_config['camel_killer_box'] + if self._box_config['conversion_box'] and item: + k = _conversion_checks(item, self.keys(), self._box_config) + if k: + return self.__getitem__(k) + if kill_camel: + for k in self.keys(): + if item == _camel_killer(k): + return self.__getitem__(k) + if self._box_config['default_box']: + return self.__get_default(item) + raise BoxKeyError(str(err)) + else: + if item == '_box_config': + return value + return self.__convert_and_store(item, value) + + 
def __setitem__(self, key, value): + if (key != '_box_config' and self._box_config['__created'] and + self._box_config['frozen_box']): + raise BoxError('Box is frozen') + if self._box_config['conversion_box']: + _conversion_checks(key, self.keys(), self._box_config, + check_only=True, pre_check=True) + super(Box, self).__setitem__(key, value) + self.__add_ordered(key) + self.__create_lineage() + + def __setattr__(self, key, value): + if (key != '_box_config' and self._box_config['frozen_box'] and + self._box_config['__created']): + raise BoxError('Box is frozen') + if key in self._protected_keys: + raise AttributeError("Key name '{0}' is protected".format(key)) + if key == '_box_config': + return object.__setattr__(self, key, value) + try: + object.__getattribute__(self, key) + except (AttributeError, UnicodeEncodeError): + if (key not in self.keys() and + (self._box_config['conversion_box'] or + self._box_config['camel_killer_box'])): + if self._box_config['conversion_box']: + k = _conversion_checks(key, self.keys(), + self._box_config) + self[key if not k else k] = value + elif self._box_config['camel_killer_box']: + for each_key in self: + if key == _camel_killer(each_key): + self[each_key] = value + break + else: + self[key] = value + else: + object.__setattr__(self, key, value) + self.__add_ordered(key) + self.__create_lineage() + + def __delitem__(self, key): + if self._box_config['frozen_box']: + raise BoxError('Box is frozen') + super(Box, self).__delitem__(key) + if (self._box_config['ordered_box'] and + key in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].remove(key) + + def __delattr__(self, item): + if self._box_config['frozen_box']: + raise BoxError('Box is frozen') + if item == '_box_config': + raise BoxError('"_box_config" is protected') + if item in self._protected_keys: + raise AttributeError("Key name '{0}' is protected".format(item)) + try: + object.__getattribute__(self, item) + except AttributeError: + 
del self[item] + else: + object.__delattr__(self, item) + if (self._box_config['ordered_box'] and + item in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].remove(item) + + def pop(self, key, *args): + if args: + if len(args) != 1: + raise BoxError('pop() takes only one optional' + ' argument "default"') + try: + item = self[key] + except KeyError: + return args[0] + else: + del self[key] + return item + try: + item = self[key] + except KeyError: + raise BoxKeyError('{0}'.format(key)) + else: + del self[key] + return item + + def clear(self): + self._box_config['__ordered_box_values'] = [] + super(Box, self).clear() + + def popitem(self): + try: + key = next(self.__iter__()) + except StopIteration: + raise BoxKeyError('Empty box') + return key, self.pop(key) + + def __repr__(self): + return ''.format(str(self.to_dict())) + + def __str__(self): + return str(self.to_dict()) + + def __iter__(self): + for key in self.keys(): + yield key + + def __reversed__(self): + for key in reversed(list(self.keys())): + yield key + + def to_dict(self): + """ + Turn the Box and sub Boxes back into a native + python dictionary. 
+ + :return: python dictionary of this Box + """ + out_dict = dict(self) + for k, v in out_dict.items(): + if v is self: + out_dict[k] = out_dict + elif hasattr(v, 'to_dict'): + out_dict[k] = v.to_dict() + elif hasattr(v, 'to_list'): + out_dict[k] = v.to_list() + return out_dict + + def update(self, item=None, **kwargs): + if not item: + item = kwargs + iter_over = item.items() if hasattr(item, 'items') else item + for k, v in iter_over: + if isinstance(v, dict): + # Box objects must be created in case they are already + # in the `converted` box_config set + v = self.__class__(v) + if k in self and isinstance(self[k], dict): + self[k].update(v) + continue + if isinstance(v, list): + v = BoxList(v) + try: + self.__setattr__(k, v) + except (AttributeError, TypeError): + self.__setitem__(k, v) + + def setdefault(self, item, default=None): + if item in self: + return self[item] + + if isinstance(default, dict): + default = self.__class__(default) + if isinstance(default, list): + default = BoxList(default) + self[item] = default + return default + + def to_json(self, filename=None, + encoding="utf-8", errors="strict", **json_kwargs): + """ + Transform the Box object into a JSON string. + + :param filename: If provided will save to file + :param encoding: File encoding + :param errors: How to handle encoding errors + :param json_kwargs: additional arguments to pass to json.dump(s) + :return: string of JSON or return of `json.dump` + """ + return _to_json(self.to_dict(), filename=filename, + encoding=encoding, errors=errors, **json_kwargs) + + @classmethod + def from_json(cls, json_string=None, filename=None, + encoding="utf-8", errors="strict", **kwargs): + """ + Transform a json object string into a Box object. If the incoming + json is a list, you must use BoxList.from_json. 
+ + :param json_string: string to pass to `json.loads` + :param filename: filename to open and pass to `json.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param kwargs: parameters to pass to `Box()` or `json.loads` + :return: Box object from json data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_json(json_string, filename=filename, + encoding=encoding, errors=errors, **kwargs) + + if not isinstance(data, dict): + raise BoxError('json data not returned as a dictionary, ' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + if yaml_support: + def to_yaml(self, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + """ + Transform the Box object into a YAML string. + + :param filename: If provided will save to file + :param default_flow_style: False will recursively dump dicts + :param encoding: File encoding + :param errors: How to handle encoding errors + :param yaml_kwargs: additional arguments to pass to yaml.dump + :return: string of YAML or return of `yaml.dump` + """ + return _to_yaml(self.to_dict(), filename=filename, + default_flow_style=default_flow_style, + encoding=encoding, errors=errors, **yaml_kwargs) + + @classmethod + def from_yaml(cls, yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + loader=yaml.SafeLoader, **kwargs): + """ + Transform a yaml object string into a Box object. 
+ + :param yaml_string: string to pass to `yaml.load` + :param filename: filename to open and pass to `yaml.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param loader: YAML Loader, defaults to SafeLoader + :param kwargs: parameters to pass to `Box()` or `yaml.load` + :return: Box object from yaml data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_yaml(yaml_string=yaml_string, filename=filename, + encoding=encoding, errors=errors, + Loader=loader, **kwargs) + if not isinstance(data, dict): + raise BoxError('yaml data not returned as a dictionary' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + +class BoxList(list): + """ + Drop in replacement of list, that converts added objects to Box or BoxList + objects as necessary. + """ + + def __init__(self, iterable=None, box_class=Box, **box_options): + self.box_class = box_class + self.box_options = box_options + self.box_org_ref = self.box_org_ref = id(iterable) if iterable else 0 + if iterable: + for x in iterable: + self.append(x) + if box_options.get('frozen_box'): + def frozen(*args, **kwargs): + raise BoxError('BoxList is frozen') + + for method in ['append', 'extend', 'insert', 'pop', + 'remove', 'reverse', 'sort']: + self.__setattr__(method, frozen) + + def __delitem__(self, key): + if self.box_options.get('frozen_box'): + raise BoxError('BoxList is frozen') + super(BoxList, self).__delitem__(key) + + def __setitem__(self, key, value): + if self.box_options.get('frozen_box'): + raise BoxError('BoxList is frozen') + super(BoxList, self).__setitem__(key, value) + + def append(self, p_object): + if isinstance(p_object, dict): + try: + p_object = self.box_class(p_object, **self.box_options) + except AttributeError as err: + if 'box_class' in self.__dict__: + raise err + elif isinstance(p_object, list): + try: + p_object = (self if id(p_object) == 
self.box_org_ref else + BoxList(p_object)) + except AttributeError as err: + if 'box_org_ref' in self.__dict__: + raise err + super(BoxList, self).append(p_object) + + def extend(self, iterable): + for item in iterable: + self.append(item) + + def insert(self, index, p_object): + if isinstance(p_object, dict): + p_object = self.box_class(p_object, **self.box_options) + elif isinstance(p_object, list): + p_object = (self if id(p_object) == self.box_org_ref else + BoxList(p_object)) + super(BoxList, self).insert(index, p_object) + + def __repr__(self): + return "".format(self.to_list()) + + def __str__(self): + return str(self.to_list()) + + def __copy__(self): + return BoxList((x for x in self), + self.box_class, + **self.box_options) + + def __deepcopy__(self, memodict=None): + out = self.__class__() + memodict = memodict or {} + memodict[id(self)] = out + for k in self: + out.append(copy.deepcopy(k)) + return out + + def __hash__(self): + if self.box_options.get('frozen_box'): + hashing = 98765 + hashing ^= hash(tuple(self)) + return hashing + raise TypeError("unhashable type: 'BoxList'") + + def to_list(self): + new_list = [] + for x in self: + if x is self: + new_list.append(new_list) + elif isinstance(x, Box): + new_list.append(x.to_dict()) + elif isinstance(x, BoxList): + new_list.append(x.to_list()) + else: + new_list.append(x) + return new_list + + def to_json(self, filename=None, + encoding="utf-8", errors="strict", + multiline=False, **json_kwargs): + """ + Transform the BoxList object into a JSON string. 
+ + :param filename: If provided will save to file + :param encoding: File encoding + :param errors: How to handle encoding errors + :param multiline: Put each item in list onto it's own line + :param json_kwargs: additional arguments to pass to json.dump(s) + :return: string of JSON or return of `json.dump` + """ + if filename and multiline: + lines = [_to_json(item, filename=False, encoding=encoding, + errors=errors, **json_kwargs) for item in self] + with open(filename, 'w', encoding=encoding, errors=errors) as f: + f.write("\n".join(lines).decode('utf-8') if + sys.version_info < (3, 0) else "\n".join(lines)) + else: + return _to_json(self.to_list(), filename=filename, + encoding=encoding, errors=errors, **json_kwargs) + + @classmethod + def from_json(cls, json_string=None, filename=None, encoding="utf-8", + errors="strict", multiline=False, **kwargs): + """ + Transform a json object string into a BoxList object. If the incoming + json is a dict, you must use Box.from_json. + + :param json_string: string to pass to `json.loads` + :param filename: filename to open and pass to `json.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param multiline: One object per line + :param kwargs: parameters to pass to `Box()` or `json.loads` + :return: BoxList object from json data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_json(json_string, filename=filename, encoding=encoding, + errors=errors, multiline=multiline, **kwargs) + + if not isinstance(data, list): + raise BoxError('json data not returned as a list, ' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + if yaml_support: + def to_yaml(self, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + """ + Transform the BoxList object into a YAML string. 
+ + :param filename: If provided will save to file + :param default_flow_style: False will recursively dump dicts + :param encoding: File encoding + :param errors: How to handle encoding errors + :param yaml_kwargs: additional arguments to pass to yaml.dump + :return: string of YAML or return of `yaml.dump` + """ + return _to_yaml(self.to_list(), filename=filename, + default_flow_style=default_flow_style, + encoding=encoding, errors=errors, **yaml_kwargs) + + @classmethod + def from_yaml(cls, yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + loader=yaml.SafeLoader, + **kwargs): + """ + Transform a yaml object string into a BoxList object. + + :param yaml_string: string to pass to `yaml.load` + :param filename: filename to open and pass to `yaml.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param loader: YAML Loader, defaults to SafeLoader + :param kwargs: parameters to pass to `BoxList()` or `yaml.load` + :return: BoxList object from yaml data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_yaml(yaml_string=yaml_string, filename=filename, + encoding=encoding, errors=errors, + Loader=loader, **kwargs) + if not isinstance(data, list): + raise BoxError('yaml data not returned as a list' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + def box_it_up(self): + for v in self: + if hasattr(v, 'box_it_up') and v is not self: + v.box_it_up() + + +class ConfigBox(Box): + """ + Modified box object to add object transforms. 
+ + Allows for build in transforms like: + + cns = ConfigBox(my_bool='yes', my_int='5', my_list='5,4,3,3,2') + + cns.bool('my_bool') # True + cns.int('my_int') # 5 + cns.list('my_list', mod=lambda x: int(x)) # [5, 4, 3, 3, 2] + """ + + _protected_keys = dir({}) + ['to_dict', 'bool', 'int', 'float', + 'list', 'getboolean', 'to_json', 'to_yaml', + 'getfloat', 'getint', + 'from_json', 'from_yaml'] + + def __getattr__(self, item): + """Config file keys are stored in lower case, be a little more + loosey goosey""" + try: + return super(ConfigBox, self).__getattr__(item) + except AttributeError: + return super(ConfigBox, self).__getattr__(item.lower()) + + def __dir__(self): + return super(ConfigBox, self).__dir__() + ['bool', 'int', 'float', + 'list', 'getboolean', + 'getfloat', 'getint'] + + def bool(self, item, default=None): + """ Return value of key as a boolean + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: approximated bool of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + return default + raise err + + if isinstance(item, (bool, int)): + return bool(item) + + if (isinstance(item, str) and + item.lower() in ('n', 'no', 'false', 'f', '0')): + return False + + return True if item else False + + def int(self, item, default=None): + """ Return value of key as an int + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: int of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + return default + raise err + return int(item) + + def float(self, item, default=None): + """ Return value of key as a float + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: float of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + 
class SBox(Box):
    """
    ShorthandBox (SBox) allows for
    property access of `dict` `json` and `yaml`
    """
    _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml',
                                 'json', 'yaml', 'from_yaml', 'from_json',
                                 'dict']

    @property
    def dict(self):
        """Plain-dict view of this box (alias for to_dict())."""
        return self.to_dict()

    @property
    def json(self):
        """JSON-string view of this box (alias for to_json())."""
        return self.to_json()

    if yaml_support:
        @property
        def yaml(self):
            """YAML-string view of this box (alias for to_yaml())."""
            return self.to_yaml()

    def __repr__(self):
        # The class tag was lost from the format string (stripped angle
        # brackets), making repr() an empty string; restored to the
        # upstream Box-library form.
        return '<ShorthandBox: {0}>'.format(str(self.to_dict()))
class WireframeDataset(Dataset):
    """Wireframe images paired with vanishing-point labels.

    The first 500 files (in sorted order) form the validation pool; the
    remainder is training data, virtually repeated
    ``C.io.augmentation_level`` times via the reported dataset size.
    """

    def __init__(self, rootdir, split):
        self.rootdir = rootdir
        self.split = split
        all_files = sorted(glob(f"{rootdir}/*/*.png"))
        if split == "train":
            self.filelist = all_files[500:]
            self.size = C.io.augmentation_level * len(self.filelist)
        elif split == "valid":
            # "a1" augmented variants are excluded from validation.
            self.filelist = [name for name in all_files[:500]
                             if "a1" not in name]
            self.size = len(self.filelist)
        print(f"n{split}:", self.size)

    def __len__(self):
        return self.size

    def __getitem__(self, idx):
        path = self.filelist[idx % len(self.filelist)]
        pixels = skimage.io.imread(path).astype(float)[:, :, :3]
        pixels = np.rollaxis(pixels, 2).copy()  # HWC -> CHW
        with np.load(path.replace(".png", "_label.npz")) as npz:
            vpts = npz["vpts"]
        return (torch.tensor(pixels).float(),
                {"vpts": torch.tensor(vpts).float()})
self.size = len(self.filelist) + print(f"n{split}:", self.size) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + iname = self.filelist[idx % len(self.filelist)] + image = skimage.io.imread(iname)[:, :, :3] + with np.load(iname.replace("color.png", "vanish.npz")) as npz: + vpts = np.array([npz[d] for d in ["x", "y", "z"]]) + vpts[:, 1] *= -1 + # plt.imshow(image) + # cc = ["blue", "cyan", "orange"] + # for c, w in zip(cc, vpts): + # x = w[0] / w[2] * C.io.focal_length * 256 + 256 + # y = -w[1] / w[2] * C.io.focal_length * 256 + 256 + # plt.scatter(x, y, color=c) + # for xy in np.linspace(0, 512, 10): + # plt.plot( + # [x, xy, x, xy, x, 0, x, 511], + # [y, 0, y, 511, y, xy, y, xy], + # color=c, + # ) + # plt.show() + image = np.rollaxis(image.astype(np.float), 2).copy() + return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()}) + + +class Tmm17Dataset(Dataset): + def __init__(self, rootdir, split): + self.rootdir = rootdir + self.split = split + + filelist = np.genfromtxt(f"{rootdir}/{split}.txt", dtype=str) + self.filelist = [osp.join(rootdir, f) for f in filelist] + if split == "train": + self.size = len(self.filelist) * C.io.augmentation_level + elif split == "valid": + self.size = len(self.filelist) + print(f"n{split}:", self.size) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + iname = self.filelist[idx % len(self.filelist)] + image = skimage.io.imread(iname) + tname = iname.replace(".jpg", ".txt") + axy, bxy = np.genfromtxt(tname, skip_header=1) + + a0, a1 = np.array(axy[:2]), np.array(axy[2:]) + b0, b1 = np.array(bxy[:2]), np.array(bxy[2:]) + xy = intersect(a0, a1, b0, b1) - 0.5 + xy[0] *= 512 / image.shape[1] + xy[1] *= 512 / image.shape[0] + image = skimage.transform.resize(image, (512, 512)) + if image.ndim == 2: + image = image[:, :, None].repeat(3, 2) + if self.split == "train": + i, j, h, w = crop(image.shape) + else: + i, j, h, w = 0, 0, image.shape[0], image.shape[1] + image = 
def augment(image, vpts, division):
    """Apply one of four deterministic flip augmentations.

    division 0 (or anything not in 1..3) leaves the pair untouched;
    1 mirrors left-right, 2 up-down, 3 both.  The vanishing-point signs
    are flipped to stay consistent with the image.
    """
    flips = {
        1: (np.s_[:, ::-1], [-1, 1, 1]),      # left-right
        2: (np.s_[::-1, :], [1, -1, 1]),      # up-down
        3: (np.s_[::-1, ::-1], [-1, -1, 1]),  # both axes
    }
    if division not in flips:
        return image, vpts
    window, signs = flips[division]
    return image[window].copy(), (vpts * signs).copy()


def intersect(a0, a1, b0, b1):
    """Intersection of the infinite lines through a0-a1 and b0-b1.

    Picks whichever of the two symmetric parameterisations has the larger
    cross-product gap, i.e. the better-conditioned one.
    """
    c0, c1 = ccw(a0, a1, b0), ccw(a0, a1, b1)
    d0, d1 = ccw(b0, b1, a0), ccw(b0, b1, a1)
    if abs(d1 - d0) > abs(c1 - c0):
        return (a0 * d1 - a1 * d0) / (d1 - d0)
    return (b0 * c1 - b1 * c0) / (c1 - c0)


def ccw(c, a, b):
    """2D cross product of (a - c) and (b - c): positive when a->b turns
    counter-clockwise around c."""
    u = a - c
    v = b - c
    return u[0] * v[1] - v[0] * u[1]


def crop(shape, scale=(0.35, 1.0), ratio=(9 / 16, 16 / 9)):
    """Sample a random crop window for an image of the given shape.

    Makes up to 20 attempts at a random area/aspect-ratio combination;
    if none fits, falls back to a centred square crop.

    :return: (i, j, h, w) with i/j the left/top corner.
    """
    area = shape[0] * shape[1]
    for _ in range(20):
        target_area = random.uniform(*scale) * area
        aspect = random.uniform(*ratio)

        w = int(round(math.sqrt(target_area * aspect)))
        h = int(round(math.sqrt(target_area / aspect)))

        if random.random() < 0.5:
            w, h = h, w

        if h <= shape[0] and w <= shape[1]:
            j = random.randint(0, shape[0] - h)
            i = random.randint(0, shape[1] - w)
            return i, j, h, w

    # Fallback: centred square
    side = min(shape[0], shape[1])
    return (shape[1] - side) // 2, (shape[0] - side) // 2, side, side
0000000000000000000000000000000000000000..66ae8864d8bd89d2f5a85435b7c89bf339b55af3 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/__init__.py @@ -0,0 +1,2 @@ +from .hourglass_pose import hg +from .vanishing_net import VanishingNet diff --git a/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c635ad31385f1c63c1baf6f73ec691c8d841c26 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f86df3984fce8aee0367028546901204a290d46 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec57aa46d0a60c5f3d834246c63031065309477 Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc differ diff --git a/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88b04502ae1ac736bb2b8415ba2f9eb304246bdf Binary files /dev/null and b/vanishing_point_extraction/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc differ diff --git 
class ConicConv(nn.Module):
    """Deformable convolution whose sampling grid is steered by a
    vanishing point: one kernel axis follows the per-pixel direction
    toward the vp, the other its in-plane normal."""

    def __init__(self, c_in, c_out, kernel_size=3, bias=False):
        super().__init__()
        self.deform_conv = DeformConv(
            c_in,
            c_out,
            kernel_size=kernel_size,
            stride=1,
            padding=1,
            im2col_step=M.im2col_step,
            bias=bias,
        )
        self.kernel_size = _pair(kernel_size)

    def forward(self, input, vpts):
        batch, _, height, width = input.shape
        kh, kw = self.kernel_size

        # The offsets depend only on geometry, not on learnable state,
        # so they are built without autograd tracking.
        with torch.no_grad():
            ys, xs = torch.meshgrid(
                torch.arange(0, height).float().to(input.device),
                torch.arange(0, width).float().to(input.device),
            )
            # Unit vector from every pixel toward its vanishing point;
            # vpts[:, 0] pairs with the row grid, vpts[:, 1] with columns.
            direction = torch.cat(
                [
                    (vpts[:, 0, None, None] - ys)[..., None],
                    (vpts[:, 1, None, None] - xs)[..., None],
                ],
                dim=-1,
            )
            direction /= torch.norm(direction, dim=-1, keepdim=True).clamp(min=1e-5)
            # 90-degree rotation of `direction` in the image plane.
            normal = torch.cat([-direction[..., 1:2], direction[..., 0:1]], dim=-1)

            offset = torch.zeros((batch, height, width, kh, kw, 2)).to(input.device)
            for row in range(kh):
                for col in range(kw):
                    offset[..., row, col, :] = (
                        direction * (1 - row) + normal * (1 - col)
                    )
                    # NOTE(review): the extra (1 - row, 1 - col) terms look
                    # like the displacement relative to the regular kernel
                    # tap -- confirm against DeformConv's offset convention.
                    offset[..., row, col, 0] += 1 - row
                    offset[..., row, col, 1] += 1 - col
            offset = offset.permute(0, 3, 4, 5, 1, 2).reshape((batch, -1, height, width))
        return self.deform_conv(input, offset)
0000000000000000000000000000000000000000..4fc2e71389e6bc69e78053165cebdef4cc1d3804 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/build.ninja @@ -0,0 +1,30 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /usr/local/cuda/bin/nvcc + +cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++14 -O3 +post_cflags = +cuda_cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 --compiler-options '-fPIC' -std=c++14 +cuda_post_cflags = +ldflags = -shared -L/opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/lib -lc10 -lc10_cuda -ltorch_cpu -ltorch_cuda_cu -ltorch_cuda_cpp -ltorch -ltorch_python 
-L/usr/local/cuda/lib64 -lcudart + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc $cuda_cflags -c $in -o $out $cuda_post_cflags + +rule link + command = $cxx $in $ldflags -o $out + +build deform_conv_cuda.cuda.o: cuda_compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu +build deform_conv.o: compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp + +build DCN.so: link deform_conv_cuda.cuda.o deform_conv.o + +default DCN.so + diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o b/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o new file mode 100644 index 0000000000000000000000000000000000000000..5bf6ea6b3d2484bf8f04b8ae51752dbab69a1804 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe3c7f68e8eefb0ce25c505d4e1c74ebdc200d2bf2dbdb335750788635a1e114 +size 234296 diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o b/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o new file mode 100644 index 0000000000000000000000000000000000000000..e5ac751f871f21423258a5bec8b55e76dc3d5ba2 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b0f98276530eb69dd8ad586e105adb457b4f506c4acbfe8418d192f49dcf7e +size 603176 diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv.cpp b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d64ac48f32efda5836af839024b7239864f2ff3 --- /dev/null 
+++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv.cpp @@ -0,0 +1,75 @@ +#include "deform_conv_cpu.h" +#include "deform_conv_cuda.h" + +at::Tensor +deform_conv_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + if (input.type().is_cuda()) + { + return deform_conv_cuda_forward(input, weight, bias, offset, + kernel_h, kernel_w, + stride_h, stride_w, + pad_h, pad_w, + dilation_h, dilation_w, + group, + deformable_group, + im2col_step); + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +deform_conv_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + if (input.type().is_cuda()) + { + return deform_conv_cuda_backward(input, + weight, + bias, + offset, + grad_output, + kernel_h, kernel_w, + stride_h, stride_w, + pad_h, pad_w, + dilation_h, dilation_w, + group, + deformable_group, + im2col_step); + } + AT_ERROR("Not implemented on the CPU"); +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_conv_forward", &deform_conv_forward, "Backward pass of deformable convolution"); + m.def("deform_conv_backward", &deform_conv_backward, "Backward pass of deformable convolution"); +} diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cpu.h b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cpu.h new file mode 100644 index 
0000000000000000000000000000000000000000..8f4c76a54ff93d50615abd42610ab8f6d350e629 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cpu.h @@ -0,0 +1,39 @@ +#pragma once +#include + +at::Tensor +deform_conv_cpu_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + +std::vector +deform_conv_cpu_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + + diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.cu b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..2a93c49ac7cae61c8d8055993bef0f925cea32a8 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.cu @@ -0,0 +1,271 @@ +#include +#include "deform_im2col_cuda.cuh" + +#include +#include +#include +#include + +// #include +// #include +// #include + +// extern THCState *state; + +// author: Charles Shang +// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu + + +at::Tensor +deform_conv_cuda_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int 
dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); + AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), + "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group); + + // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); + // printf("Channels: %d %d\n", channels, channels_kernel); + // printf("Channels: %d %d\n", channels_out, channels_kernel); + + AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); + + AT_ASSERTM(channels == (channels_kernel * group), + "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); + + const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / 
stride_w + 1; + + auto output = at::empty({batch * height_out * width_out, channels_out}, input.options()); + + // prepare group weight and bias + auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto bias_g = bias.view({group, channels_out/group}); + + // define alias for easy use + const int batch_n = im2col_step_; + const int per_input_size = channels * height * width; + const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); + auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out}); + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options()); + AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] { + deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + deformable_group, + columns.data()); + + })); + + // auto columns_m = columns.t(); + // auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t(); + // output = at::addmm(bias, columns_m, weight_m); + auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); + auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group}); + for (int g = 0; g < group; ++g) + { + auto columns_gm = columns_g.select(0, g).t(); + auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); + auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm); + output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group}); + } + + } + + output = 
output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous(); + + return output; +} + +std::vector deform_conv_cuda_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); + AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + const int batch_ = grad_output.size(0); + const int channels_out_ = grad_output.size(1); + const int height_out_ = grad_output.size(2); + const int width_out_ = grad_output.size(3); + + const int im2col_step_ = std::min(im2col_step, batch); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), + "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group); + + AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); + + 
AT_ASSERTM(channels == (channels_kernel * group), + "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); + + const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + AT_ASSERTM(batch == batch_, + "Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_); + + AT_ASSERTM(channels_out == channels_out_, + "Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_); + + AT_ASSERTM(height_out == height_out_ && width_out == width_out_, + "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_); + + auto grad_input = at::zeros_like(input); + auto grad_offset = at::zeros_like(offset); + auto grad_weight = at::zeros_like(weight); + auto grad_bias = at::zeros_like(bias); + + // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out}); + // auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t(); + // columns = at::mm(weight_m, grad_output_m); + + // prepare group weight and bias + auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto grad_bias_g = grad_bias.view({group, channels_out/group}); + + const int batch_n = im2col_step_; + const int per_input_size = channels * height * width; + const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out}); + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out}); + auto ones = 
at::ones({batch_n * height_out * width_out}, input.options()); + auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options()); + auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); + for (int g = 0; g < group; ++g) + { + auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); + auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); + columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm); + } + + AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] { + deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(), + columns.data(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + grad_offset.data() + n * im2col_step_ * per_offset_size); + // gradient w.r.t. input data + deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + columns.data(), + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + grad_input.data() + n * im2col_step_ * per_input_size); + + // gradient w.r.t. 
weight, dWeight should accumulate across the batch and group + deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + columns.data()); + + })); + + // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out}); + // grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight); + // grad_bias = at::mv(grad_output_m, ones); + // auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out}); + // auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out}); + for (int g = 0; g < group; ++g) + { + auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); + auto columns_gm = columns_g.select(0, g).t(); + auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}); + auto grad_bias_gm = grad_bias_g.select(0, g); + grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g)); + grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones); + } + + } + + return { + grad_input, grad_offset, grad_weight, grad_bias + }; +} diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.h b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..61d811a9c133dad1f73973909d00125aea5aea9b --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_conv_cuda.h @@ -0,0 +1,38 @@ +#pragma once +#include + +at::Tensor 
+deform_conv_cuda_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + +std::vector +deform_conv_cuda_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + diff --git a/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b49033e9a06c96ae997331d367c2b886660d2e78 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh @@ -0,0 +1,388 @@ +#include +#include +#include + +#include +#include + +// #include +#include +// #include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = 
bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * 
(argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_im, const scalar_t *data_offset, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + // launch channels * batch_size * height_col * width_col cores + CUDA_KERNEL_LOOP(index, n) + { + // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) + // here columns is of shape (N, c*kw*kh, oh * ow), 
need to adapt axis + // NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow) + // here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis + + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const scalar_t map_h = i * dilation_h + offset_h; + //const scalar_t map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width 
- w_in; + //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +template +__global__ void deformable_col2im_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * 
dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_im, + const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_offset) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * 
height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + } +} + +template +void deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_im, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, 
const int dilation_w, + const int deformable_group, scalar_t* data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_im, data_offset, height_im, width_im, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, channels, deformable_group, height_col, width_col, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* data_col, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, scalar_t* grad_im){ + + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; + deformable_col2im_gpu_kernel + <<>>( + num_kernels, data_col, data_offset, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void deformable_col2im_coord_cuda(cudaStream_t stream, + const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const 
int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, + scalar_t* grad_offset) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; + const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; + deformable_col2im_coord_gpu_kernel + <<>>( + num_kernels, data_col, data_im, data_offset, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, + grad_offset); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); + } +} \ No newline at end of file diff --git a/vanishing_point_extraction/neurvps/neurvps/models/deformable.py b/vanishing_point_extraction/neurvps/neurvps/models/deformable.py new file mode 100644 index 0000000000000000000000000000000000000000..107382556f1712dcf0a83b41fce3d9b024819014 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/deformable.py @@ -0,0 +1,193 @@ +import os +import math +import warnings +from glob import glob + +import torch +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair +from torch.autograd.function import once_differentiable + + +def load_cpp_ext(ext_name): + root_dir = os.path.join(os.path.split(__file__)[0]) + src_dir = os.path.join(root_dir, "cpp") + tar_dir = os.path.join(src_dir, "build", ext_name) + os.makedirs(tar_dir, exist_ok=True) + srcs = glob(f"{src_dir}/*.cu") + glob(f"{src_dir}/*.cpp") + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from torch.utils.cpp_extension import 
load + + ext = load( + name=ext_name, + sources=srcs, + extra_cflags=["-O3"], + extra_cuda_cflags=[], + build_directory=tar_dir, + ) + return ext + + +# defer calling load_cpp_ext to make CUDA_VISIBLE_DEVICES happy +DCN = None + + +class DeformConvFunction(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + bias, + stride, + padding, + dilation, + group, + deformable_groups, + im2col_step, + ): + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.kernel_size = _pair(weight.shape[2:4]) + ctx.group = group + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + output = DCN.deform_conv_forward( + input, + weight, + bias, + offset, + ctx.kernel_size[0], + ctx.kernel_size[1], + ctx.stride[0], + ctx.stride[1], + ctx.padding[0], + ctx.padding[1], + ctx.dilation[0], + ctx.dilation[1], + ctx.group, + ctx.deformable_groups, + ctx.im2col_step, + ) + ctx.save_for_backward(input, offset, weight, bias) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight, bias = ctx.saved_tensors + grad_input, grad_offset, grad_weight, grad_bias = DCN.deform_conv_backward( + input, + weight, + bias, + offset, + grad_output, + ctx.kernel_size[0], + ctx.kernel_size[1], + ctx.stride[0], + ctx.stride[1], + ctx.padding[0], + ctx.padding[1], + ctx.dilation[0], + ctx.dilation[1], + ctx.group, + ctx.deformable_groups, + ctx.im2col_step, + ) + + return ( + grad_input, + grad_offset, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + None, + ) + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=11, + bias=True, + ): + global DCN + DCN = load_cpp_ext("DCN") + super(DeformConv, self).__init__() + + if in_channels % groups != 0: + raise ValueError( + "in_channels {} must be divisible by groups 
{}".format( + in_channels, groups + ) + ) + if out_channels % groups != 0: + raise ValueError( + "out_channels {} must be divisible by groups {}".format( + out_channels, groups + ) + ) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.im2col_step = im2col_step + self.use_bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + self.bias = nn.Parameter(torch.Tensor(out_channels)) + self.reset_parameters() + if not self.use_bias: + self.bias.requires_grad = False + + def reset_parameters(self): + nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + if self.use_bias: + fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + nn.init.uniform_(self.bias, -bound, bound) + else: + nn.init.zeros_(self.bias) + + def forward(self, input, offset): + assert ( + 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] + == offset.shape[1] + ) + return DeformConvFunction.apply( + input.contiguous(), + offset.contiguous(), + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + self.im2col_step, + ) diff --git a/vanishing_point_extraction/neurvps/neurvps/models/hourglass_pose.py b/vanishing_point_extraction/neurvps/neurvps/models/hourglass_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..dc380cfdda967ae7519877c15573fbce2f9fae4a --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/hourglass_pose.py @@ -0,0 +1,192 @@ +""" +Hourglass network inserted in the pre-activated Resnet +Use lr=0.01 for current version +(c) Yichao Zhou (VanishingNet) +(c) Yichao Zhou (LCNN) +(c) YANG, Wei +""" +import torch 
import torch.nn as nn
import torch.nn.functional as F

__all__ = ["HourglassNet", "hg"]


class Bottleneck2D(nn.Module):
    # Pre-activation residual bottleneck (BN -> ReLU -> Conv ordering).
    # Output channels = planes * expansion.
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, resample=None):
        """1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.

        `resample`, when given, is applied to the input to adapt the skip
        connection (e.g. a 1x1 conv when channel counts differ).
        """
        super(Bottleneck2D, self).__init__()

        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * Bottleneck2D.expansion, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.resample = resample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)

        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)

        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)

        if self.resample is not None:
            residual = self.resample(x)

        out += residual

        return out


class Hourglass(nn.Module):
    """Single recursive hourglass: downsample `depth` times, then upsample back."""

    def __init__(self, block, num_blocks, planes, depth):
        super(Hourglass, self).__init__()
        self.depth = depth
        self.block = block
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)

    def _make_residual(self, block, num_blocks, planes):
        # Chain of `num_blocks` residual blocks at constant width.
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(planes * block.expansion, planes))
        return nn.Sequential(*layers)

    def _make_hour_glass(self, block, num_blocks, planes, depth):
        # Each level holds 3 residual branches (skip, down, up); the deepest
        # level (i == 0) gets a 4th branch for the innermost bottleneck.
        hg = []
        for i in range(depth):
            res = []
            for j in range(3):
                res.append(self._make_residual(block, num_blocks, planes))
            if i == 0:
                res.append(self._make_residual(block, num_blocks, planes))
            hg.append(nn.ModuleList(res))
        return nn.ModuleList(hg)

    def _hour_glass_forward(self, n, x):
        # Branch 0: skip path at current resolution.
        up1 = self.hg[n - 1][0](x)
        # Branch 1: downsample then process.
        low1 = F.max_pool2d(x, 2, stride=2)
        low1 = self.hg[n - 1][1](low1)

        if n > 1:
            low2 = self._hour_glass_forward(n - 1, low1)
        else:
            low2 = 
self.hg[n - 1][3](low1) + low3 = self.hg[n - 1][2](low2) + up2 = F.interpolate(low3, scale_factor=2) + out = up1 + up2 + return out + + def forward(self, x): + return self._hour_glass_forward(self.depth, x) + + +class HourglassNet(nn.Module): + def __init__(self, planes, block, head, depth, num_stacks, num_blocks): + super(HourglassNet, self).__init__() + + self.inplanes = 64 + self.num_feats = 128 + self.num_stacks = num_stacks + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3) + self.bn1 = nn.BatchNorm2d(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_residual(block, self.inplanes, 1) + self.layer2 = self._make_residual(block, self.inplanes, 1) + self.layer3 = self._make_residual(block, self.num_feats, 1) + self.maxpool = nn.MaxPool2d(2, stride=2) + + # build hourglass modules + ch = self.num_feats * block.expansion + + hg, res, fc, score, fc_, score_ = [], [], [], [], [], [] + for i in range(num_stacks): + hg.append(Hourglass(block, num_blocks, self.num_feats, depth)) + res.append(self._make_residual(block, self.num_feats, num_blocks)) + fc.append(self._make_fc(ch, ch)) + score.append(head(ch, planes)) + if i < num_stacks - 1: + fc_.append(nn.Conv2d(ch, ch, kernel_size=1)) + score_.append(nn.Conv2d(planes, ch, kernel_size=1)) + + self.hg = nn.ModuleList(hg) + self.res = nn.ModuleList(res) + self.fc = nn.ModuleList(fc) + self.score = nn.ModuleList(score) + self.fc_ = nn.ModuleList(fc_) + self.score_ = nn.ModuleList(score_) + + def _make_residual(self, block, planes, blocks, stride=1): + resample = None + if stride != 1 or self.inplanes != planes * block.expansion: + resample = nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=1, stride=stride + ) + layers = [block(self.inplanes, planes, stride, resample)] + self.inplanes = planes * block.expansion + for i in range(blocks - 1): + layers.append(block(self.inplanes, planes)) + return nn.Sequential(*layers) + + def _make_fc(self, inplanes, 
outplanes): + return nn.Sequential( + nn.Conv2d(inplanes, outplanes, kernel_size=1), + nn.BatchNorm2d(inplanes), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.layer1(x) + x = self.maxpool(x) + x = self.layer2(x) + x = self.layer3(x) + + for i in range(self.num_stacks): + y = self.hg[i](x) + y = self.res[i](y) + y = self.fc[i](y) + score = self.score[i](y) + out.append(score) + if i < self.num_stacks - 1: + fc_ = self.fc_[i](y) + score_ = self.score_[i](score) + x = x + fc_ + score_ + + return out[::-1] + + +def hg(**kwargs): + model = HourglassNet( + planes=kwargs["planes"], + block=Bottleneck2D, + head=kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1)), + depth=kwargs["depth"], + num_stacks=kwargs["num_stacks"], + num_blocks=kwargs["num_blocks"], + ) + return model + + +def main(): + hg(depth=2, num_stacks=1, num_blocks=1) + + +if __name__ == "__main__": + main() diff --git a/vanishing_point_extraction/neurvps/neurvps/models/vanishing_net.py b/vanishing_point_extraction/neurvps/neurvps/models/vanishing_net.py new file mode 100644 index 0000000000000000000000000000000000000000..9d0386c480d41b89a55f66452456d16e522da238 --- /dev/null +++ b/vanishing_point_extraction/neurvps/neurvps/models/vanishing_net.py @@ -0,0 +1,181 @@ +import sys +import math +import random +import itertools +from collections import defaultdict + +import numpy as np +import torch +import torch.nn as nn +import numpy.linalg as LA +import matplotlib.pyplot as plt +import torch.nn.functional as F + +from neurvps.utils import plot_image_grid +from neurvps.config import C, M +from neurvps.models.conic import ConicConv + + +class VanishingNet(nn.Module): + def __init__(self, backbone, output_stride=4, upsample_scale=1): + super().__init__() + self.backbone = backbone + self.anet = ApolloniusNet(output_stride, upsample_scale) + self.loss = nn.BCEWithLogitsLoss(reduction="none") + + def forward(self, 
input_dict): + x = self.backbone(input_dict["image"])[0] + N, _, H, W = x.shape + test = input_dict.get("test", False) + if test: + c = len(input_dict["vpts"]) + else: + c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg) + x = x[:, None].repeat(1, c, 1, 1, 1).reshape(N * c, _, H, W) + + if test: + vpts = [to_pixel(v) for v in input_dict["vpts"]] + vpts = torch.tensor(vpts, device=x.device) + return self.anet(x, vpts).sigmoid() + + vpts_gt = input_dict["vpts"].cpu().numpy() + vpts, y = [], [] + for n in range(N): + + def add_sample(p): + vpts.append(to_pixel(p)) + y.append(to_label(p, vpts_gt[n])) + + for vgt in vpts_gt[n]: + for st, ed in zip([0] + M.multires[:-1], M.multires): + # positive samples + for _ in range(M.smp_pos): + add_sample(sample_sphere(vgt, st, ed)) + # negative samples + for _ in range(M.smp_neg): + add_sample(sample_sphere(vgt, ed, ed * M.smp_multiplier)) + # random samples + for _ in range(M.smp_rnd): + add_sample(sample_sphere(np.array([0, 0, 1]), 0, math.pi / 2)) + + y = torch.tensor(y, device=x.device, dtype=torch.float) + vpts = torch.tensor(vpts, device=x.device) + + x = self.anet(x, vpts) + L = self.loss(x, y) + maskn = (y == 0).float() + maskp = (y == 1).float() + losses = {} + for i in range(len(M.multires)): + assert maskn[:, i].sum().item() != 0 + assert maskp[:, i].sum().item() != 0 + losses[f"lneg{i}"] = (L[:, i] * maskn[:, i]).sum() / maskn[:, i].sum() + losses[f"lpos{i}"] = (L[:, i] * maskp[:, i]).sum() / maskp[:, i].sum() + + return { + "losses": [losses], + "preds": {"vpts": vpts, "scores": x.sigmoid(), "ys": y}, + } + + +class ApolloniusNet(nn.Module): + def __init__(self, output_stride, upsample_scale): + super().__init__() + self.fc0 = nn.Conv2d(64, 32, 1) + self.relu = nn.ReLU(inplace=True) + self.pool = nn.MaxPool2d(2, 2) + + if M.conic_6x: + self.bn00 = nn.BatchNorm2d(32) + self.conv00 = ConicConv(32, 32) + self.bn0 = nn.BatchNorm2d(32) + self.conv0 = ConicConv(32, 32) + + self.bn1 = 
def orth(v):
    """Return a unit vector orthogonal to the 3-vector ``v``.

    Chooses the formula with the larger leading component to avoid a
    near-zero result when ``v`` is axis-aligned.
    """
    x, y, z = v
    if abs(x) < abs(y):
        u = np.array([0.0, -z, y])
    else:
        u = np.array([-z, 0.0, x])
    return u / LA.norm(u)


def sample_sphere(v, theta0, theta1):
    """Draw one random unit vector whose angle to ``v`` is in [theta0, theta1].

    Sampling is uniform over the spherical band: the cosine of the polar
    angle is drawn uniformly and the azimuth uniformly in [0, 2*pi).
    """
    # Uniform in cos(theta) gives area-uniform sampling on the band.
    cos_t = random.uniform(math.cos(theta1), math.cos(theta0))
    phi = random.random() * math.pi * 2
    # Orthonormal frame (v, b1, b2) spanning R^3.
    b1 = orth(v)
    b2 = np.cross(v, b1)
    sin_t = math.sqrt(1 - cos_t ** 2)
    w = v * cos_t + sin_t * (b1 * math.cos(phi) + b2 * math.sin(phi))
    return w / LA.norm(w)
def to_pixel(w):
    """Project a unit sphere direction ``w = (x, y, z)`` into image coordinates.

    Uses the pinhole model with focal length ``C.io.focal_length`` (in units
    of half the image width) and a 512x512 image centered at (256, 256); the
    image y axis points down, hence the sign flip.

    Returns ``(row, col)`` — note the (y, x) ordering.
    """
    x, y, z = w[0], w[1], w[2]
    col = x / z * C.io.focal_length * 256 + 256
    row = -y / z * C.io.focal_length * 256 + 256
    return row, col
f"--port={C.io.tensorboard_port}"] + ) + + def killme(): + os.kill(p.pid, signal.SIGTERM) + + atexit.register(killme) + + def _loss(self, result): + losses = result["losses"] + # Don't move loss label to other place. + # If I want to change the loss, I just need to change this function. + if self.loss_labels is None: + self.loss_labels = ["sum"] + list(losses[0].keys()) + self.metrics = np.zeros([self.num_stacks, len(self.loss_labels)]) + print() + print( + "| ".join( + ["progress "] + + list(map("{:7}".format, self.loss_labels)) + + ["speed"] + ) + ) + with open(f"{self.out}/loss.csv", "a") as fout: + print(",".join(["progress"] + self.loss_labels), file=fout) + + total_loss = 0 + for i in range(self.num_stacks): + for j, name in enumerate(self.loss_labels): + if name == "sum": + continue + if name not in losses[i]: + assert i != 0 + continue + loss = losses[i][name].mean() + self.metrics[i, 0] += loss.item() + self.metrics[i, j] += loss.item() + total_loss += loss + return total_loss + + def validate(self): + tprint("Running validation...", " " * 75) + training = self.model.training + self.model.eval() + + viz = osp.join(self.out, "viz", f"{self.iteration * self.batch_size:09d}") + npz = osp.join(self.out, "npz", f"{self.iteration * self.batch_size:09d}") + osp.exists(viz) or os.makedirs(viz) + osp.exists(npz) or os.makedirs(npz) + + total_loss = 0 + self.metrics[...] 
= 0 + c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg) + with torch.no_grad(): + for batch_idx, (image, target) in enumerate(self.val_loader): + image = image.to(self.device) + input_dict = {"image": image, "vpts": target["vpts"], "eval": True} + result = self.model(input_dict) + total_loss += self._loss(result) + # permute output to be (batch x (nneg + npos) x 2) + preds = result["preds"] + vpts = preds["vpts"].reshape(-1, c, 2).cpu().numpy() + scores = preds["scores"].reshape(-1, c, len(M.multires)).cpu().numpy() + ys = preds["ys"].reshape(-1, c, len(M.multires)).cpu().numpy() + for i in range(self.batch_size): + index = batch_idx * self.batch_size + i + np.savez( + f"{npz}/{index:06}.npz", + **{k: v[i].cpu().numpy() for k, v in preds.items()}, + ) + if index >= 8: + continue + self.plot(index, image[i], vpts[i], scores[i], ys[i], f"{viz}/{index:06}") + + self._write_metrics(len(self.val_loader), total_loss, "validation", True) + self.mean_loss = total_loss / len(self.val_loader) + + torch.save( + { + "iteration": self.iteration, + "arch": self.model.__class__.__name__, + "optim_state_dict": self.optim.state_dict(), + "model_state_dict": self.model.state_dict(), + "best_mean_loss": self.best_mean_loss, + }, + osp.join(self.out, "checkpoint_latest.pth.tar"), + ) + shutil.copy( + osp.join(self.out, "checkpoint_latest.pth.tar"), + osp.join(npz, "checkpoint.pth.tar"), + ) + if self.mean_loss < self.best_mean_loss: + self.best_mean_loss = self.mean_loss + shutil.copy( + osp.join(self.out, "checkpoint_latest.pth.tar"), + osp.join(self.out, "checkpoint_best.pth.tar"), + ) + + if training: + self.model.train() + + def train_epoch(self): + self.model.train() + time = timer() + for batch_idx, (image, target) in enumerate(self.train_loader): + self.optim.zero_grad() + self.metrics[...] 
= 0 + + image = image.to(self.device) + input_dict = {"image": image, "vpts": target["vpts"], "eval": False} + result = self.model(input_dict) + + loss = self._loss(result) + if np.isnan(loss.item()): + raise ValueError("loss is nan while training") + loss.backward() + self.optim.step() + + if self.avg_metrics is None: + self.avg_metrics = self.metrics + else: + self.avg_metrics = self.avg_metrics * 0.9 + self.metrics * 0.1 + self.iteration += 1 + self._write_metrics(1, loss.item(), "training", do_print=False) + + if self.iteration % 4 == 0: + tprint( + f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| " + + "| ".join(map("{:.5f}".format, self.avg_metrics[0])) + + f"| {4 * self.batch_size / (timer() - time):04.1f} " + ) + time = timer() + num_images = self.batch_size * self.iteration + if ( + num_images % C.io.validation_interval == 0 + or num_images == C.io.validation_debug + ): + self.validate() + time = timer() + + def _write_metrics(self, size, total_loss, prefix, do_print=False): + for i, metrics in enumerate(self.metrics): + for label, metric in zip(self.loss_labels, metrics): + self.writer.add_scalar( + f"{prefix}/{i}/{label}", metric / size, self.iteration + ) + if i == 0 and do_print: + csv_str = ( + f"{self.epoch:03}/{self.iteration * self.batch_size:07}," + + ",".join(map("{:.11f}".format, metrics / size)) + ) + prt_str = ( + f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| " + + "| ".join(map("{:.5f}".format, metrics / size)) + ) + with open(f"{self.out}/loss.csv", "a") as fout: + print(csv_str, file=fout) + pprint(prt_str, " " * 7) + self.writer.add_scalar( + f"{prefix}/total_loss", total_loss / size, self.iteration + ) + return total_loss + + def plot(self, index, image, vpts, scores, ys, prefix): + for idx, (vp, score, y) in enumerate(zip(vpts, scores, ys)): + plt.imshow(image[0].cpu().numpy()) + color = (random.random(), random.random(), random.random()) + plt.scatter(vp[1], vp[0]) + plt.text( + vp[1] - 20, + vp[0] - 
def tprint(*args):
    """Print a transient status line: carriage return first, no trailing newline.

    The next tprint/pprint call overwrites this line in the terminal.
    """
    print("\r" + " ".join(map(str, args)), end="")


def pprint(*args):
    """Print a permanent line, overwriting any transient status line first."""
    print("\r" + " ".join(map(str, args)))
def np_softmax(x, axis=0):
    """Numerically stable softmax of ``x`` along ``axis``.

    The global maximum is subtracted before exponentiation to avoid
    overflow; normalization is per-slice along ``axis``.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum(axis=axis, keepdims=True)


def argsort2d(arr):
    """Return (row, col) index pairs of ``arr`` ordered by ascending value.

    Output shape is (arr.size, 2).
    """
    flat_order = np.argsort(arr.ravel())
    rows, cols = np.unravel_index(flat_order, arr.shape)
    return np.dstack((rows, cols))[0]
def AA(x, y, threshold):
    """Area under the curve y(x) up to ``x = threshold``, normalized by it.

    ``x`` must be sorted ascending.  The curve is treated as a step function
    (left values), truncated at ``threshold`` via a rectangle-rule sum.
    """
    cut = np.searchsorted(x, threshold)
    xs = np.concatenate([x[:cut], [threshold]])
    ys = np.concatenate([y[:cut], [threshold]])
    widths = xs[1:] - xs[:-1]
    return (widths * ys[:-1]).sum() / threshold


def sample_sphere(v, alpha, num_pts):
    """Deterministically sample ``num_pts`` unit vectors in the spherical cap
    of half-angle ``alpha`` around ``v``, using a Fibonacci-spiral layout.

    Returns an array of shape (num_pts, 3).
    """
    b1 = orth(v)
    b2 = np.cross(v, b1)
    axis, b1, b2 = v[:, None], b1[:, None], b2[:, None]
    k = np.linspace(1, num_pts, num_pts)
    # Polar angle chosen so samples are area-uniform over the cap.
    phi = np.arccos(1 + (math.cos(alpha) - 1) * k / num_pts)
    # Golden-angle increment in azimuth spreads points evenly.
    theta = np.pi * (1 + 5 ** 0.5) * k
    radial = np.sin(phi)
    pts = axis * np.cos(phi) + radial * (b1 * np.cos(theta) + b2 * np.sin(theta))
    return pts.T


def orth(v):
    """Return a unit vector orthogonal to the 3-vector ``v``."""
    x, y, z = v
    if abs(x) < abs(y):
        u = np.array([0.0, -z, y])
    else:
        u = np.array([-z, 0.0, x])
    return u / LA.norm(u)
SemanticKITTI | KITTI360)') + parser.add_argument('--root_path', type=str, help='dataset root path') + parser.add_argument('--save_path', type=str, help='result path') + + args = parser.parse_args() + config_file = args.config_file + C.update(C.from_yaml(filename=config_file)) + C.model.im2col_step = 32 # override im2col_step for evaluation + M.update(C.model) + + random.seed(0) + np.random.seed(0) + torch.manual_seed(0) + + device_name = "cpu" + os.environ["CUDA_VISIBLE_DEVICES"] = args.devices + if torch.cuda.is_available(): + device_name = "cuda" + torch.backends.cudnn.deterministic = True + torch.cuda.manual_seed(0) + print("Let's use", torch.cuda.device_count(), "GPU(s)!") + else: + print("CUDA is not available") + device = torch.device(device_name) + + if M.backbone == "stacked_hourglass": + model = neurvps.models.hg( + planes=64, depth=M.depth, num_stacks=M.num_stacks, num_blocks=M.num_blocks + ) + else: + raise NotImplementedError + + checkpoint = torch.load(args.checkpoint) + model = neurvps.models.VanishingNet( + model, C.model.output_stride, C.model.upsample_scale + ) + model = model.to(device) + model = torch.nn.DataParallel( + model, device_ids=list(range(args.devices.count(",") + 1)) + ) + model.load_state_dict(checkpoint["model_state_dict"]) + model.eval() + + dataset = args.dataset + root_path = args.root_path + save_root_path = args.save_path + + if dataset == "SemanticKITTI": + sequences = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10'] + folder_paths = [os.path.join(root_path, 'dataset/sequences', str(sequence), 'image_2') for sequence in sequences] + dataset_prefixes = ['SemanticKITTI/dataset/sequences/'+str(sequence)+'/image_2/' for sequence in sequences] + if not os.path.exists(save_root_path): + os.makedirs(save_root_path) + save_paths = [os.path.join(save_root_path, "seq_" + sequence + '.json') for sequence in sequences] + + elif dataset == "KITTI-360": + sequences = ['2013_05_28_drive_0000_sync', 
'2013_05_28_drive_0002_sync', '2013_05_28_drive_0003_sync',\ + '2013_05_28_drive_0004_sync', '2013_05_28_drive_0005_sync', '2013_05_28_drive_0006_sync',\ + '2013_05_28_drive_0007_sync','2013_05_28_drive_0009_sync','2013_05_28_drive_0010_sync'] + folder_paths = [os.path.join(root_path, 'data_2d_raw',str(sequence), 'image_00/data_rect') for sequence in sequences] + dataset_prefixes = ['KITTI-360/'+str(sequence)+'/image_00/' for sequence in sequences] + if not os.path.exists(save_root_path): + os.makedirs(save_root_path) + save_sequences = ['00', '02', '03', '04', '05', '06', '07', '09', '10'] + save_paths = [os.path.join(save_root_path, "seq_" + sequence + '.json') for sequence in save_sequences] + + for seq in range(len(sequences)): + print("sequence : ", seq) + folder_path = folder_paths[seq] + all_files = os.listdir(folder_path) + all_files = sorted(all_files, key=lambda s: int(re.search(r'\d+', s).group())) + + image_extensions = ['.jpg', '.png', '.jpeg'] + + VP = {} + + for file in tqdm(all_files): + if any(file.endswith(ext) for ext in image_extensions): + image_path = os.path.join(folder_path, file) + + image_origin = skimage.io.imread(image_path) + + original_height, original_width = image_origin.shape[:2] + + image = skimage.transform.resize(image_origin, (512, 512)) + + if image.ndim == 2: + image = image[:, :, None].repeat(3, 2) + + image = np.rollaxis(image, 2) + image_tensor = torch.tensor(image * 255).float().to(device).unsqueeze(0) + + input_dict = {"image": image_tensor, "test": True} + vpts = sample_sphere(np.array([0, 0, 1]), np.pi / 2, 64) + input_dict["vpts"] = vpts + with torch.no_grad(): + score = model(input_dict)[:, -1].cpu().numpy() + index = np.argsort(-score) + candidate = [index[0]] + n = C.io.num_vpts + for i in index[1:]: + if len(candidate) == n: + break + dst = np.min(np.arccos(np.abs(vpts[candidate] @ vpts[i]))) + if dst < np.pi / n: + continue + candidate.append(i) + vpts_pd = vpts[candidate] + + for res in range(1, len(M.multires)): 
+ vpts = [sample_sphere(vpts_pd[vp], M.multires[-res], 64) for vp in range(n)] + input_dict["vpts"] = np.vstack(vpts) + with torch.no_grad(): + score = model(input_dict)[:, -res - 1].cpu().numpy().reshape(n, -1) + for i, s in enumerate(score): + vpts_pd[i] = vpts[i][np.argmax(s)] + + Vanishing_point = [] + + for vp in vpts_pd: + x = vp[0] * original_width / 2 + original_width / 2 + y = original_height / 2 - vp[1] * original_height / 2 + + Vanishing_point.append([x,y]) + + VP[os.path.join(dataset_prefixes[seq], file)] = Vanishing_point + + with open(save_paths[seq], 'w') as f: + json.dump(VP, f) + +if __name__ == "__main__": + main() diff --git a/vanishing_point_extraction/vanishing_point/neurvps/TMM17/checkpoint_latest.pth.tar b/vanishing_point_extraction/vanishing_point/neurvps/TMM17/checkpoint_latest.pth.tar new file mode 100644 index 0000000000000000000000000000000000000000..06dc2f7fe25993bb061965d8c1d65a4852b58325 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/TMM17/checkpoint_latest.pth.tar @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:951f12bfe2a3afdef5b95d6a1cb9bbe51e73913c70212c8e628b696bd39a74e7 +size 358844104 diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__init__.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..abeceda76588a7081ffcb7f3658f00287eb4b260 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__init__.py @@ -0,0 +1,4 @@ +import neurvps.models +import neurvps.trainer +import neurvps.datasets +import neurvps.config diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0a820426cca8f81e1acb4eaeb1e0415f551aca7 Binary files /dev/null and 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/__init__.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/box.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/box.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd0fb7520458a2fadca19a4c3d932f5082bb2f98 Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/box.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/config.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/config.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31b61227e0ebcb94fc78770b8854a3f0130ef46c Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/config.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aacf11f9d849f650648b624171eeb6488503689 Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/datasets.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92b778b119b62d38dbf6eb76af24c31a3b66aa5f Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/trainer.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/utils.cpython-38.pyc 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62441d88a8458a8df8f40f35eb402b1a2a8af41d Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/__pycache__/utils.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/box.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/box.py new file mode 100644 index 0000000000000000000000000000000000000000..cb44cf81c25de08cc90dba273ab2ebcf41f3961b --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/box.py @@ -0,0 +1,1110 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- +# +# Copyright (c) 2017-2019 - Chris Griffith - MIT License +""" +Improved dictionary access through dot notation with additional tools. +""" +import string +import sys +import json +import re +import copy +from keyword import kwlist +import warnings + +try: + from collections.abc import Iterable, Mapping, Callable +except ImportError: + from collections import Iterable, Mapping, Callable + +yaml_support = True + +try: + import yaml +except ImportError: + try: + import ruamel.yaml as yaml + except ImportError: + yaml = None + yaml_support = False + +if sys.version_info >= (3, 0): + basestring = str +else: + from io import open + +__all__ = ['Box', 'ConfigBox', 'BoxList', 'SBox', + 'BoxError', 'BoxKeyError'] +__author__ = 'Chris Griffith' +__version__ = '3.2.4' + +BOX_PARAMETERS = ('default_box', 'default_box_attr', 'conversion_box', + 'frozen_box', 'camel_killer_box', 'box_it_up', + 'box_safe_prefix', 'box_duplicates', 'ordered_box') + +_first_cap_re = re.compile('(.)([A-Z][a-z]+)') +_all_cap_re = re.compile('([a-z0-9])([A-Z])') + + +class BoxError(Exception): + """Non standard dictionary exceptions""" + + +class BoxKeyError(BoxError, KeyError, AttributeError): + """Key does not exist""" + + +# Abstract converter functions for use in 
any Box class + + +def _to_json(obj, filename=None, + encoding="utf-8", errors="strict", **json_kwargs): + json_dump = json.dumps(obj, + ensure_ascii=False, **json_kwargs) + if filename: + with open(filename, 'w', encoding=encoding, errors=errors) as f: + f.write(json_dump if sys.version_info >= (3, 0) else + json_dump.decode("utf-8")) + else: + return json_dump + + +def _from_json(json_string=None, filename=None, + encoding="utf-8", errors="strict", multiline=False, **kwargs): + if filename: + with open(filename, 'r', encoding=encoding, errors=errors) as f: + if multiline: + data = [json.loads(line.strip(), **kwargs) for line in f + if line.strip() and not line.strip().startswith("#")] + else: + data = json.load(f, **kwargs) + elif json_string: + data = json.loads(json_string, **kwargs) + else: + raise BoxError('from_json requires a string or filename') + return data + + +def _to_yaml(obj, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + if filename: + with open(filename, 'w', + encoding=encoding, errors=errors) as f: + yaml.dump(obj, stream=f, + default_flow_style=default_flow_style, + **yaml_kwargs) + else: + return yaml.dump(obj, + default_flow_style=default_flow_style, + **yaml_kwargs) + + +def _from_yaml(yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + **kwargs): + if filename: + with open(filename, 'r', + encoding=encoding, errors=errors) as f: + data = yaml.load(f, **kwargs) + elif yaml_string: + data = yaml.load(yaml_string, **kwargs) + else: + raise BoxError('from_yaml requires a string or filename') + return data + + +# Helper functions + + +def _safe_key(key): + try: + return str(key) + except UnicodeEncodeError: + return key.encode("utf-8", "ignore") + + +def _safe_attr(attr, camel_killer=False, replacement_char='x'): + """Convert a key into something that is accessible as an attribute""" + allowed = string.ascii_letters + string.digits + '_' + + attr = _safe_key(attr) + + if 
camel_killer: + attr = _camel_killer(attr) + + attr = attr.replace(' ', '_') + + out = '' + for character in attr: + out += character if character in allowed else "_" + out = out.strip("_") + + try: + int(out[0]) + except (ValueError, IndexError): + pass + else: + out = '{0}{1}'.format(replacement_char, out) + + if out in kwlist: + out = '{0}{1}'.format(replacement_char, out) + + return re.sub('_+', '_', out) + + +def _camel_killer(attr): + """ + CamelKiller, qu'est-ce que c'est? + + Taken from http://stackoverflow.com/a/1176023/3244542 + """ + try: + attr = str(attr) + except UnicodeEncodeError: + attr = attr.encode("utf-8", "ignore") + + s1 = _first_cap_re.sub(r'\1_\2', attr) + s2 = _all_cap_re.sub(r'\1_\2', s1) + return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else + s2.lower()) + + +def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs): + out_list = [] + for i in iterable: + if isinstance(i, dict): + out_list.append(box_class(i, **kwargs)) + elif isinstance(i, list) or (recreate_tuples and isinstance(i, tuple)): + out_list.append(_recursive_tuples(i, box_class, + recreate_tuples, **kwargs)) + else: + out_list.append(i) + return tuple(out_list) + + +def _conversion_checks(item, keys, box_config, check_only=False, + pre_check=False): + """ + Internal use for checking if a duplicate safe attribute already exists + + :param item: Item to see if a dup exists + :param keys: Keys to check against + :param box_config: Easier to pass in than ask for specfic items + :param check_only: Don't bother doing the conversion work + :param pre_check: Need to add the item to the list of keys to check + :return: the original unmodified key, if exists and not check_only + """ + if box_config['box_duplicates'] != 'ignore': + if pre_check: + keys = list(keys) + [item] + + key_list = [(k, + _safe_attr(k, camel_killer=box_config['camel_killer_box'], + replacement_char=box_config['box_safe_prefix'] + )) for k in keys] + if len(key_list) > 
len(set(x[1] for x in key_list)): + seen = set() + dups = set() + for x in key_list: + if x[1] in seen: + dups.add("{0}({1})".format(x[0], x[1])) + seen.add(x[1]) + if box_config['box_duplicates'].startswith("warn"): + warnings.warn('Duplicate conversion attributes exist: ' + '{0}'.format(dups)) + else: + raise BoxError('Duplicate conversion attributes exist: ' + '{0}'.format(dups)) + if check_only: + return + # This way will be slower for warnings, as it will have double work + # But faster for the default 'ignore' + for k in keys: + if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], + replacement_char=box_config['box_safe_prefix']): + return k + + +def _get_box_config(cls, kwargs): + return { + # Internal use only + '__converted': set(), + '__box_heritage': kwargs.pop('__box_heritage', None), + '__created': False, + '__ordered_box_values': [], + # Can be changed by user after box creation + 'default_box': kwargs.pop('default_box', False), + 'default_box_attr': kwargs.pop('default_box_attr', cls), + 'conversion_box': kwargs.pop('conversion_box', True), + 'box_safe_prefix': kwargs.pop('box_safe_prefix', 'x'), + 'frozen_box': kwargs.pop('frozen_box', False), + 'camel_killer_box': kwargs.pop('camel_killer_box', False), + 'modify_tuples_box': kwargs.pop('modify_tuples_box', False), + 'box_duplicates': kwargs.pop('box_duplicates', 'ignore'), + 'ordered_box': kwargs.pop('ordered_box', False) + } + + +class Box(dict): + """ + Improved dictionary access through dot notation with additional tools. + + :param default_box: Similar to defaultdict, return a default value + :param default_box_attr: Specify the default replacement. 
+ WARNING: If this is not the default 'Box', it will not be recursive + :param frozen_box: After creation, the box cannot be modified + :param camel_killer_box: Convert CamelCase to snake_case + :param conversion_box: Check for near matching keys as attributes + :param modify_tuples_box: Recreate incoming tuples with dicts into Boxes + :param box_it_up: Recursively create all Boxes from the start + :param box_safe_prefix: Conversion box prefix for unsafe attributes + :param box_duplicates: "ignore", "error" or "warn" when duplicates exists + in a conversion_box + :param ordered_box: Preserve the order of keys entered into the box + """ + + _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml', + 'from_yaml', 'from_json'] + + def __new__(cls, *args, **kwargs): + """ + Due to the way pickling works in python 3, we need to make sure + the box config is created as early as possible. + """ + obj = super(Box, cls).__new__(cls, *args, **kwargs) + obj._box_config = _get_box_config(cls, kwargs) + return obj + + def __init__(self, *args, **kwargs): + self._box_config = _get_box_config(self.__class__, kwargs) + if self._box_config['ordered_box']: + self._box_config['__ordered_box_values'] = [] + if (not self._box_config['conversion_box'] and + self._box_config['box_duplicates'] != "ignore"): + raise BoxError('box_duplicates are only for conversion_boxes') + if len(args) == 1: + if isinstance(args[0], basestring): + raise ValueError('Cannot extrapolate Box from string') + if isinstance(args[0], Mapping): + for k, v in args[0].items(): + if v is args[0]: + v = self + self[k] = v + self.__add_ordered(k) + elif isinstance(args[0], Iterable): + for k, v in args[0]: + self[k] = v + self.__add_ordered(k) + + else: + raise ValueError('First argument must be mapping or iterable') + elif args: + raise TypeError('Box expected at most 1 argument, ' + 'got {0}'.format(len(args))) + + box_it = kwargs.pop('box_it_up', False) + for k, v in kwargs.items(): + if args and 
isinstance(args[0], Mapping) and v is args[0]: + v = self + self[k] = v + self.__add_ordered(k) + + if (self._box_config['frozen_box'] or box_it or + self._box_config['box_duplicates'] != 'ignore'): + self.box_it_up() + + self._box_config['__created'] = True + + def __add_ordered(self, key): + if (self._box_config['ordered_box'] and + key not in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].append(key) + + def box_it_up(self): + """ + Perform value lookup for all items in current dictionary, + generating all sub Box objects, while also running `box_it_up` on + any of those sub box objects. + """ + for k in self: + _conversion_checks(k, self.keys(), self._box_config, + check_only=True) + if self[k] is not self and hasattr(self[k], 'box_it_up'): + self[k].box_it_up() + + def __hash__(self): + if self._box_config['frozen_box']: + hashing = 54321 + for item in self.items(): + hashing ^= hash(item) + return hashing + raise TypeError("unhashable type: 'Box'") + + def __dir__(self): + allowed = string.ascii_letters + string.digits + '_' + kill_camel = self._box_config['camel_killer_box'] + items = set(dir(dict) + ['to_dict', 'to_json', + 'from_json', 'box_it_up']) + # Only show items accessible by dot notation + for key in self.keys(): + key = _safe_key(key) + if (' ' not in key and key[0] not in string.digits and + key not in kwlist): + for letter in key: + if letter not in allowed: + break + else: + items.add(key) + + for key in self.keys(): + key = _safe_key(key) + if key not in items: + if self._box_config['conversion_box']: + key = _safe_attr(key, camel_killer=kill_camel, + replacement_char=self._box_config[ + 'box_safe_prefix']) + if key: + items.add(key) + if kill_camel: + snake_key = _camel_killer(key) + if snake_key: + items.remove(key) + items.add(snake_key) + + if yaml_support: + items.add('to_yaml') + items.add('from_yaml') + + return list(items) + + def get(self, key, default=None): + try: + return self[key] + except 
KeyError: + if isinstance(default, dict) and not isinstance(default, Box): + return Box(default) + if isinstance(default, list) and not isinstance(default, BoxList): + return BoxList(default) + return default + + def copy(self): + return self.__class__(super(self.__class__, self).copy()) + + def __copy__(self): + return self.__class__(super(self.__class__, self).copy()) + + def __deepcopy__(self, memodict=None): + out = self.__class__() + memodict = memodict or {} + memodict[id(self)] = out + for k, v in self.items(): + out[copy.deepcopy(k, memodict)] = copy.deepcopy(v, memodict) + return out + + def __setstate__(self, state): + self._box_config = state['_box_config'] + self.__dict__.update(state) + + def __getitem__(self, item, _ignore_default=False): + try: + value = super(Box, self).__getitem__(item) + except KeyError as err: + if item == '_box_config': + raise BoxKeyError('_box_config should only exist as an ' + 'attribute and is never defaulted') + if self._box_config['default_box'] and not _ignore_default: + return self.__get_default(item) + raise BoxKeyError(str(err)) + else: + return self.__convert_and_store(item, value) + + def keys(self): + if self._box_config['ordered_box']: + return self._box_config['__ordered_box_values'] + return super(Box, self).keys() + + def values(self): + return [self[x] for x in self.keys()] + + def items(self): + return [(x, self[x]) for x in self.keys()] + + def __get_default(self, item): + default_value = self._box_config['default_box_attr'] + if default_value is self.__class__: + return self.__class__(__box_heritage=(self, item), + **self.__box_config()) + elif isinstance(default_value, Callable): + return default_value() + elif hasattr(default_value, 'copy'): + return default_value.copy() + return default_value + + def __box_config(self): + out = {} + for k, v in self._box_config.copy().items(): + if not k.startswith("__"): + out[k] = v + return out + + def __convert_and_store(self, item, value): + if item in 
self._box_config['__converted']: + return value + if isinstance(value, dict) and not isinstance(value, Box): + value = self.__class__(value, __box_heritage=(self, item), + **self.__box_config()) + self[item] = value + elif isinstance(value, list) and not isinstance(value, BoxList): + if self._box_config['frozen_box']: + value = _recursive_tuples(value, self.__class__, + recreate_tuples=self._box_config[ + 'modify_tuples_box'], + __box_heritage=(self, item), + **self.__box_config()) + else: + value = BoxList(value, __box_heritage=(self, item), + box_class=self.__class__, + **self.__box_config()) + self[item] = value + elif (self._box_config['modify_tuples_box'] and + isinstance(value, tuple)): + value = _recursive_tuples(value, self.__class__, + recreate_tuples=True, + __box_heritage=(self, item), + **self.__box_config()) + self[item] = value + self._box_config['__converted'].add(item) + return value + + def __create_lineage(self): + if (self._box_config['__box_heritage'] and + self._box_config['__created']): + past, item = self._box_config['__box_heritage'] + if not past[item]: + past[item] = self + self._box_config['__box_heritage'] = None + + def __getattr__(self, item): + try: + try: + value = self.__getitem__(item, _ignore_default=True) + except KeyError: + value = object.__getattribute__(self, item) + except AttributeError as err: + if item == "__getstate__": + raise AttributeError(item) + if item == '_box_config': + raise BoxError('_box_config key must exist') + kill_camel = self._box_config['camel_killer_box'] + if self._box_config['conversion_box'] and item: + k = _conversion_checks(item, self.keys(), self._box_config) + if k: + return self.__getitem__(k) + if kill_camel: + for k in self.keys(): + if item == _camel_killer(k): + return self.__getitem__(k) + if self._box_config['default_box']: + return self.__get_default(item) + raise BoxKeyError(str(err)) + else: + if item == '_box_config': + return value + return self.__convert_and_store(item, value) + + 
def __setitem__(self, key, value): + if (key != '_box_config' and self._box_config['__created'] and + self._box_config['frozen_box']): + raise BoxError('Box is frozen') + if self._box_config['conversion_box']: + _conversion_checks(key, self.keys(), self._box_config, + check_only=True, pre_check=True) + super(Box, self).__setitem__(key, value) + self.__add_ordered(key) + self.__create_lineage() + + def __setattr__(self, key, value): + if (key != '_box_config' and self._box_config['frozen_box'] and + self._box_config['__created']): + raise BoxError('Box is frozen') + if key in self._protected_keys: + raise AttributeError("Key name '{0}' is protected".format(key)) + if key == '_box_config': + return object.__setattr__(self, key, value) + try: + object.__getattribute__(self, key) + except (AttributeError, UnicodeEncodeError): + if (key not in self.keys() and + (self._box_config['conversion_box'] or + self._box_config['camel_killer_box'])): + if self._box_config['conversion_box']: + k = _conversion_checks(key, self.keys(), + self._box_config) + self[key if not k else k] = value + elif self._box_config['camel_killer_box']: + for each_key in self: + if key == _camel_killer(each_key): + self[each_key] = value + break + else: + self[key] = value + else: + object.__setattr__(self, key, value) + self.__add_ordered(key) + self.__create_lineage() + + def __delitem__(self, key): + if self._box_config['frozen_box']: + raise BoxError('Box is frozen') + super(Box, self).__delitem__(key) + if (self._box_config['ordered_box'] and + key in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].remove(key) + + def __delattr__(self, item): + if self._box_config['frozen_box']: + raise BoxError('Box is frozen') + if item == '_box_config': + raise BoxError('"_box_config" is protected') + if item in self._protected_keys: + raise AttributeError("Key name '{0}' is protected".format(item)) + try: + object.__getattribute__(self, item) + except AttributeError: + 
del self[item] + else: + object.__delattr__(self, item) + if (self._box_config['ordered_box'] and + item in self._box_config['__ordered_box_values']): + self._box_config['__ordered_box_values'].remove(item) + + def pop(self, key, *args): + if args: + if len(args) != 1: + raise BoxError('pop() takes only one optional' + ' argument "default"') + try: + item = self[key] + except KeyError: + return args[0] + else: + del self[key] + return item + try: + item = self[key] + except KeyError: + raise BoxKeyError('{0}'.format(key)) + else: + del self[key] + return item + + def clear(self): + self._box_config['__ordered_box_values'] = [] + super(Box, self).clear() + + def popitem(self): + try: + key = next(self.__iter__()) + except StopIteration: + raise BoxKeyError('Empty box') + return key, self.pop(key) + + def __repr__(self): + return ''.format(str(self.to_dict())) + + def __str__(self): + return str(self.to_dict()) + + def __iter__(self): + for key in self.keys(): + yield key + + def __reversed__(self): + for key in reversed(list(self.keys())): + yield key + + def to_dict(self): + """ + Turn the Box and sub Boxes back into a native + python dictionary. 
+ + :return: python dictionary of this Box + """ + out_dict = dict(self) + for k, v in out_dict.items(): + if v is self: + out_dict[k] = out_dict + elif hasattr(v, 'to_dict'): + out_dict[k] = v.to_dict() + elif hasattr(v, 'to_list'): + out_dict[k] = v.to_list() + return out_dict + + def update(self, item=None, **kwargs): + if not item: + item = kwargs + iter_over = item.items() if hasattr(item, 'items') else item + for k, v in iter_over: + if isinstance(v, dict): + # Box objects must be created in case they are already + # in the `converted` box_config set + v = self.__class__(v) + if k in self and isinstance(self[k], dict): + self[k].update(v) + continue + if isinstance(v, list): + v = BoxList(v) + try: + self.__setattr__(k, v) + except (AttributeError, TypeError): + self.__setitem__(k, v) + + def setdefault(self, item, default=None): + if item in self: + return self[item] + + if isinstance(default, dict): + default = self.__class__(default) + if isinstance(default, list): + default = BoxList(default) + self[item] = default + return default + + def to_json(self, filename=None, + encoding="utf-8", errors="strict", **json_kwargs): + """ + Transform the Box object into a JSON string. + + :param filename: If provided will save to file + :param encoding: File encoding + :param errors: How to handle encoding errors + :param json_kwargs: additional arguments to pass to json.dump(s) + :return: string of JSON or return of `json.dump` + """ + return _to_json(self.to_dict(), filename=filename, + encoding=encoding, errors=errors, **json_kwargs) + + @classmethod + def from_json(cls, json_string=None, filename=None, + encoding="utf-8", errors="strict", **kwargs): + """ + Transform a json object string into a Box object. If the incoming + json is a list, you must use BoxList.from_json. 
+ + :param json_string: string to pass to `json.loads` + :param filename: filename to open and pass to `json.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param kwargs: parameters to pass to `Box()` or `json.loads` + :return: Box object from json data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_json(json_string, filename=filename, + encoding=encoding, errors=errors, **kwargs) + + if not isinstance(data, dict): + raise BoxError('json data not returned as a dictionary, ' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + if yaml_support: + def to_yaml(self, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + """ + Transform the Box object into a YAML string. + + :param filename: If provided will save to file + :param default_flow_style: False will recursively dump dicts + :param encoding: File encoding + :param errors: How to handle encoding errors + :param yaml_kwargs: additional arguments to pass to yaml.dump + :return: string of YAML or return of `yaml.dump` + """ + return _to_yaml(self.to_dict(), filename=filename, + default_flow_style=default_flow_style, + encoding=encoding, errors=errors, **yaml_kwargs) + + @classmethod + def from_yaml(cls, yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + loader=yaml.SafeLoader, **kwargs): + """ + Transform a yaml object string into a Box object. 
+ + :param yaml_string: string to pass to `yaml.load` + :param filename: filename to open and pass to `yaml.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param loader: YAML Loader, defaults to SafeLoader + :param kwargs: parameters to pass to `Box()` or `yaml.load` + :return: Box object from yaml data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_yaml(yaml_string=yaml_string, filename=filename, + encoding=encoding, errors=errors, + Loader=loader, **kwargs) + if not isinstance(data, dict): + raise BoxError('yaml data not returned as a dictionary' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + +class BoxList(list): + """ + Drop in replacement of list, that converts added objects to Box or BoxList + objects as necessary. + """ + + def __init__(self, iterable=None, box_class=Box, **box_options): + self.box_class = box_class + self.box_options = box_options + self.box_org_ref = self.box_org_ref = id(iterable) if iterable else 0 + if iterable: + for x in iterable: + self.append(x) + if box_options.get('frozen_box'): + def frozen(*args, **kwargs): + raise BoxError('BoxList is frozen') + + for method in ['append', 'extend', 'insert', 'pop', + 'remove', 'reverse', 'sort']: + self.__setattr__(method, frozen) + + def __delitem__(self, key): + if self.box_options.get('frozen_box'): + raise BoxError('BoxList is frozen') + super(BoxList, self).__delitem__(key) + + def __setitem__(self, key, value): + if self.box_options.get('frozen_box'): + raise BoxError('BoxList is frozen') + super(BoxList, self).__setitem__(key, value) + + def append(self, p_object): + if isinstance(p_object, dict): + try: + p_object = self.box_class(p_object, **self.box_options) + except AttributeError as err: + if 'box_class' in self.__dict__: + raise err + elif isinstance(p_object, list): + try: + p_object = (self if id(p_object) == 
self.box_org_ref else + BoxList(p_object)) + except AttributeError as err: + if 'box_org_ref' in self.__dict__: + raise err + super(BoxList, self).append(p_object) + + def extend(self, iterable): + for item in iterable: + self.append(item) + + def insert(self, index, p_object): + if isinstance(p_object, dict): + p_object = self.box_class(p_object, **self.box_options) + elif isinstance(p_object, list): + p_object = (self if id(p_object) == self.box_org_ref else + BoxList(p_object)) + super(BoxList, self).insert(index, p_object) + + def __repr__(self): + return "".format(self.to_list()) + + def __str__(self): + return str(self.to_list()) + + def __copy__(self): + return BoxList((x for x in self), + self.box_class, + **self.box_options) + + def __deepcopy__(self, memodict=None): + out = self.__class__() + memodict = memodict or {} + memodict[id(self)] = out + for k in self: + out.append(copy.deepcopy(k)) + return out + + def __hash__(self): + if self.box_options.get('frozen_box'): + hashing = 98765 + hashing ^= hash(tuple(self)) + return hashing + raise TypeError("unhashable type: 'BoxList'") + + def to_list(self): + new_list = [] + for x in self: + if x is self: + new_list.append(new_list) + elif isinstance(x, Box): + new_list.append(x.to_dict()) + elif isinstance(x, BoxList): + new_list.append(x.to_list()) + else: + new_list.append(x) + return new_list + + def to_json(self, filename=None, + encoding="utf-8", errors="strict", + multiline=False, **json_kwargs): + """ + Transform the BoxList object into a JSON string. 
+ + :param filename: If provided will save to file + :param encoding: File encoding + :param errors: How to handle encoding errors + :param multiline: Put each item in list onto it's own line + :param json_kwargs: additional arguments to pass to json.dump(s) + :return: string of JSON or return of `json.dump` + """ + if filename and multiline: + lines = [_to_json(item, filename=False, encoding=encoding, + errors=errors, **json_kwargs) for item in self] + with open(filename, 'w', encoding=encoding, errors=errors) as f: + f.write("\n".join(lines).decode('utf-8') if + sys.version_info < (3, 0) else "\n".join(lines)) + else: + return _to_json(self.to_list(), filename=filename, + encoding=encoding, errors=errors, **json_kwargs) + + @classmethod + def from_json(cls, json_string=None, filename=None, encoding="utf-8", + errors="strict", multiline=False, **kwargs): + """ + Transform a json object string into a BoxList object. If the incoming + json is a dict, you must use Box.from_json. + + :param json_string: string to pass to `json.loads` + :param filename: filename to open and pass to `json.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param multiline: One object per line + :param kwargs: parameters to pass to `Box()` or `json.loads` + :return: BoxList object from json data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_json(json_string, filename=filename, encoding=encoding, + errors=errors, multiline=multiline, **kwargs) + + if not isinstance(data, list): + raise BoxError('json data not returned as a list, ' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + if yaml_support: + def to_yaml(self, filename=None, default_flow_style=False, + encoding="utf-8", errors="strict", + **yaml_kwargs): + """ + Transform the BoxList object into a YAML string. 
+ + :param filename: If provided will save to file + :param default_flow_style: False will recursively dump dicts + :param encoding: File encoding + :param errors: How to handle encoding errors + :param yaml_kwargs: additional arguments to pass to yaml.dump + :return: string of YAML or return of `yaml.dump` + """ + return _to_yaml(self.to_list(), filename=filename, + default_flow_style=default_flow_style, + encoding=encoding, errors=errors, **yaml_kwargs) + + @classmethod + def from_yaml(cls, yaml_string=None, filename=None, + encoding="utf-8", errors="strict", + loader=yaml.SafeLoader, + **kwargs): + """ + Transform a yaml object string into a BoxList object. + + :param yaml_string: string to pass to `yaml.load` + :param filename: filename to open and pass to `yaml.load` + :param encoding: File encoding + :param errors: How to handle encoding errors + :param loader: YAML Loader, defaults to SafeLoader + :param kwargs: parameters to pass to `BoxList()` or `yaml.load` + :return: BoxList object from yaml data + """ + bx_args = {} + for arg in kwargs.copy(): + if arg in BOX_PARAMETERS: + bx_args[arg] = kwargs.pop(arg) + + data = _from_yaml(yaml_string=yaml_string, filename=filename, + encoding=encoding, errors=errors, + Loader=loader, **kwargs) + if not isinstance(data, list): + raise BoxError('yaml data not returned as a list' + 'but rather a {0}'.format(type(data).__name__)) + return cls(data, **bx_args) + + def box_it_up(self): + for v in self: + if hasattr(v, 'box_it_up') and v is not self: + v.box_it_up() + + +class ConfigBox(Box): + """ + Modified box object to add object transforms. 
+ + Allows for build in transforms like: + + cns = ConfigBox(my_bool='yes', my_int='5', my_list='5,4,3,3,2') + + cns.bool('my_bool') # True + cns.int('my_int') # 5 + cns.list('my_list', mod=lambda x: int(x)) # [5, 4, 3, 3, 2] + """ + + _protected_keys = dir({}) + ['to_dict', 'bool', 'int', 'float', + 'list', 'getboolean', 'to_json', 'to_yaml', + 'getfloat', 'getint', + 'from_json', 'from_yaml'] + + def __getattr__(self, item): + """Config file keys are stored in lower case, be a little more + loosey goosey""" + try: + return super(ConfigBox, self).__getattr__(item) + except AttributeError: + return super(ConfigBox, self).__getattr__(item.lower()) + + def __dir__(self): + return super(ConfigBox, self).__dir__() + ['bool', 'int', 'float', + 'list', 'getboolean', + 'getfloat', 'getint'] + + def bool(self, item, default=None): + """ Return value of key as a boolean + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: approximated bool of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + return default + raise err + + if isinstance(item, (bool, int)): + return bool(item) + + if (isinstance(item, str) and + item.lower() in ('n', 'no', 'false', 'f', '0')): + return False + + return True if item else False + + def int(self, item, default=None): + """ Return value of key as an int + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: int of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + return default + raise err + return int(item) + + def float(self, item, default=None): + """ Return value of key as a float + + :param item: key of value to transform + :param default: value to return if item does not exist + :return: float of value + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + 
return default + raise err + return float(item) + + def list(self, item, default=None, spliter=",", strip=True, mod=None): + """ Return value of key as a list + + :param item: key of value to transform + :param mod: function to map against list + :param default: value to return if item does not exist + :param spliter: character to split str on + :param strip: clean the list with the `strip` + :return: list of items + """ + try: + item = self.__getattr__(item) + except AttributeError as err: + if default is not None: + return default + raise err + if strip: + item = item.lstrip('[').rstrip(']') + out = [x.strip() if strip else x for x in item.split(spliter)] + if mod: + return list(map(mod, out)) + return out + + # loose configparser compatibility + + def getboolean(self, item, default=None): + return self.bool(item, default) + + def getint(self, item, default=None): + return self.int(item, default) + + def getfloat(self, item, default=None): + return self.float(item, default) + + def __repr__(self): + return ''.format(str(self.to_dict())) + + +class SBox(Box): + """ + ShorthandBox (SBox) allows for + property access of `dict` `json` and `yaml` + """ + _protected_keys = dir({}) + ['to_dict', 'tree_view', 'to_json', 'to_yaml', + 'json', 'yaml', 'from_yaml', 'from_json', + 'dict'] + + @property + def dict(self): + return self.to_dict() + + @property + def json(self): + return self.to_json() + + if yaml_support: + @property + def yaml(self): + return self.to_yaml() + + def __repr__(self): + return ''.format(str(self.to_dict())) diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/config.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/config.py new file mode 100644 index 0000000000000000000000000000000000000000..7fb3e2c3fa0b83089bf93be6c4184333ae19614d --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/config.py @@ -0,0 +1,9 @@ +import numpy as np + +from neurvps.box import Box + +# C is a dict storing all 
the configuration +C = Box() + +# shortcut for C.model +M = Box() diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/datasets.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..502508da5f65f920657a604fae71aa95cc4712b8 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/datasets.py @@ -0,0 +1,184 @@ +import os +import json +import math +import random +import os.path as osp +from glob import glob + +import numpy as np +import torch +import skimage.io +import numpy.linalg as LA +import matplotlib.pyplot as plt +import skimage.transform +from torch.utils.data import Dataset +from torch.utils.data.dataloader import default_collate + +from neurvps.config import C + + +class WireframeDataset(Dataset): + def __init__(self, rootdir, split): + self.rootdir = rootdir + filelist = sorted(glob(f"{rootdir}/*/*.png")) + + self.split = split + if split == "train": + self.filelist = filelist[500:] + self.size = len(self.filelist) * C.io.augmentation_level + elif split == "valid": + self.filelist = [f for f in filelist[:500] if "a1" not in f] + self.size = len(self.filelist) + print(f"n{split}:", self.size) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + iname = self.filelist[idx % len(self.filelist)] + image = skimage.io.imread(iname).astype(float)[:, :, :3] + image = np.rollaxis(image, 2).copy() + with np.load(iname.replace(".png", "_label.npz")) as npz: + vpts = npz["vpts"] + return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()}) + + +class ScanNetDataset(Dataset): + def __init__(self, rootdir, split): + self.rootdir = rootdir + self.split = split + + dirs = np.genfromtxt(f"{rootdir}/scannetv2_{split}.txt", dtype=str) + self.filelist = sum([glob(f"{rootdir}/{d}/*.png") for d in dirs], []) + if split == "train": + self.size = len(self.filelist) * C.io.augmentation_level + elif split == "valid": 
+ random.seed(0) + random.shuffle(self.filelist) + self.filelist = self.filelist[:500] + self.size = len(self.filelist) + print(f"n{split}:", self.size) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + iname = self.filelist[idx % len(self.filelist)] + image = skimage.io.imread(iname)[:, :, :3] + with np.load(iname.replace("color.png", "vanish.npz")) as npz: + vpts = np.array([npz[d] for d in ["x", "y", "z"]]) + vpts[:, 1] *= -1 + # plt.imshow(image) + # cc = ["blue", "cyan", "orange"] + # for c, w in zip(cc, vpts): + # x = w[0] / w[2] * C.io.focal_length * 256 + 256 + # y = -w[1] / w[2] * C.io.focal_length * 256 + 256 + # plt.scatter(x, y, color=c) + # for xy in np.linspace(0, 512, 10): + # plt.plot( + # [x, xy, x, xy, x, 0, x, 511], + # [y, 0, y, 511, y, xy, y, xy], + # color=c, + # ) + # plt.show() + image = np.rollaxis(image.astype(np.float), 2).copy() + return (torch.tensor(image).float(), {"vpts": torch.tensor(vpts).float()}) + + +class Tmm17Dataset(Dataset): + def __init__(self, rootdir, split): + self.rootdir = rootdir + self.split = split + + filelist = np.genfromtxt(f"{rootdir}/{split}.txt", dtype=str) + self.filelist = [osp.join(rootdir, f) for f in filelist] + if split == "train": + self.size = len(self.filelist) * C.io.augmentation_level + elif split == "valid": + self.size = len(self.filelist) + print(f"n{split}:", self.size) + + def __len__(self): + return self.size + + def __getitem__(self, idx): + iname = self.filelist[idx % len(self.filelist)] + image = skimage.io.imread(iname) + tname = iname.replace(".jpg", ".txt") + axy, bxy = np.genfromtxt(tname, skip_header=1) + + a0, a1 = np.array(axy[:2]), np.array(axy[2:]) + b0, b1 = np.array(bxy[:2]), np.array(bxy[2:]) + xy = intersect(a0, a1, b0, b1) - 0.5 + xy[0] *= 512 / image.shape[1] + xy[1] *= 512 / image.shape[0] + image = skimage.transform.resize(image, (512, 512)) + if image.ndim == 2: + image = image[:, :, None].repeat(3, 2) + if self.split == "train": + i, j, h, w = 
crop(image.shape) + else: + i, j, h, w = 0, 0, image.shape[0], image.shape[1] + image = skimage.transform.resize(image[j : j + h, i : i + w], (512, 512)) + xy[1] = (xy[1] - j) / h * 512 + xy[0] = (xy[0] - i) / w * 512 + # plt.imshow(image) + # plt.scatter(xy[0], xy[1]) + # plt.show() + vpts = np.array([[xy[0] / 256 - 1, 1 - xy[1] / 256, C.io.focal_length]]) + vpts[0] /= LA.norm(vpts[0]) + + image, vpts = augment(image, vpts, idx // len(self.filelist)) + image = np.rollaxis(image, 2) + return (torch.tensor(image * 255).float(), {"vpts": torch.tensor(vpts).float()}) + + +def augment(image, vpts, division): + if division == 1: # left-right flip + return image[:, ::-1].copy(), (vpts * [-1, 1, 1]).copy() + elif division == 2: # up-down flip + return image[::-1, :].copy(), (vpts * [1, -1, 1]).copy() + elif division == 3: # all flip + return image[::-1, ::-1].copy(), (vpts * [-1, -1, 1]).copy() + return image, vpts + + +def intersect(a0, a1, b0, b1): + c0 = ccw(a0, a1, b0) + c1 = ccw(a0, a1, b1) + d0 = ccw(b0, b1, a0) + d1 = ccw(b0, b1, a1) + if abs(d1 - d0) > abs(c1 - c0): + return (a0 * d1 - a1 * d0) / (d1 - d0) + else: + return (b0 * c1 - b1 * c0) / (c1 - c0) + + +def ccw(c, a, b): + a0 = a - c + b0 = b - c + return a0[0] * b0[1] - b0[0] * a0[1] + + +def crop(shape, scale=(0.35, 1.0), ratio=(9 / 16, 16 / 9)): + for attempt in range(20): + area = shape[0] * shape[1] + target_area = random.uniform(*scale) * area + aspect_ratio = random.uniform(*ratio) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if h <= shape[0] and w <= shape[1]: + j = random.randint(0, shape[0] - h) + i = random.randint(0, shape[1] - w) + return i, j, h, w + + # Fallback + w = min(shape[0], shape[1]) + i = (shape[1] - w) // 2 + j = (shape[0] - w) // 2 + return i, j, w, w diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__init__.py 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66ae8864d8bd89d2f5a85435b7c89bf339b55af3 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__init__.py @@ -0,0 +1,2 @@ +from .hourglass_pose import hg +from .vanishing_net import VanishingNet diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c635ad31385f1c63c1baf6f73ec691c8d841c26 Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f86df3984fce8aee0367028546901204a290d46 Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/conic.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec57aa46d0a60c5f3d834246c63031065309477 Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/deformable.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88b04502ae1ac736bb2b8415ba2f9eb304246bdf Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/hourglass_pose.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/vanishing_net.cpython-38.pyc b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/vanishing_net.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9757be8c3ab255c94a547912ed39fa019dceac1d Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/__pycache__/vanishing_net.cpython-38.pyc differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/conic.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/conic.py new file mode 100644 index 0000000000000000000000000000000000000000..0d56c3da6c55b6d9056d0f2b43438e71eb76029a --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/conic.py @@ -0,0 +1,50 @@ +import torch +from torch import nn +from torch.nn.modules.utils import _pair + +from neurvps.config import M +from neurvps.models.deformable import DeformConv + + +class ConicConv(nn.Module): + def __init__(self, c_in, c_out, kernel_size=3, bias=False): + super().__init__() + self.deform_conv = DeformConv( + c_in, + c_out, + kernel_size=kernel_size, + stride=1, + padding=1, + im2col_step=M.im2col_step, + bias=bias, + ) + self.kernel_size = _pair(kernel_size) + + def forward(self, input, vpts): + N, C, H, W = input.shape + Kh, Kw = self.kernel_size + + with torch.no_grad(): + ys, xs = torch.meshgrid( + torch.arange(0, H).float().to(input.device), + torch.arange(0, W).float().to(input.device), + ) + # d: [N, H, W, 2] + d = torch.cat( + [ + (vpts[:, 0, None, 
None] - ys)[..., None], + (vpts[:, 1, None, None] - xs)[..., None], + ], + dim=-1, + ) + d /= torch.norm(d, dim=-1, keepdim=True).clamp(min=1e-5) + n = torch.cat([-d[..., 1:2], d[..., 0:1]], dim=-1) + + offset = torch.zeros((N, H, W, Kh, Kw, 2)).to(input.device) + for i in range(Kh): + for j in range(Kw): + offset[..., i, j, :] = d * (1 - i) + n * (1 - j) + offset[..., i, j, 0] += 1 - i + offset[..., i, j, 1] += 1 - j + offset = offset.permute(0, 3, 4, 5, 1, 2).reshape((N, -1, H, W)) + return self.deform_conv(input, offset) diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_deps b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_deps new file mode 100644 index 0000000000000000000000000000000000000000..eb3bfc41f8734871526cd6ea6ebd2163ffe242ae Binary files /dev/null and b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_deps differ diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_log b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..42b21d53db3a1e9adaee326d1771f5943bc9ab69 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/.ninja_log @@ -0,0 +1,7 @@ +# ninja log v5 +0 16103 1705843691094256220 deform_conv_cuda.cuda.o faf06c0154fdd95 +0 17978 1705843692978288598 deform_conv.o 9bdf84a104d95de9 +17978 18346 1705843693342294852 DCN.so d5002c9f854b5479 +1 14024 1720225807965090925 deform_conv_cuda.cuda.o 12c1d8fa6984d93 +1 16540 1720225810493145171 deform_conv.o 84f97a3edd60cf1e +16540 16855 1720225810805151862 DCN.so d5002c9f854b5479 diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/DCN.so b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/DCN.so new file mode 100644 
index 0000000000000000000000000000000000000000..4343ec02d3a6186831523c6c0f9cb74f82024caf --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/DCN.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d858d752cacb6eedb4f05258437d2dfdf45a2a4e8fbbba467b8e7f8553b0140 +size 580640 diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/build.ninja b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..4fc2e71389e6bc69e78053165cebdef4cc1d3804 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/build.ninja @@ -0,0 +1,30 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /usr/local/cuda/bin/nvcc + +cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++14 -O3 +post_cflags = +cuda_cflags = -DTORCH_EXTENSION_NAME=DCN -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\"_gcc\" -DPYBIND11_STDLIB=\"_libstdcpp\" -DPYBIND11_BUILD_ABI=\"_cxxabi1011\" -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -isystem /opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/TH -isystem 
/opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /opt/conda/envs/neurvps/include/python3.8 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 --compiler-options '-fPIC' -std=c++14 +cuda_post_cflags = +ldflags = -shared -L/opt/conda/envs/neurvps/lib/python3.8/site-packages/torch/lib -lc10 -lc10_cuda -ltorch_cpu -ltorch_cuda_cu -ltorch_cuda_cpp -ltorch -ltorch_python -L/usr/local/cuda/lib64 -lcudart + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc $cuda_cflags -c $in -o $out $cuda_post_cflags + +rule link + command = $cxx $in $ldflags -o $out + +build deform_conv_cuda.cuda.o: cuda_compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu +build deform_conv.o: compile /root/dev/junhee/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp + +build DCN.so: link deform_conv_cuda.cuda.o deform_conv.o + +default DCN.so + diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o new file mode 100644 index 0000000000000000000000000000000000000000..5bf6ea6b3d2484bf8f04b8ae51752dbab69a1804 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe3c7f68e8eefb0ce25c505d4e1c74ebdc200d2bf2dbdb335750788635a1e114 +size 234296 diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o new file mode 100644 index 0000000000000000000000000000000000000000..e5ac751f871f21423258a5bec8b55e76dc3d5ba2 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/build/DCN/deform_conv_cuda.cuda.o @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b0f98276530eb69dd8ad586e105adb457b4f506c4acbfe8418d192f49dcf7e +size 603176 diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d64ac48f32efda5836af839024b7239864f2ff3 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv.cpp @@ -0,0 +1,75 @@ +#include "deform_conv_cpu.h" +#include "deform_conv_cuda.h" + +at::Tensor +deform_conv_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + if (input.type().is_cuda()) + { + return deform_conv_cuda_forward(input, weight, bias, offset, + kernel_h, kernel_w, + stride_h, stride_w, + pad_h, pad_w, + dilation_h, dilation_w, + group, + deformable_group, + im2col_step); + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +deform_conv_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const 
int group, + const int deformable_group, + const int im2col_step) +{ + if (input.type().is_cuda()) + { + return deform_conv_cuda_backward(input, + weight, + bias, + offset, + grad_output, + kernel_h, kernel_w, + stride_h, stride_w, + pad_h, pad_w, + dilation_h, dilation_w, + group, + deformable_group, + im2col_step); + } + AT_ERROR("Not implemented on the CPU"); +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_conv_forward", &deform_conv_forward, "Backward pass of deformable convolution"); + m.def("deform_conv_backward", &deform_conv_backward, "Backward pass of deformable convolution"); +} diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cpu.h b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..8f4c76a54ff93d50615abd42610ab8f6d350e629 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cpu.h @@ -0,0 +1,39 @@ +#pragma once +#include + +at::Tensor +deform_conv_cpu_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + +std::vector +deform_conv_cpu_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + + diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu 
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..2a93c49ac7cae61c8d8055993bef0f925cea32a8 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.cu @@ -0,0 +1,271 @@ +#include +#include "deform_im2col_cuda.cuh" + +#include +#include +#include +#include + +// #include +// #include +// #include + +// extern THCState *state; + +// author: Charles Shang +// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu + + +at::Tensor +deform_conv_cuda_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + // THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask)); + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); + AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, 
im2col_step_); + + AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), + "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group); + + // printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h); + // printf("Channels: %d %d\n", channels, channels_kernel); + // printf("Channels: %d %d\n", channels_out, channels_kernel); + + AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); + + AT_ASSERTM(channels == (channels_kernel * group), + "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); + + const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + auto output = at::empty({batch * height_out * width_out, channels_out}, input.options()); + + // prepare group weight and bias + auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto bias_g = bias.view({group, channels_out/group}); + + // define alias for easy use + const int batch_n = im2col_step_; + const int per_input_size = channels * height * width; + const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); + auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out}); + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options()); + AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] { + deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, 
pad_w, stride_h, stride_w, dilation_h, dilation_w, + deformable_group, + columns.data()); + + })); + + // auto columns_m = columns.t(); + // auto weight_m = weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t(); + // output = at::addmm(bias, columns_m, weight_m); + auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); + auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group}); + for (int g = 0; g < group; ++g) + { + auto columns_gm = columns_g.select(0, g).t(); + auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); + auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm); + output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group}); + } + + } + + output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous(); + + return output; +} + +std::vector deform_conv_cuda_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step) +{ + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor"); + AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int 
width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + const int batch_ = grad_output.size(0); + const int channels_out_ = grad_output.size(1); + const int height_out_ = grad_output.size(2); + const int width_out_ = grad_output.size(3); + + const int im2col_step_ = std::min(im2col_step, batch); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + AT_ASSERTM((channels % group == 0) && (channels_out % group == 0), + "channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group); + + AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w, + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_); + + AT_ASSERTM(channels == (channels_kernel * group), + "Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); + + const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + AT_ASSERTM(batch == batch_, + "Input shape and grad_out batch wont match: (%d vs %d).", batch, batch_); + + AT_ASSERTM(channels_out == channels_out_, + "Input shape and grad_out channels_out wont match: (%d vs %d).", channels_out, channels_out_); + + AT_ASSERTM(height_out == height_out_ && width_out == width_out_, + "Input shape and grad_out shape wont match: (%d x %d vs %d x %d).", height_out, height_out_, width_out, width_out_); + + auto grad_input = at::zeros_like(input); + auto grad_offset = at::zeros_like(offset); + auto grad_weight = at::zeros_like(weight); + auto grad_bias = at::zeros_like(bias); + + // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out}); + // auto weight_m = 
weight.view({channels_out, channels_kernel * kernel_h * kernel_w}).t(); + // columns = at::mm(weight_m, grad_output_m); + + // prepare group weight and bias + auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w}); + auto grad_bias_g = grad_bias.view({group, channels_out/group}); + + const int batch_n = im2col_step_; + const int per_input_size = channels * height * width; + const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3); + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, height_out, width_out}); + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, height_out, width_out}); + auto ones = at::ones({batch_n * height_out * width_out}, input.options()); + auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * 1 * height_out * width_out}, input.options()); + auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out}); + for (int g = 0; g < group; ++g) + { + auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); + auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t(); + columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm); + } + + AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] { + deformable_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(), + columns.data(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + 
grad_offset.data() + n * im2col_step_ * per_offset_size); + // gradient w.r.t. input data + deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + columns.data(), + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + grad_input.data() + n * im2col_step_ * per_input_size); + + // gradient w.r.t. weight, dWeight should accumulate across the batch and group + deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + n * im2col_step_ * per_offset_size, + batch_n, channels, height, width, + height_out, width_out, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, + columns.data()); + + })); + + // auto grad_output_m = grad_output.permute({1, 0, 2, 3}).contiguous().view({channels_out, batch * height_out * width_out}); + // grad_weight = at::mm(grad_output_m, columns.t()).view_as(weight); + // grad_bias = at::mv(grad_output_m, ones); + // auto grad_output_g = grad_output.view({batch, group, channels_out/group, height_out, width_out}); + // auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch * height_out * width_out}); + for (int g = 0; g < group; ++g) + { + auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3}).contiguous().view({channels_out/group, batch_n * height_out * width_out}); + auto columns_gm = columns_g.select(0, g).t(); + auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}); + auto grad_bias_gm = grad_bias_g.select(0, g); + grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g)); + grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones); + } + + } + + return { + grad_input, grad_offset, 
grad_weight, grad_bias + }; +} diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.h b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..61d811a9c133dad1f73973909d00125aea5aea9b --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_conv_cuda.h @@ -0,0 +1,38 @@ +#pragma once +#include + +at::Tensor +deform_conv_cuda_forward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + +std::vector +deform_conv_cuda_backward(const at::Tensor &input, + const at::Tensor &weight, + const at::Tensor &bias, + const at::Tensor &offset, + const at::Tensor &grad_output, + const int kernel_h, + const int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const int im2col_step); + diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b49033e9a06c96ae997331d367c2b886660d2e78 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/cpp/deform_im2col_cuda.cuh @@ -0,0 +1,388 @@ +#include +#include +#include + +#include +#include + +// #include +#include +// #include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int 
CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t 
dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_im, const scalar_t *data_offset, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int 
pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + // launch channels * batch_size * height_col * width_col cores + CUDA_KERNEL_LOOP(index, n) + { + // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) + // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis + // NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow) + // here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis + + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + 
h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const scalar_t map_h = i * dilation_h + offset_h; + //const scalar_t map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +template +__global__ void deformable_col2im_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset 
+ (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_im, + const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_offset) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / 
height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, 
width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + } +} + +template +void deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_im, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, scalar_t* data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_im, data_offset, height_im, width_im, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, channels, deformable_group, height_col, width_col, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* data_col, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, scalar_t* grad_im){ + + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; + deformable_col2im_gpu_kernel + <<>>( + num_kernels, data_col, data_offset, channels, height_im, width_im, 
+ kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void deformable_col2im_coord_cuda(cudaStream_t stream, + const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, + scalar_t* grad_offset) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; + const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; + deformable_col2im_coord_gpu_kernel + <<>>( + num_kernels, data_col, data_im, data_offset, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, + grad_offset); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); + } +} \ No newline at end of file diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/deformable.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/deformable.py new file mode 100644 index 0000000000000000000000000000000000000000..107382556f1712dcf0a83b41fce3d9b024819014 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/deformable.py @@ -0,0 +1,193 @@ +import os +import math
+import warnings +from glob import glob + +import torch +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair +from torch.autograd.function import once_differentiable + + +def load_cpp_ext(ext_name): + root_dir = os.path.join(os.path.split(__file__)[0]) + src_dir = os.path.join(root_dir, "cpp") + tar_dir = os.path.join(src_dir, "build", ext_name) + os.makedirs(tar_dir, exist_ok=True) + srcs = glob(f"{src_dir}/*.cu") + glob(f"{src_dir}/*.cpp") + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from torch.utils.cpp_extension import load + + ext = load( + name=ext_name, + sources=srcs, + extra_cflags=["-O3"], + extra_cuda_cflags=[], + build_directory=tar_dir, + ) + return ext + + +# defer calling load_cpp_ext to make CUDA_VISIBLE_DEVICES happy +DCN = None + + +class DeformConvFunction(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + bias, + stride, + padding, + dilation, + group, + deformable_groups, + im2col_step, + ): + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.kernel_size = _pair(weight.shape[2:4]) + ctx.group = group + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + output = DCN.deform_conv_forward( + input, + weight, + bias, + offset, + ctx.kernel_size[0], + ctx.kernel_size[1], + ctx.stride[0], + ctx.stride[1], + ctx.padding[0], + ctx.padding[1], + ctx.dilation[0], + ctx.dilation[1], + ctx.group, + ctx.deformable_groups, + ctx.im2col_step, + ) + ctx.save_for_backward(input, offset, weight, bias) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight, bias = ctx.saved_tensors + grad_input, grad_offset, grad_weight, grad_bias = DCN.deform_conv_backward( + input, + weight, + bias, + offset, + grad_output, + ctx.kernel_size[0], + ctx.kernel_size[1], + ctx.stride[0], + ctx.stride[1], + ctx.padding[0], + ctx.padding[1], + 
ctx.dilation[0], + ctx.dilation[1], + ctx.group, + ctx.deformable_groups, + ctx.im2col_step, + ) + + return ( + grad_input, + grad_offset, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + None, + ) + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=11, + bias=True, + ): + global DCN + DCN = load_cpp_ext("DCN") + super(DeformConv, self).__init__() + + if in_channels % groups != 0: + raise ValueError( + "in_channels {} must be divisible by groups {}".format( + in_channels, groups + ) + ) + if out_channels % groups != 0: + raise ValueError( + "out_channels {} must be divisible by groups {}".format( + out_channels, groups + ) + ) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.im2col_step = im2col_step + self.use_bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + self.bias = nn.Parameter(torch.Tensor(out_channels)) + self.reset_parameters() + if not self.use_bias: + self.bias.requires_grad = False + + def reset_parameters(self): + nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + if self.use_bias: + fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + nn.init.uniform_(self.bias, -bound, bound) + else: + nn.init.zeros_(self.bias) + + def forward(self, input, offset): + assert ( + 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] + == offset.shape[1] + ) + return DeformConvFunction.apply( + input.contiguous(), + offset.contiguous(), + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, 
+ self.deformable_groups, + self.im2col_step, + ) diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/hourglass_pose.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/hourglass_pose.py new file mode 100644 index 0000000000000000000000000000000000000000..dc380cfdda967ae7519877c15573fbce2f9fae4a --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/hourglass_pose.py @@ -0,0 +1,192 @@ +""" +Hourglass network inserted in the pre-activated Resnet +Use lr=0.01 for current version +(c) Yichao Zhou (VanishingNet) +(c) Yichao Zhou (LCNN) +(c) YANG, Wei +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ["HourglassNet", "hg"] + + +class Bottleneck2D(nn.Module): + expansion = 2 + + def __init__(self, inplanes, planes, stride=1, resample=None): + super(Bottleneck2D, self).__init__() + + self.bn1 = nn.BatchNorm2d(inplanes) + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1) + self.bn2 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1) + self.bn3 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * Bottleneck2D.expansion, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.resample = resample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.bn1(x) + out = self.relu(out) + out = self.conv1(out) + + out = self.bn2(out) + out = self.relu(out) + out = self.conv2(out) + + out = self.bn3(out) + out = self.relu(out) + out = self.conv3(out) + + if self.resample is not None: + residual = self.resample(x) + + out += residual + + return out + + +class Hourglass(nn.Module): + def __init__(self, block, num_blocks, planes, depth): + super(Hourglass, self).__init__() + self.depth = depth + self.block = block + self.hg = self._make_hour_glass(block, num_blocks, planes, depth) + + def _make_residual(self, block, num_blocks, planes): + layers = [] + for i in range(0, 
num_blocks): + layers.append(block(planes * block.expansion, planes)) + return nn.Sequential(*layers) + + def _make_hour_glass(self, block, num_blocks, planes, depth): + hg = [] + for i in range(depth): + res = [] + for j in range(3): + res.append(self._make_residual(block, num_blocks, planes)) + if i == 0: + res.append(self._make_residual(block, num_blocks, planes)) + hg.append(nn.ModuleList(res)) + return nn.ModuleList(hg) + + def _hour_glass_forward(self, n, x): + up1 = self.hg[n - 1][0](x) + low1 = F.max_pool2d(x, 2, stride=2) + low1 = self.hg[n - 1][1](low1) + + if n > 1: + low2 = self._hour_glass_forward(n - 1, low1) + else: + low2 = self.hg[n - 1][3](low1) + low3 = self.hg[n - 1][2](low2) + up2 = F.interpolate(low3, scale_factor=2) + out = up1 + up2 + return out + + def forward(self, x): + return self._hour_glass_forward(self.depth, x) + + +class HourglassNet(nn.Module): + def __init__(self, planes, block, head, depth, num_stacks, num_blocks): + super(HourglassNet, self).__init__() + + self.inplanes = 64 + self.num_feats = 128 + self.num_stacks = num_stacks + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3) + self.bn1 = nn.BatchNorm2d(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_residual(block, self.inplanes, 1) + self.layer2 = self._make_residual(block, self.inplanes, 1) + self.layer3 = self._make_residual(block, self.num_feats, 1) + self.maxpool = nn.MaxPool2d(2, stride=2) + + # build hourglass modules + ch = self.num_feats * block.expansion + + hg, res, fc, score, fc_, score_ = [], [], [], [], [], [] + for i in range(num_stacks): + hg.append(Hourglass(block, num_blocks, self.num_feats, depth)) + res.append(self._make_residual(block, self.num_feats, num_blocks)) + fc.append(self._make_fc(ch, ch)) + score.append(head(ch, planes)) + if i < num_stacks - 1: + fc_.append(nn.Conv2d(ch, ch, kernel_size=1)) + score_.append(nn.Conv2d(planes, ch, kernel_size=1)) + + self.hg = nn.ModuleList(hg) + self.res = 
nn.ModuleList(res) + self.fc = nn.ModuleList(fc) + self.score = nn.ModuleList(score) + self.fc_ = nn.ModuleList(fc_) + self.score_ = nn.ModuleList(score_) + + def _make_residual(self, block, planes, blocks, stride=1): + resample = None + if stride != 1 or self.inplanes != planes * block.expansion: + resample = nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=1, stride=stride + ) + layers = [block(self.inplanes, planes, stride, resample)] + self.inplanes = planes * block.expansion + for i in range(blocks - 1): + layers.append(block(self.inplanes, planes)) + return nn.Sequential(*layers) + + def _make_fc(self, inplanes, outplanes): + return nn.Sequential( + nn.Conv2d(inplanes, outplanes, kernel_size=1), + nn.BatchNorm2d(outplanes), + nn.ReLU(inplace=True), + ) + + def forward(self, x): + out = [] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.layer1(x) + x = self.maxpool(x) + x = self.layer2(x) + x = self.layer3(x) + + for i in range(self.num_stacks): + y = self.hg[i](x) + y = self.res[i](y) + y = self.fc[i](y) + score = self.score[i](y) + out.append(score) + if i < self.num_stacks - 1: + fc_ = self.fc_[i](y) + score_ = self.score_[i](score) + x = x + fc_ + score_ + + return out[::-1] + + +def hg(**kwargs): + model = HourglassNet( + planes=kwargs["planes"], + block=Bottleneck2D, + head=kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1)), + depth=kwargs["depth"], + num_stacks=kwargs["num_stacks"], + num_blocks=kwargs["num_blocks"], + ) + return model + + +def main(): + hg(planes=16, depth=2, num_stacks=1, num_blocks=1) + + +if __name__ == "__main__": + main() diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/vanishing_net.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/vanishing_net.py new file mode 100644 index 0000000000000000000000000000000000000000..9d0386c480d41b89a55f66452456d16e522da238 --- /dev/null +++
b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/models/vanishing_net.py @@ -0,0 +1,181 @@ +import sys +import math +import random +import itertools +from collections import defaultdict + +import numpy as np +import torch +import torch.nn as nn +import numpy.linalg as LA +import matplotlib.pyplot as plt +import torch.nn.functional as F + +from neurvps.utils import plot_image_grid +from neurvps.config import C, M +from neurvps.models.conic import ConicConv + + +class VanishingNet(nn.Module): + def __init__(self, backbone, output_stride=4, upsample_scale=1): + super().__init__() + self.backbone = backbone + self.anet = ApolloniusNet(output_stride, upsample_scale) + self.loss = nn.BCEWithLogitsLoss(reduction="none") + + def forward(self, input_dict): + x = self.backbone(input_dict["image"])[0] + N, _, H, W = x.shape + test = input_dict.get("test", False) + if test: + c = len(input_dict["vpts"]) + else: + c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg) + x = x[:, None].repeat(1, c, 1, 1, 1).reshape(N * c, _, H, W) + + if test: + vpts = [to_pixel(v) for v in input_dict["vpts"]] + vpts = torch.tensor(vpts, device=x.device) + return self.anet(x, vpts).sigmoid() + + vpts_gt = input_dict["vpts"].cpu().numpy() + vpts, y = [], [] + for n in range(N): + + def add_sample(p): + vpts.append(to_pixel(p)) + y.append(to_label(p, vpts_gt[n])) + + for vgt in vpts_gt[n]: + for st, ed in zip([0] + M.multires[:-1], M.multires): + # positive samples + for _ in range(M.smp_pos): + add_sample(sample_sphere(vgt, st, ed)) + # negative samples + for _ in range(M.smp_neg): + add_sample(sample_sphere(vgt, ed, ed * M.smp_multiplier)) + # random samples + for _ in range(M.smp_rnd): + add_sample(sample_sphere(np.array([0, 0, 1]), 0, math.pi / 2)) + + y = torch.tensor(y, device=x.device, dtype=torch.float) + vpts = torch.tensor(vpts, device=x.device) + + x = self.anet(x, vpts) + L = self.loss(x, y) + maskn = (y == 0).float() + maskp = (y == 1).float() + losses = 
{} + for i in range(len(M.multires)): + assert maskn[:, i].sum().item() != 0 + assert maskp[:, i].sum().item() != 0 + losses[f"lneg{i}"] = (L[:, i] * maskn[:, i]).sum() / maskn[:, i].sum() + losses[f"lpos{i}"] = (L[:, i] * maskp[:, i]).sum() / maskp[:, i].sum() + + return { + "losses": [losses], + "preds": {"vpts": vpts, "scores": x.sigmoid(), "ys": y}, + } + + +class ApolloniusNet(nn.Module): + def __init__(self, output_stride, upsample_scale): + super().__init__() + self.fc0 = nn.Conv2d(64, 32, 1) + self.relu = nn.ReLU(inplace=True) + self.pool = nn.MaxPool2d(2, 2) + + if M.conic_6x: + self.bn00 = nn.BatchNorm2d(32) + self.conv00 = ConicConv(32, 32) + self.bn0 = nn.BatchNorm2d(32) + self.conv0 = ConicConv(32, 32) + + self.bn1 = nn.BatchNorm2d(32) + self.conv1 = ConicConv(32, 64) + self.bn2 = nn.BatchNorm2d(64) + self.conv2 = ConicConv(64, 128) + self.bn3 = nn.BatchNorm2d(128) + self.conv3 = ConicConv(128, 256) + self.bn4 = nn.BatchNorm2d(256) + self.conv4 = ConicConv(256, 256) + + self.fc1 = nn.Linear(16384, M.fc_channel) + self.fc2 = nn.Linear(M.fc_channel, M.fc_channel) + self.fc3 = nn.Linear(M.fc_channel, len(M.multires)) + + self.upsample_scale = upsample_scale + self.stride = output_stride / upsample_scale + + def forward(self, input, vpts): + # for now we did not do interpolation + if self.upsample_scale != 1: + input = F.interpolate(input, scale_factor=self.upsample_scale) + x = self.fc0(input) + + if M.conic_6x: + x = self.bn00(x) + x = self.relu(x) + x = self.conv00(x, vpts / self.stride - 0.5) + x = self.bn0(x) + x = self.relu(x) + x = self.conv0(x, vpts / self.stride - 0.5) + + # 128 + x = self.bn1(x) + x = self.relu(x) + x = self.conv1(x, vpts / self.stride - 0.5) + x = self.pool(x) + # 64 + x = self.bn2(x) + x = self.relu(x) + x = self.conv2(x, vpts / self.stride / 2 - 0.5) + x = self.pool(x) + # 32 + x = self.bn3(x) + x = self.relu(x) + x = self.conv3(x, vpts / self.stride / 4 - 0.5) + x = self.pool(x) + # 16 + x = self.bn4(x) + x = self.relu(x) + x 
= self.conv4(x, vpts / self.stride / 8 - 0.5) + x = self.pool(x) + # 8 + x = x.view(x.shape[0], -1) + x = self.relu(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + + return x + + +def orth(v): + x, y, z = v + o = np.array([0.0, -z, y] if abs(x) < abs(y) else [-z, 0.0, x]) + o /= LA.norm(o) + return o + + +def sample_sphere(v, theta0, theta1): + costheta = random.uniform(math.cos(theta1), math.cos(theta0)) + phi = random.random() * math.pi * 2 + v1 = orth(v) + v2 = np.cross(v, v1) + r = math.sqrt(1 - costheta ** 2) + w = v * costheta + r * (v1 * math.cos(phi) + v2 * math.sin(phi)) + return w / LA.norm(w) + + +def to_label(w, vpts): + degree = np.min(np.arccos(np.abs(vpts @ w).clip(max=1))) + return [int(degree < res + 1e-6) for res in M.multires] + + +def to_pixel(w): + x = w[0] / w[2] * C.io.focal_length * 256 + 256 + y = -w[1] / w[2] * C.io.focal_length * 256 + 256 + return y, x diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/trainer.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..d244a67b917f8d420063a310154c2ec3ec8ec7e1 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/trainer.py @@ -0,0 +1,304 @@ +import os +import atexit +import random +import shutil +import signal +import os.path as osp +import threading +import subprocess +from timeit import default_timer as timer + +import numpy as np +import torch +import matplotlib as mpl +import matplotlib.pyplot as plt +import torch.nn.functional as F +from skimage import io +from tensorboardX import SummaryWriter + +import neurvps.utils as utils +from neurvps.config import C, M + + +class Trainer(object): + def __init__( + self, device, model, optimizer, train_loader, val_loader, batch_size, out + ): + self.device = device + + self.model = model + self.optim = optimizer + + self.train_loader = train_loader + self.val_loader = 
val_loader + self.batch_size = batch_size + + self.out = out + if not osp.exists(self.out): + os.makedirs(self.out) + + board_out = osp.join(self.out, "tensorboard") + if not osp.exists(board_out): + os.makedirs(board_out) + self.writer = SummaryWriter(board_out) + # self.run_tensorboard(board_out) + # time.sleep(1) + + self.epoch = 0 + self.iteration = 0 + self.max_epoch = C.optim.max_epoch + self.lr_decay_epoch = C.optim.lr_decay_epoch + self.num_stacks = C.model.num_stacks + self.mean_loss = self.best_mean_loss = 1e1000 + + self.loss_labels = None + self.avg_metrics = None + self.metrics = np.zeros(0) + + def run_tensorboard(self, board_out): + os.environ["CUDA_VISIBLE_DEVICES"] = "" + p = subprocess.Popen( + ["tensorboard", f"--logdir={board_out}", f"--port={C.io.tensorboard_port}"] + ) + + def killme(): + os.kill(p.pid, signal.SIGTERM) + + atexit.register(killme) + + def _loss(self, result): + losses = result["losses"] + # Don't move loss label to other place. + # If I want to change the loss, I just need to change this function. 
+ if self.loss_labels is None: + self.loss_labels = ["sum"] + list(losses[0].keys()) + self.metrics = np.zeros([self.num_stacks, len(self.loss_labels)]) + print() + print( + "| ".join( + ["progress "] + + list(map("{:7}".format, self.loss_labels)) + + ["speed"] + ) + ) + with open(f"{self.out}/loss.csv", "a") as fout: + print(",".join(["progress"] + self.loss_labels), file=fout) + + total_loss = 0 + for i in range(self.num_stacks): + for j, name in enumerate(self.loss_labels): + if name == "sum": + continue + if name not in losses[i]: + assert i != 0 + continue + loss = losses[i][name].mean() + self.metrics[i, 0] += loss.item() + self.metrics[i, j] += loss.item() + total_loss += loss + return total_loss + + def validate(self): + tprint("Running validation...", " " * 75) + training = self.model.training + self.model.eval() + + viz = osp.join(self.out, "viz", f"{self.iteration * self.batch_size:09d}") + npz = osp.join(self.out, "npz", f"{self.iteration * self.batch_size:09d}") + osp.exists(viz) or os.makedirs(viz) + osp.exists(npz) or os.makedirs(npz) + + total_loss = 0 + self.metrics[...] 
= 0 + c = M.smp_rnd + C.io.num_vpts * len(M.multires) * (M.smp_pos + M.smp_neg) + with torch.no_grad(): + for batch_idx, (image, target) in enumerate(self.val_loader): + image = image.to(self.device) + input_dict = {"image": image, "vpts": target["vpts"], "eval": True} + result = self.model(input_dict) + total_loss += self._loss(result) + # permute output to be (batch x (nneg + npos) x 2) + preds = result["preds"] + vpts = preds["vpts"].reshape(-1, c, 2).cpu().numpy() + scores = preds["scores"].reshape(-1, c, len(M.multires)).cpu().numpy() + ys = preds["ys"].reshape(-1, c, len(M.multires)).cpu().numpy() + for i in range(self.batch_size): + index = batch_idx * self.batch_size + i + np.savez( + f"{npz}/{index:06}.npz", + **{k: v[i].cpu().numpy() for k, v in preds.items()}, + ) + if index >= 8: + continue + self.plot(index, image[i], vpts[i], scores[i], ys[i], f"{viz}/{index:06}") + + self._write_metrics(len(self.val_loader), total_loss, "validation", True) + self.mean_loss = total_loss / len(self.val_loader) + + torch.save( + { + "iteration": self.iteration, + "arch": self.model.__class__.__name__, + "optim_state_dict": self.optim.state_dict(), + "model_state_dict": self.model.state_dict(), + "best_mean_loss": self.best_mean_loss, + }, + osp.join(self.out, "checkpoint_latest.pth.tar"), + ) + shutil.copy( + osp.join(self.out, "checkpoint_latest.pth.tar"), + osp.join(npz, "checkpoint.pth.tar"), + ) + if self.mean_loss < self.best_mean_loss: + self.best_mean_loss = self.mean_loss + shutil.copy( + osp.join(self.out, "checkpoint_latest.pth.tar"), + osp.join(self.out, "checkpoint_best.pth.tar"), + ) + + if training: + self.model.train() + + def train_epoch(self): + self.model.train() + time = timer() + for batch_idx, (image, target) in enumerate(self.train_loader): + self.optim.zero_grad() + self.metrics[...] 
= 0 + + image = image.to(self.device) + input_dict = {"image": image, "vpts": target["vpts"], "eval": False} + result = self.model(input_dict) + + loss = self._loss(result) + if np.isnan(loss.item()): + raise ValueError("loss is nan while training") + loss.backward() + self.optim.step() + + if self.avg_metrics is None: + self.avg_metrics = self.metrics + else: + self.avg_metrics = self.avg_metrics * 0.9 + self.metrics * 0.1 + self.iteration += 1 + self._write_metrics(1, loss.item(), "training", do_print=False) + + if self.iteration % 4 == 0: + tprint( + f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| " + + "| ".join(map("{:.5f}".format, self.avg_metrics[0])) + + f"| {4 * self.batch_size / (timer() - time):04.1f} " + ) + time = timer() + num_images = self.batch_size * self.iteration + if ( + num_images % C.io.validation_interval == 0 + or num_images == C.io.validation_debug + ): + self.validate() + time = timer() + + def _write_metrics(self, size, total_loss, prefix, do_print=False): + for i, metrics in enumerate(self.metrics): + for label, metric in zip(self.loss_labels, metrics): + self.writer.add_scalar( + f"{prefix}/{i}/{label}", metric / size, self.iteration + ) + if i == 0 and do_print: + csv_str = ( + f"{self.epoch:03}/{self.iteration * self.batch_size:07}," + + ",".join(map("{:.11f}".format, metrics / size)) + ) + prt_str = ( + f"{self.epoch:03}/{self.iteration * self.batch_size // 1000:04}k| " + + "| ".join(map("{:.5f}".format, metrics / size)) + ) + with open(f"{self.out}/loss.csv", "a") as fout: + print(csv_str, file=fout) + pprint(prt_str, " " * 7) + self.writer.add_scalar( + f"{prefix}/total_loss", total_loss / size, self.iteration + ) + return total_loss + + def plot(self, index, image, vpts, scores, ys, prefix): + for idx, (vp, score, y) in enumerate(zip(vpts, scores, ys)): + plt.imshow(image[0].cpu().numpy()) + color = (random.random(), random.random(), random.random()) + plt.scatter(vp[1], vp[0]) + plt.text( + vp[1] - 20, + vp[0] - 
10, + " ".join(map("{:.3f}".format, score)) + + "\n" + + " ".join(map("{:.3f}".format, y)), + bbox=dict(facecolor=color), + fontsize=12, + ) + for xy in np.linspace(0, 512, 10): + plt.plot( + [vp[1], xy, vp[1], xy, vp[1], 0, vp[1], 511], + [vp[0], 0, vp[0], 511, vp[0], xy, vp[0], xy], + color=color, + ) + plt.savefig(f"{prefix}_vpts_{idx}.jpg"), plt.close() + + def train(self): + plt.rcParams["figure.figsize"] = (24, 24) + epoch_size = len(self.train_loader) + start_epoch = self.iteration // epoch_size + for self.epoch in range(start_epoch, self.max_epoch): + if self.epoch == self.lr_decay_epoch: + self.optim.param_groups[0]["lr"] /= 10 + self.train_epoch() + + def move(self, obj): + if isinstance(obj, torch.Tensor): + return obj.to(self.device) + if isinstance(obj, dict): + for name in obj: + if isinstance(obj[name], torch.Tensor): + obj[name] = obj[name].to(self.device) + return obj + assert False + + +cmap = plt.get_cmap("jet") +norm = mpl.colors.Normalize(vmin=0.4, vmax=1.0) +sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) +sm.set_array([]) + + +def c(x): + return sm.to_rgba(x) + + +def imshow(im): + plt.close() + plt.tight_layout() + plt.imshow(im) + plt.colorbar(sm, fraction=0.046) + plt.xlim([0, im.shape[0]]) + plt.ylim([im.shape[0], 0]) + + +def tprint(*args): + """Temporarily prints things on the screen""" + print("\r", end="") + print(*args, end="") + + +def pprint(*args): + """Permanently prints things on the screen""" + print("\r", end="") + print(*args) + + +def _launch_tensorboard(board_out, port, out): + os.environ["CUDA_VISIBLE_DEVICES"] = "" + p = subprocess.Popen(["tensorboard", f"--logdir={board_out}", f"--port={port}"]) + + def kill(): + os.kill(p.pid, signal.SIGTERM) + + atexit.register(kill) diff --git a/vanishing_point_extraction/vanishing_point/neurvps/neurvps/utils.py b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..e7cf69e8e1951a11b4694fe35c3baaebda90fce5 --- /dev/null +++ b/vanishing_point_extraction/vanishing_point/neurvps/neurvps/utils.py @@ -0,0 +1,96 @@ +import math +import random +import os.path as osp +import multiprocessing +from timeit import default_timer as timer + +import numpy as np +import numpy.linalg as LA +import matplotlib.pyplot as plt + + +class benchmark(object): + def __init__(self, msg, enable=True, fmt="%0.3g"): + self.msg = msg + self.fmt = fmt + self.enable = enable + + def __enter__(self): + if self.enable: + self.start = timer() + return self + + def __exit__(self, *args): + if self.enable: + t = timer() - self.start + print(("%s : " + self.fmt + " seconds") % (self.msg, t)) + self.time = t + + +def plot_image_grid(im, title): + plt.figure() + for i in range(16): + plt.subplot(4, 4, i + 1) + plt.imshow(im[i]) + plt.colorbar() + plt.title(title) + + +def quiver(x, y, ax): + ax.set_xlim(0, x.shape[1]) + ax.set_ylim(x.shape[0], 0) + ax.quiver( + x, + y, + units="xy", + angles="xy", + scale_units="xy", + scale=1, + minlength=0.01, + width=0.1, + color="b", + ) + + +def np_softmax(x, axis=0): + """Compute softmax values for each sets of scores in x.""" + e_x = np.exp(x - np.max(x)) + return e_x / e_x.sum(axis=axis, keepdims=True) + + +def argsort2d(arr): + return np.dstack(np.unravel_index(np.argsort(arr.ravel()), arr.shape))[0] + + +def __parallel_handle(f, q_in, q_out): + while True: + i, x = q_in.get() + if i is None: + break + q_out.put((i, f(x))) + + +def parmap(f, X, nprocs=multiprocessing.cpu_count(), progress_bar=lambda x: x): + if nprocs == 0: + nprocs = multiprocessing.cpu_count() + q_in = multiprocessing.Queue(1) + q_out = multiprocessing.Queue() + + proc = [ + multiprocessing.Process(target=__parallel_handle, args=(f, q_in, q_out)) + for _ in range(nprocs) + ] + for p in proc: + p.daemon = True + p.start() + + try: + sent = [q_in.put((i, x)) for i, x in enumerate(X)] + [q_in.put((None, None)) for _ 
in range(nprocs)] + res = [q_out.get() for _ in progress_bar(range(len(sent)))] + [p.join() for p in proc] + except KeyboardInterrupt: + q_in.close() + q_out.close() + raise + return [x for i, x in sorted(res)] diff --git a/vanishing_point_extraction/vp_grid.py b/vanishing_point_extraction/vp_grid.py new file mode 100644 index 0000000000000000000000000000000000000000..066831b96eeb3390cec561de9018c3f66856d820 --- /dev/null +++ b/vanishing_point_extraction/vp_grid.py @@ -0,0 +1,329 @@ +""" +**Author:** Kuoyuan Li +""" +import itertools +import random +from itertools import starmap +# Import needed libraries +import matplotlib.pyplot as plt +import cv2 +import os +import numpy as np +import pandas as pd +import random +import math +# from tkinter import Tk # from tkinter import Tk for Python 3.x +# from tkinter.filedialog import askopenfilename +from tqdm import tqdm +# Helper functions +import copy +# Show images given a list of images +def show_images(image): + plt.figure() + plt.imshow(image,cmap='gray') + +# Load images from a folder given their filenames +def load_images(filename): + try: + img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) + return img + except IOError: + print("File is not an image\n") + exit() + +# Plot lines on original images +def show_lines(image,lines): + # Implementation is based on workshop material + for line in lines: + rho,theta = line[0] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + pt1 = (int(x0 + 1000*(-b)),int(y0 + 1000*(a))) + pt2 = (int(x0 - 1000*(-b)),int(y0 - 1000*(a))) + # Draws a line segment connecting two points, colour=(255,0,0) and thickness=2. 
+ cv2.line(image,pt1,pt2,(255,0,0),1) + cv2.imwrite("/root/data2/joonsu0109/project/naive_vp/vanishing_lines.png", image) + # plt.imshow(image) + # plt.axis('off') + # plt.show() + + + +# Plot lines and points on original images +def show_point(image, point): + # Implementation is based on workshop material + cv2.circle(image,point,3,(0,255,0), thickness=3) + cv2.imwrite("/root/data2/joonsu0109/project/naive_vp/vanishing_point.png", image) + # plt.imshow(image) + # plt.axis('off') + # plt.show() + +## 1. Detect lines in the image +## Use the Canny edge detector and Hough transform to detect lines in the image. + +def detect_lines(image): + """ + Use Canny edge detection and Hough transform to get selected lines + (which are useful for locating vanishing point) for all images + + Args: images: a list of original images + + Return: blur_images: Blurred images (for report) + edge_images: Edge images (for report) + valid_lines_all: Detected lines + """ + # Do blurry to smooth the image, try to remove edges from textures + gau_kernel = cv2.getGaussianKernel(70,4)# 1d gaussian kernel (size, sigma) + gau_kern2d = np.outer(gau_kernel, gau_kernel) + gau_kern2d = gau_kern2d/gau_kern2d.sum() # 2d gaussian kernel to do blurry + # Apply blurry filter + blur_image = cv2.filter2D(image,-1,gau_kern2d) + # Canny edge detection with OpenCV for all blurry images + edge_image = cv2.Canny(blur_image,40,70,apertureSize=3,L2gradient=True) + # Use hough transform to detect all lines + lines=cv2.HoughLines(edge_image, 1, np.pi/120, 55) + valid_lines = [] + # Remove horizontal and vertical lines as they would not converge to vanishing point + for line in lines: + rho,theta = line[0] + if (theta>0.4 and theta < 1.47) or (theta > 1.67 and theta < 2.74): + valid_lines.append(line) + + return blur_image,edge_image,valid_lines + +### 2. Locate the vanishing point +### Use RANSAC to locate the vanishing point from the detected lines. 
+ + +#### 2.1 RANSAC functions +#### Define two fuctions required by RANSAC: a function to find the point where lines intersect, and a function to compute the distance from a point to a line. + +# Find the intersection point +def find_intersection_point(line1,line2): + """Implementation is based on code from https://stackoverflow.com/questions/46565975 + Original author: StackOverflow contributor alkasm + Find an intercept point of 2 lines model + + Args: line1,line2: 2 lines using rho and theta (polar coordinates) to represent + + Return: x0,y0: x and y for the intersection point + """ + # rho and theta for each line + rho1, theta1 = line1[0] + rho2, theta2 = line2[0] + # Use formula from https://stackoverflow.com/a/383527/5087436 to solve for intersection between 2 lines + A = np.array([ + [np.cos(theta1), np.sin(theta1)], + [np.cos(theta2), np.sin(theta2)] + ]) + b = np.array([[rho1], [rho2]]) + det_A = np.linalg.det(A) + if det_A != 0: + x0, y0 = np.linalg.solve(A, b) + # Round up x and y because pixel cannot have float number + x0, y0 = int(np.round(x0)), int(np.round(y0)) + return x0, y0 + else: + return None + + +def find_vanishing_point(img, grid_size, intersections): + # Image dimensions + image_height = img.shape[0] + image_width = img.shape[1] + + # Grid dimensions + grid_rows = (image_height // grid_size) + 1 + grid_columns = (image_width // grid_size) + 1 + + # Current cell with most intersection points + max_intersections = 0 + best_cell = (0.0, 0.0) + + for i, j in itertools.product(range(grid_columns),range(grid_rows)): + + cell_left = i * grid_size + cell_right = (i + 1) * grid_size + cell_bottom = j * grid_size + cell_top = (j + 1) * grid_size + + center_cell = ((cell_left + cell_right) / 2, (cell_bottom + cell_top) / 2) + + # cv2.rectangle(img, (cell_left, cell_bottom), (cell_right, cell_top), (0, 0, 255), 5) + + current_intersections = 0 # Number of intersections in the current cell + for x, y in intersections: + if cell_left < x < cell_right 
and cell_bottom < y < cell_top: + current_intersections += 1 + + # Current cell has more intersections that previous cell (better) + if current_intersections > max_intersections: + max_intersections = current_intersections + best_cell = ((cell_left + cell_right) / 2, (cell_bottom + cell_top) / 2) + + if best_cell[0] != None and best_cell[1] != None: + rx1 = int(best_cell[0] - grid_size / 2) + ry1 = int(best_cell[1] - grid_size / 2) + rx2 = int(best_cell[0] + grid_size / 2) + ry2 = int(best_cell[1] + grid_size / 2) + # cv2.rectangle(img, (rx1, ry1), (rx2, ry2), (0, 255, 0), 10) + # center = (int(best_cell[0]), int(best_cell[1])) + # cv2.circle(img,center,5,(255,0,0), thickness=5) + # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + # cv2.imwrite(save_path, img) + + else: + raise ValueError("No best cell found!: ", save_path) + + return best_cell + +### 3. Main function +### Run your vanishing point detection method on a folder of images, return the (x,y) locations of the vanishing points + + +# RANSAC parameters: + +def line_intersection(line1, line2): + """ + Computes the intersection point of two lines in polar coordinates (rho, theta). + + Args: + line1 (np.ndarray): First line, represented as (rho, theta). + line2 (np.ndarray): Second line, represented as (rho, theta). + + Returns: + tuple or None: Intersection point (x, y), or None if lines are parallel. 
+ """ + # Extract (rho, theta) for each line + rho1, theta1 = line1[0] + rho2, theta2 = line2[0] + + # Represent lines in the form: a1*x + b1*y = c1 + a1, b1 = np.cos(theta1), np.sin(theta1) + c1 = rho1 + a2, b2 = np.cos(theta2), np.sin(theta2) + c2 = rho2 + + # Solve the system of linear equations + A = np.array([[a1, b1], [a2, b2]]) + C = np.array([c1, c2]) + + # Check if determinant is close to zero (parallel lines) + det = np.linalg.det(A) + if abs(det) < 1e-6: + return None + + # Find the intersection point + x, y = np.linalg.solve(A, C) + return x, y + + +def find_intersections(lines): + """ + Finds intersections between pairs of lines. + + Args: + lines (np.ndarray): Array of lines in the format (n, 1, 2), + where each line is represented as (rho, theta). + + Returns: + list: List of intersection points [(x, y), ...]. + """ + intersections = [] + for i, line_1 in enumerate(lines): + for line_2 in lines[i + 1:]: + intersection = line_intersection(line_1, line_2) + if intersection is not None: # If lines intersect, add the point + intersections.append(intersection) + return intersections + +def sample_lines(lines, size): + if size > len(lines): + size = len(lines) + return random.sample(lines, size) + +if __name__ == "__main__": + # Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing + # filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file + """For the image, Use Canny+Hough to detect edges, use RANSAC to identify the vanishing points + """ + input_folder = "/root/data2/joonsu0109/dataset/SemanticKITTI/dataset/sequences" + output_folder = "/root/data2/joonsu0109/dataset/SemanticKITTI/grid_vp" + os.makedirs(output_folder, exist_ok=True) + + input_dir_list = os.listdir(input_folder) + input_dir_list.sort() + edge = [] + seq_list = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10"] + for dirname in seq_list: + print("Processing folder: ", dirname) + file_list = 
os.listdir(os.path.join(input_folder, dirname, "image_2")) + file_list.sort() + vp_dict = dict() + for filename in tqdm(file_list): + idx = filename.split(".")[0] + if int(idx) % 5 == 0: + print("Processing: ", filename) + file_path = os.path.join(input_folder, dirname, "image_2", filename) + + # Read images from folder + image = cv2.imread(file_path) + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + # Task1: Detect lines using Canny + Hough + image_copy = image.copy() + blur_image, edge_image, lines = detect_lines(image) + + if len(lines) > 100: + lines = sample_lines(lines, 100) + else: + pass + + intersections = find_intersections(lines) + + best_cell = find_vanishing_point(image, 50, intersections) + print("Best cell: ", best_cell) + save_key = os.path.join("SemanticKITTI/dataset/sequences", dirname, "image_2", filename) + vp_dict[save_key] = [best_cell] + + print("Edge images: ", len(edge)) + save_path = os.path.join(output_folder, f'ransac_vp_seq_{dirname}.json') + + with open(save_path, 'w') as f: + json.dump(vp_dict, f) + + print("Edge images: ", edge) + print("Edge images num: ", len(edge)) + + + +""" +Hyper parameters tunning +Note: the following code is not expected to run while marking. It will take hours to run. +In addition, tuning for Canny edge detection and Hough Transform are done as well. They are described in the report. 
+""" + +""" +# Use grid search to tune hyperparameters for RANSAC +best_result = 10000 +best_model = None +for ransac_iterations in range (150,351,50): + for ransac_threshold in range (10,15,1): + for ransac_ratio in np.arange(0.81,1,0.02): + sum_mse = 0 + # Run 10 times to alleviate the effect of stochasticity + for it in range(0,10): + vanishing_points = RANSAC(lines_all,ransac_iterations,ransac_threshold,ransac_ratio) + single_mse = MSE(ground_truth, vanishing_points) + sum_mse += single_mse + mse_avg = sum_mse/10 + if mse_avg < best_result: + best_result = mse_avg + best_model = ransac_iterations,ransac_threshold,ransac_ratio +print("Best model is:"+ + "ransac_iterations="+str(best_model[0])+ + ", ransac_threshold="+str(best_model[1])+ + ", ransac_ratio="+str(best_model[2])+ + ", which give MSE "+str(best_result)) +""" \ No newline at end of file diff --git a/vanishing_point_extraction/vp_ransac.py b/vanishing_point_extraction/vp_ransac.py new file mode 100644 index 0000000000000000000000000000000000000000..06d2dcba2a49a6eea138f390e31d0c2b998cdd5d --- /dev/null +++ b/vanishing_point_extraction/vp_ransac.py @@ -0,0 +1,330 @@ +""" +**Author:** Kuoyuan Li +""" +import itertools +import random +from itertools import starmap +# Import needed libraries +import matplotlib.pyplot as plt +import cv2 +import os +import numpy as np +import pandas as pd +import random +import math +from tqdm import tqdm +# Helper functions + +# Show images given a list of images +def show_images(image): + plt.figure() + plt.imshow(image,cmap='gray') + +# Load images from a folder given their filenames +def load_images(filename): + try: + img = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB) + return img + except IOError: + print("File is not an image\n") + exit() + +# Plot lines on original images +def show_lines(image,lines): + # Implementation is based on workshop material + for line in lines: + rho,theta = line[0] + a = np.cos(theta) + b = np.sin(theta) + x0 = a*rho + y0 = b*rho + 
pt1 = (int(x0 + 1000*(-b)),int(y0 + 1000*(a))) + pt2 = (int(x0 - 1000*(-b)),int(y0 - 1000*(a))) + # Draws a line segment connecting two points, colour=(255,0,0) and thickness=2. + cv2.line(image,pt1,pt2,(255,0,0),1) + cv2.imwrite("/root/data2/joonsu0109/project/naive_vp/vanishing_lines.png", image) + # plt.imshow(image) + # plt.axis('off') + # plt.show() + + + +# Plot lines and points on original images +def show_point(image, point, save_paths): + # Implementation is based on workshop material + cv2.circle(image,point,3,(255,0,0), thickness=3) + cv2.imwrite(save_paths, image) + # plt.imshow(image) + # plt.axis('off') + # plt.show() + +## 1. Detect lines in the image +## Use the Canny edge detector and Hough transform to detect lines in the image. + +def detect_lines(image): + """ + Use Canny edge detection and Hough transform to get selected lines + (which are useful for locating vanishing point) for all images + + Args: images: a list of original images + + Return: blur_images: Blurred images (for report) + edge_images: Edge images (for report) + valid_lines_all: Detected lines + """ + # Do blurry to smooth the image, try to remove edges from textures + gau_kernel = cv2.getGaussianKernel(70,4)# 1d gaussian kernel (size, sigma) + gau_kern2d = np.outer(gau_kernel, gau_kernel) + gau_kern2d = gau_kern2d/gau_kern2d.sum() # 2d gaussian kernel to do blurry + # Apply blurry filter + blur_image = cv2.filter2D(image,-1,gau_kern2d) + # Canny edge detection with OpenCV for all blurry images + edge_image = cv2.Canny(blur_image,40,70,apertureSize=3,L2gradient=True) + # Use hough transform to detect all lines + lines=cv2.HoughLines(edge_image, 1, np.pi/120, 55) + valid_lines = [] + # Remove horizontal and vertical lines as they would not converge to vanishing point + for line in lines: + rho,theta = line[0] + if (theta>0.4 and theta < 1.47) or (theta > 1.67 and theta < 2.74): + valid_lines.append(line) + + return blur_image,edge_image,valid_lines + +# Find the intersection point 
def find_intersection_point(line1, line2):
    """Implementation is based on code from https://stackoverflow.com/questions/46565975
    Original author: StackOverflow contributor alkasm
    Find an intercept point of 2 lines model

    Args: line1,line2: 2 lines using rho and theta (polar coordinates) to represent

    Return: x0,y0: x and y for the intersection point
    """
    # rho and theta for each line
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]
    # Use formula from https://stackoverflow.com/a/383527/5087436 to solve for intersection between 2 lines
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    det_A = np.linalg.det(A)
    if det_A != 0:
        x0, y0 = np.linalg.solve(A, b)
        # Round x and y because pixel coordinates cannot be fractional
        x0, y0 = int(np.round(x0)), int(np.round(y0))
        return x0, y0
    else:
        # Parallel lines never intersect
        return None


# Find the distance from a point to a line
def find_dist_to_line(point, line):
    """Implementation is based on Computer Vision material, owned by the University of Melbourne
    Find an intercept point of the line model with a normal from point to it, to calculate the
    distance betwee point and intercept

    Args: point: the point using x and y to represent
          line: the line using rho and theta (polar coordinates) to represent

    Return: dist: the distance from the point to the line
    """
    x0, y0 = point
    rho, theta = line[0]
    # Slope/intercept form of the line; theta is pre-filtered away from 0 and pi
    # by detect_lines, so sin(theta) is non-zero for detected lines.
    m = (-1 * (np.cos(theta))) / np.sin(theta)
    c = rho / np.sin(theta)
    # intersection point with the model (foot of the perpendicular)
    x = (x0 + m * y0 - m * c) / (1 + m ** 2)
    y = (m * x0 + (m ** 2) * y0 - (m ** 2) * c) / (1 + m ** 2) + c
    dist = math.sqrt((x - x0) ** 2 + (y - y0) ** 2)
    return dist


def RANSAC(lines, ransac_iterations, ransac_threshold, ransac_ratio):
    """Implementation is based on code from Computer Vision material, owned by the University of Melbourne
    Use RANSAC to identify the vanishing points for all images

    Args: lines: The detected lines for one image
          ransac_iterations,ransac_threshold,ransac_ratio: RANSAC hyperparameters

    Return: vanishing_point: Estimated vanishing point ((0, 0) if none found)
    """
    # Store vanishing point for the image
    inlier_count_ratio = 0.
    vanishing_point = (0, 0)
    # BUG FIX / robustness: random.sample(lines, 2) raises ValueError when
    # fewer than two lines were detected; return the default point instead.
    if len(lines) < 2:
        return vanishing_point
    # perform RANSAC iterations for each set of lines
    for iteration in range(ransac_iterations):
        # randomly sample 2 lines
        n = 2
        selected_lines = random.sample(lines, n)
        line1 = selected_lines[0]
        line2 = selected_lines[1]
        intersection_point = find_intersection_point(line1, line2)
        if intersection_point is not None:
            # count the number of inliers num
            inlier_count = 0
            # inliers are lines whose distance to the point is less than ransac_threshold
            for line in lines:
                # find the distance from the line to the point
                dist = find_dist_to_line(intersection_point, line)
                # check whether it's an inlier or not
                if dist < ransac_threshold:
                    inlier_count += 1

            # If the value of inlier_count is higher than previously saved value,
            # save it, and save the current point
            if inlier_count / float(len(lines)) > inlier_count_ratio:
                inlier_count_ratio = inlier_count / float(len(lines))
                vanishing_point = intersection_point

            # We are done in case we have enough inliers
            if inlier_count > len(lines) * ransac_ratio:
                break
    return vanishing_point


def find_vanishing_point(img, grid_size, intersections):
    """Divide the image into grid_size x grid_size cells and return the centre
    of the cell containing the most line intersections; also draws the winning
    cell on `img` and writes a debug image to disk.
    """
    # Image dimensions
    print("img.shape: ", img.shape)
    image_height = img.shape[0]
    image_width = img.shape[1]

    # Grid dimensions
    grid_rows = (image_height // grid_size) + 1
    grid_columns = (image_width // grid_size) + 1

    # Current cell with most intersection points
    max_intersections = 0
    best_cell = (0.0, 0.0)

    for i, j in itertools.product(range(grid_columns), range(grid_rows)):

        cell_left = i * grid_size
        cell_right = (i + 1) * grid_size
        cell_bottom = j * grid_size
        cell_top = (j + 1) * grid_size

        center_cell = ((cell_left + cell_right) / 2, (cell_bottom + cell_top) / 2)

        cv2.rectangle(img, (cell_left, cell_bottom), (cell_right, cell_top), (0, 0, 255), 5)

        current_intersections = 0  # Number of intersections in the current cell
        for x, y in intersections:
            if cell_left < x < cell_right and cell_bottom < y < cell_top:
                current_intersections += 1

        # Current cell has more intersections than previous cell (better)
        if current_intersections > max_intersections:
            max_intersections = current_intersections
            best_cell = ((cell_left + cell_right) / 2, (cell_bottom + cell_top) / 2)
            print("Best Cell:", best_cell)

    if best_cell[0] != None and best_cell[1] != None:
        rx1 = int(best_cell[0] - grid_size / 2)
        ry1 = int(best_cell[1] - grid_size / 2)
        rx2 = int(best_cell[0] + grid_size / 2)
        ry2 = int(best_cell[1] + grid_size / 2)
        cv2.rectangle(img, (rx1, ry1), (rx2, ry2), (0, 255, 0), 10)
        cv2.imwrite('/root/data2/joonsu0109/project/naive_vp/vanishing-point-detection/outputs/result.png', img)

    return best_cell

### 3. Main function
### Run your vanishing point detection method on a folder of images, return the (x,y) locations of the vanishing points


# RANSAC parameters:

def line_intersection(line1, line2):
    """
    Computes the intersection point of two lines in polar coordinates (rho, theta).

    Args:
        line1 (np.ndarray): First line, represented as (rho, theta).
        line2 (np.ndarray): Second line, represented as (rho, theta).

    Returns:
        tuple or None: Intersection point (x, y), or None if lines are parallel.
    """
    # Extract (rho, theta) for each line
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]

    # Represent lines in the form: a1*x + b1*y = c1
    a1, b1 = np.cos(theta1), np.sin(theta1)
    c1 = rho1
    a2, b2 = np.cos(theta2), np.sin(theta2)
    c2 = rho2

    # Solve the system of linear equations
    A = np.array([[a1, b1], [a2, b2]])
    C = np.array([c1, c2])

    # Check if determinant is close to zero (parallel lines)
    det = np.linalg.det(A)
    if abs(det) < 1e-6:
        return None

    # Find the intersection point
    x, y = np.linalg.solve(A, C)
    return x, y


def find_intersections(lines):
    """
    Finds intersections between pairs of lines.

    Args:
        lines (np.ndarray): Array of lines in the format (n, 1, 2),
                            where each line is represented as (rho, theta).

    Returns:
        list: List of intersection points [(x, y), ...].
    """
    intersections = []
    for i, line_1 in enumerate(lines):
        for line_2 in lines[i + 1:]:
            intersection = line_intersection(line_1, line_2)
            if intersection is not None:  # If lines intersect, add the point
                intersections.append(intersection)
    return intersections


def sample_lines(lines, size):
    # Randomly sample at most `size` lines (all of them if fewer exist).
    if size > len(lines):
        size = len(lines)
    return random.sample(lines, size)


if __name__ == "__main__":
    # Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
    # filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
    """For the image, Use Canny+Hough to detect edges, use RANSAC to identify the vanishing points
    """
    input_folder = "/root/data2/joonsu0109/dataset/SemanticKITTI/dataset/sequences/08/image_2"
    output_folder = "/root/data2/joonsu0109/project/naive_vp/outputs_ransac"
    os.makedirs(output_folder, exist_ok=True)
    file_list = os.listdir(input_folder)
    detected_list = os.listdir(output_folder)
    for filename in tqdm(file_list):
        # Skip frames that already have a saved result
        if filename not in detected_list:
            try:
                print("Processing image: ", filename)
                file_path = os.path.join(input_folder, filename)
                save_paths = os.path.join(output_folder, filename)
                image = cv2.imread(file_path)
                # Task1: Detect lines using Canny + Hough
                blur_image, edge_image, lines = detect_lines(image)
                print("Number of lines detected: ", len(lines))
                # Show lines on the original images
                # show_lines(image, lines)

                ransac_iterations, ransac_threshold, ransac_ratio = 50, 10, 0.93
                vanishing_point = RANSAC(lines, ransac_iterations, ransac_threshold, ransac_ratio)

                show_point(image, vanishing_point, save_paths)
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit and hid the failure cause.
            except Exception as e:
                print("Error processing image: ", filename, "-", e)
                continue