# Copyright (c) OpenMMLab. All rights reserved.
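"""Test (and evaluate) an MMPose model with a given config and checkpoint.

Example (hypothetical paths, for illustration only):
    python tools/test.py configs/pose_config.py checkpoint.pth \
        --out metrics.pkl --dump predictions.pkl
"""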
import argparse
import os
import os.path as osp

import mmengine
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.runner import Runner


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMPose test (and eval) model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir', help='the directory to save evaluation results')
    parser.add_argument('--out', help='the file to save metric results')
    parser.add_argument(
        '--dump',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='override some settings in the used config; the key-value '
        'pairs in xxx=yyy format will be merged into the config file. '
        "For example, '--cfg-options model.backbone.depth=18 "
        "model.backbone.with_cp=True'")
    parser.add_argument(
        '--show-dir',
        help='directory where the visualization images will be saved')
    parser.add_argument(
        '--show',
        action='store_true',
        help='whether to display the prediction results in a window')
    parser.add_argument(
        '--interval',
        type=int,
        default=1,
        help='visualize every `interval`-th sample')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='display time of every window (seconds)')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    # When using PyTorch >= 2.0.0, `torch.distributed.launch` passes the
    # `--local-rank` parameter to `tools/test.py` instead of `--local_rank`.
    parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
    parser.add_argument(
        '--badcase',
        action='store_true',
        help='whether to analyze badcases during testing')
    args = parser.parse_args()
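    # `torchrun` exports LOCAL_RANK in the environment itself; when launched
    # via the older `torch.distributed.launch`, mirror the CLI rank into the
    # environment so downstream code sees a consistent value.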
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def merge_args(cfg, args):
    """Merge CLI arguments into the config."""
    cfg.launcher = args.launcher
    cfg.load_from = args.checkpoint

    # -------------------- work directory --------------------
    # work_dir is determined with this priority:
    # CLI > `work_dir` field in the config file > config filename
    if args.work_dir is not None:
        # update config according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use the config filename as the default work_dir if cfg.work_dir
        # is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
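    # For example, a hypothetical config `configs/td-hm_res50_coco.py`
    # would default to `./work_dirs/td-hm_res50_coco`.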

    # -------------------- visualization --------------------
    if (args.show and not args.badcase) or (args.show_dir is not None):
        assert 'visualization' in cfg.default_hooks, \
            'PoseVisualizationHook is not set in the ' \
            '`default_hooks` field of config. Please set ' \
            '`visualization=dict(type="PoseVisualizationHook")`'

        cfg.default_hooks.visualization.enable = True
        cfg.default_hooks.visualization.show = False \
            if args.badcase else args.show
        if args.show:
            cfg.default_hooks.visualization.wait_time = args.wait_time
        cfg.default_hooks.visualization.out_dir = args.show_dir
        cfg.default_hooks.visualization.interval = args.interval

    # -------------------- badcase analysis --------------------
    if args.badcase:
        assert 'badcase' in cfg.default_hooks, \
            'BadcaseAnalyzeHook is not set in the ' \
            '`default_hooks` field of config. Please set ' \
            '`badcase=dict(type="BadcaseAnalyzeHook")`'

        cfg.default_hooks.badcase.enable = True
        cfg.default_hooks.badcase.show = args.show
        if args.show:
            cfg.default_hooks.badcase.wait_time = args.wait_time
        cfg.default_hooks.badcase.interval = args.interval
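        # Rank badcases by 'loss' (default, uses the model head's loss
        # config) or by 'accuracy' (uses the test evaluator); see the
        # fallback logic below.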
        metric_type = cfg.default_hooks.badcase.get('metric_type', 'loss')
        if metric_type not in ['loss', 'accuracy']:
            raise ValueError('Only support badcase metric type '
                             "in ['loss', 'accuracy']")
        if metric_type == 'loss':
            if not cfg.default_hooks.badcase.get('metric'):
                cfg.default_hooks.badcase.metric = cfg.model.head.loss
        else:
            if not cfg.default_hooks.badcase.get('metric'):
                cfg.default_hooks.badcase.metric = cfg.test_evaluator

    # -------------------- dump predictions --------------------
    if args.dump is not None:
        assert args.dump.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        dump_metric = dict(type='DumpResults', out_file_path=args.dump)
        if isinstance(cfg.test_evaluator, (list, tuple)):
            cfg.test_evaluator = [*cfg.test_evaluator, dump_metric]
        else:
            cfg.test_evaluator = [cfg.test_evaluator, dump_metric]
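    # The dumped pickle can later be reloaded for offline evaluation, e.g.
    # `predictions = mmengine.load(args.dump)` (a sketch; the exact offline
    # workflow depends on the chosen evaluator).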

    # -------------------- other arguments --------------------
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    return cfg


def main():
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
    cfg = merge_args(cfg, args)

    # build the runner from config
    runner = Runner.from_cfg(cfg)

    if args.out:

        class SaveMetricHook(Hook):
            """Minimal hook that dumps the test metrics to ``args.out``."""

            def after_test_epoch(self, _, metrics=None):
                if metrics is not None:
                    mmengine.dump(metrics, args.out)
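        # 'LOWEST' priority registers this hook to run after the default
        # hooks at the `after_test_epoch` call point.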
        runner.register_hook(SaveMetricHook(), 'LOWEST')

    # start testing
    runner.test()


if __name__ == '__main__':
    main()