# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth'  # noqa
# TODO: delete custom_imports after mmpretrain supports auto import
# please install mmpretrain >= 1.0.0rc7
# import mmpretrain.models to trigger register_module in mmpretrain
custom_imports = dict(
    imports=['mmpretrain.models'], allow_failed_imports=False)
data_preprocessor = dict(
    type='SegDataPreProcessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True,
    pad_val=0,
    seg_pad_val=255)
model = dict(
    type='EncoderDecoder',
    data_preprocessor=data_preprocessor,
    backbone=dict(
        type='mmpretrain.PoolFormer',
        arch='s12',
        init_cfg=dict(
            type='Pretrained',
            checkpoint=checkpoint_file,
            prefix='backbone.'),
        in_patch_size=7,
        in_stride=4,
        in_pad=2,
        down_patch_size=3,
        down_stride=2,
        down_pad=1,
        drop_rate=0.,
        drop_path_rate=0.,
        out_indices=(0, 2, 4, 6),
        frozen_stages=0,
    ),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=4),
    decode_head=dict(
        type='FPNHead',
        in_channels=[256, 256, 256, 256],
        in_index=[0, 1, 2, 3],
        feature_strides=[4, 8, 16, 32],
        channels=128,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
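
# Usage sketch (an assumption, not part of the upstream config): with
# mmsegmentation 1.x and mmpretrain >= 1.0.0rc7 installed, a model described
# by this file can typically be instantiated for inference through the
# high-level API, for example:
#
#     from mmseg.apis import init_model
#
#     # 'fpn_poolformer_s12.py' is a hypothetical path to this config file
#     model = init_model('fpn_poolformer_s12.py', checkpoint=None,
#                        device='cuda:0')
#
# The `custom_imports` entry above makes the config loader import
# `mmpretrain.models`, so that `mmpretrain.PoolFormer` is registered before
# the backbone is built.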