# petr_swin-l-p4-w7-224-22kto1k_16x1_100e_crowdpose.py
_base_ = [
    '../_base_/datasets/crowdpose_keypoint.py',
    '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth'  # noqa
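# Model: PETR, an end-to-end multi-person pose estimation transformer,
# built on a Swin-L backbone initialized from the ImageNet-22K->1K checkpoint
# above. CrowdPose annotates 14 keypoints per person, hence num_keypoints=14
# throughout this config.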
model = dict(
    type='opera.PETR',
    backbone=dict(
        type='mmdet.SwinTransformer',
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(1, 2, 3),
        with_cp=False,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
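    # The neck projects the three selected Swin stages (out_indices 1-3,
    # channels 384/768/1536) to a common 256-d space; num_outs=4 appends one
    # extra, further-downsampled feature level for the multi-scale attention.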
    neck=dict(
        type='mmdet.ChannelMapper',
        in_channels=[384, 768, 1536],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
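    # PETR head: 300 pose queries, a single foreground class (person),
    # two-stage query initialization and an extra keypoint refinement stage.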
    bbox_head=dict(
        type='opera.PETRHead',
        num_query=300,
        num_keypoints=14,
        num_classes=1,  # only person
        in_channels=2048,
        sync_cls_avg_factor=True,
        with_kpt_refine=True,
        as_two_stage=True,
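        # Transformer: a 6-layer deformable-attention encoder over the
        # multi-scale features and a 6-layer pose decoder whose cross-attention
        # (MultiScaleDeformablePoseAttention, num_points=14 matching the
        # keypoint count) samples features guided by the current keypoint
        # estimates; the auxiliary heatmap encoder and refinement decoder
        # follow below.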
        transformer=dict(
            type='opera.PETRTransformer',
            num_keypoints=14,
            encoder=dict(
                type='mmcv.DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='mmcv.BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='mmcv.MultiScaleDeformableAttention',
                        embed_dims=256),
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='opera.PetrTransformerDecoder',
                num_layers=6,
                num_keypoints=14,
                return_intermediate=True,
                transformerlayers=dict(
                    type='mmcv.DetrTransformerDecoderLayer',
                    attn_cfgs=[
                        dict(
                            type='mmcv.MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        dict(
                            type='opera.MultiScaleDeformablePoseAttention',
                            embed_dims=256,
                            num_points=14)
                    ],
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm'))),
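            # Single-layer encoder for the auxiliary center-heatmap branch
            # (supervised by loss_hm below); it operates on one feature level.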
            hm_encoder=dict(
                type='mmcv.DetrTransformerEncoder',
                num_layers=1,
                transformerlayers=dict(
                    type='mmcv.BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='mmcv.MultiScaleDeformableAttention',
                        embed_dims=256,
                        num_levels=1),
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            refine_decoder=dict(
                type='mmcv.DeformableDetrTransformerDecoder',
                num_layers=3,
                return_intermediate=True,
                transformerlayers=dict(
                    type='mmcv.DetrTransformerDecoderLayer',
                    attn_cfgs=[
                        dict(
                            type='mmcv.MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        dict(
                            type='mmcv.MultiScaleDeformableAttention',
                            embed_dims=256,
                            im2col_step=128)
                    ],
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))),
        positional_encoding=dict(
            type='mmcv.SinePositionalEncoding',
            num_feats=128,
            normalize=True,
            offset=-0.5),
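        # Losses: focal loss for classification, L1 for keypoint regression
        # (RPN/decoder and refinement stages), OKS losses for pose quality,
        # and a focal loss on the center heatmap. Refinement terms carry
        # slightly higher weights (80.0 / 3.0 vs. 70.0 / 2.0).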
        loss_cls=dict(
            type='mmdet.FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_kpt=dict(type='mmdet.L1Loss', loss_weight=70.0),
        loss_kpt_rpn=dict(type='mmdet.L1Loss', loss_weight=70.0),
        loss_oks=dict(type='opera.OKSLoss', loss_weight=2.0, num_keypoints=14),
        loss_hm=dict(type='opera.CenterFocalLoss', loss_weight=4.0),
        loss_kpt_refine=dict(type='mmdet.L1Loss', loss_weight=80.0),
        loss_oks_refine=dict(type='opera.OKSLoss', loss_weight=3.0,
                             num_keypoints=14)),
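    # One-to-one Hungarian matching between queries and ground-truth poses;
    # the cls/kpt cost weights mirror the corresponding loss weights.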
    train_cfg=dict(
        assigner=dict(
            type='opera.PoseHungarianAssigner',
            cls_cost=dict(type='mmdet.FocalLossCost', weight=2.0),
            kpt_cost=dict(type='opera.KptL1Cost', weight=70.0),
            oks_cost=dict(type='opera.OksCost', weight=7.0,
                          num_keypoints=14))),
    test_cfg=dict(max_per_img=100))  # set 'max_per_img=20' for time counting
# optimizer
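# AdamW with a 10x smaller learning rate for the pretrained backbone and for
# the deformable-attention sampling offsets and reference points.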
optimizer = dict(
    type='AdamW',
    lr=1e-4,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1),
            'sampling_offsets': dict(lr_mult=0.1),
            'reference_points': dict(lr_mult=0.1)
        }))
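# Clip gradients to a maximum L2 norm of 0.1 for training stability.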
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
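# Step decay at epoch 80 out of 100 total epochs ('100e' in the file name).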
lr_config = dict(policy='step', step=[80])
runner = dict(type='EpochBasedRunner', max_epochs=100)
checkpoint_config = dict(interval=1, max_keep_ckpts=20)
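
# Usage note (a sketch, assuming the repo follows the standard
# MMDetection-style entry point tools/train.py; adjust the config path to
# your checkout):
#   python tools/train.py \
#       configs/petr/petr_swin-l-p4-w7-224-22kto1k_16x1_100e_crowdpose.py
# By MMDetection naming convention, '16x1' in the file name denotes the
# intended schedule of 16 GPUs with 1 image per GPU.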