From 356ac8d161742538660de465e26734e85df21c15 Mon Sep 17 00:00:00 2001 From: F-jie Date: Sat, 12 Feb 2022 12:39:29 +0800 Subject: [PATCH] =?UTF-8?q?FEAT=EF=BC=9A=E6=96=87=E4=BB=B6=E6=95=B4?= =?UTF-8?q?=E7=90=86&Transformer?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 3 +- ARI/ARIAnnotation.py | 2 +- ARI/ARICOCO.py | 2 +- TAU/TAUUtils.py => ARI/ARIUtils.py | 0 ARI/Linemod.py | 2 +- .../DETR/models => TAU/model}/transformer.py | 567 +++++++++--------- third-party/DETR/.gitignore | 17 - third-party/DETR/Dockerfile | 13 - third-party/DETR/LICENSE | 201 ------- third-party/DETR/README.md | 263 -------- third-party/DETR/d2/README.md | 39 -- .../d2/configs/detr_256_6_6_torchvision.yaml | 45 -- .../detr_segm_256_6_6_torchvision.yaml | 46 -- third-party/DETR/d2/converter.py | 69 --- third-party/DETR/d2/detr/__init__.py | 4 - third-party/DETR/d2/detr/config.py | 34 -- third-party/DETR/d2/detr/dataset_mapper.py | 122 ---- third-party/DETR/d2/detr/detr.py | 261 -------- third-party/DETR/d2/train_net.py | 145 ----- third-party/DETR/datasets/__init__.py | 25 - third-party/DETR/datasets/coco.py | 158 ----- third-party/DETR/datasets/coco_eval.py | 257 -------- third-party/DETR/datasets/coco_panoptic.py | 99 --- third-party/DETR/datasets/panoptic_eval.py | 44 -- third-party/DETR/datasets/transforms.py | 276 --------- third-party/DETR/engine.py | 151 ----- third-party/DETR/hubconf.py | 168 ------ third-party/DETR/main.py | 248 -------- third-party/DETR/models/__init__.py | 6 - third-party/DETR/models/backbone.py | 119 ---- third-party/DETR/models/detr.py | 359 ----------- third-party/DETR/models/matcher.py | 86 --- third-party/DETR/models/position_encoding.py | 89 --- third-party/DETR/models/segmentation.py | 363 ----------- third-party/DETR/requirements.txt | 9 - third-party/DETR/run_with_submitit.py | 111 ---- third-party/DETR/test_all.py | 209 ------- third-party/DETR/tox.ini | 3 - third-party/DETR/util/__init__.py | 1 - third-party/DETR/util/box_ops.py | 88 --- third-party/DETR/util/misc.py | 468 --------------- third-party/DETR/util/plot_utils.py | 107 ---- 42 files changed, 275 insertions(+), 5004 deletions(-) rename TAU/TAUUtils.py => ARI/ARIUtils.py (100%) rename {third-party/DETR/models => TAU/model}/transformer.py (54%) delete mode 100644 third-party/DETR/.gitignore delete mode 100644 third-party/DETR/Dockerfile delete mode 100644 third-party/DETR/LICENSE delete mode 100644 third-party/DETR/README.md delete mode 100644 third-party/DETR/d2/README.md delete mode 100644 third-party/DETR/d2/configs/detr_256_6_6_torchvision.yaml delete mode 100644 third-party/DETR/d2/configs/detr_segm_256_6_6_torchvision.yaml delete mode 100644 third-party/DETR/d2/converter.py delete mode 100644 third-party/DETR/d2/detr/__init__.py delete mode 100644 third-party/DETR/d2/detr/config.py delete mode 100644 third-party/DETR/d2/detr/dataset_mapper.py delete mode 100644 third-party/DETR/d2/detr/detr.py delete mode 100644 third-party/DETR/d2/train_net.py delete mode 100644 third-party/DETR/datasets/__init__.py delete mode 100644 third-party/DETR/datasets/coco.py delete mode 100644 third-party/DETR/datasets/coco_eval.py delete mode 100644 third-party/DETR/datasets/coco_panoptic.py delete mode 100644 third-party/DETR/datasets/panoptic_eval.py delete mode 100644 third-party/DETR/datasets/transforms.py delete mode 100644 third-party/DETR/engine.py delete mode 100644 third-party/DETR/hubconf.py delete mode 100644 third-party/DETR/main.py delete mode 100644 
third-party/DETR/models/__init__.py delete mode 100644 third-party/DETR/models/backbone.py delete mode 100644 third-party/DETR/models/detr.py delete mode 100644 third-party/DETR/models/matcher.py delete mode 100644 third-party/DETR/models/position_encoding.py delete mode 100644 third-party/DETR/models/segmentation.py delete mode 100644 third-party/DETR/requirements.txt delete mode 100644 third-party/DETR/run_with_submitit.py delete mode 100644 third-party/DETR/test_all.py delete mode 100644 third-party/DETR/tox.ini delete mode 100644 third-party/DETR/util/__init__.py delete mode 100644 third-party/DETR/util/box_ops.py delete mode 100644 third-party/DETR/util/misc.py delete mode 100644 third-party/DETR/util/plot_utils.py diff --git a/.gitignore b/.gitignore index 47fa832..544ca76 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ data/ -*.pyc \ No newline at end of file +*.pyc +third-party/ \ No newline at end of file diff --git a/ARI/ARIAnnotation.py b/ARI/ARIAnnotation.py index 4428041..988060c 100644 --- a/ARI/ARIAnnotation.py +++ b/ARI/ARIAnnotation.py @@ -1,5 +1,5 @@ import cv2 -from TAU.TAUUtils import draw2DBBOX, draw3DBBOX +from ARI.ARIUtils import draw2DBBOX, draw3DBBOX class ARIAnnotation2D(object): diff --git a/ARI/ARICOCO.py b/ARI/ARICOCO.py index acda13e..6bb1d90 100644 --- a/ARI/ARICOCO.py +++ b/ARI/ARICOCO.py @@ -2,7 +2,7 @@ import cv2 import json from ARI.ARIAnnotation import ARIDataset -from TAU.TAUUtils import draw2DBBOX +from ARI.ARIUtils import draw2DBBOX class ARICOCO(object): diff --git a/TAU/TAUUtils.py b/ARI/ARIUtils.py similarity index 100% rename from TAU/TAUUtils.py rename to ARI/ARIUtils.py diff --git a/ARI/Linemod.py b/ARI/Linemod.py index 2655440..91f2f5c 100644 --- a/ARI/Linemod.py +++ b/ARI/Linemod.py @@ -1,7 +1,7 @@ import os from ARI.ARIAnnotation import ARIAnnotation2D, ARIDataset, ARIImage -from TAU.TAUUtils import * +from ARI.ARIUtils import * class Linemod(ARIDataset): intrinsic = np.array([[572.4114, 0., 325.2611], diff --git a/third-party/DETR/models/transformer.py b/TAU/model/transformer.py similarity index 54% rename from third-party/DETR/models/transformer.py rename to TAU/model/transformer.py index dcd5367..98a2185 100644 --- a/third-party/DETR/models/transformer.py +++ b/TAU/model/transformer.py @@ -1,297 +1,270 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Transformer class. 
- -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers -""" -import copy -from typing import Optional, List - -import torch -import torch.nn.functional as F -from torch import nn, Tensor - - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False, - return_intermediate_dec=False): - super().__init__() - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, - return_intermediate=return_intermediate_dec) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, mask, query_embed, pos_embed): - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - mask = mask.flatten(1) - - tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, - pos=pos_embed, query_pos=query_embed) - return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) - - -class TransformerEncoder(nn.Module): - - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - output = tgt - - intermediate = [] - - for layer in self.layers: - output = layer(output, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - if self.return_intermediate: - intermediate.append(self.norm(output)) - - if self.norm is not None: - output = self.norm(output) - if self.return_intermediate: 
- intermediate.pop() - intermediate.append(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) - - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - src2 = self.norm1(src) - q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -class TransformerDecoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(tgt, 
query_pos) - tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - return self.forward_post(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - ) - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") +import copy +from typing import Optional +from torch import Tensor, nn +import torch +import torch.nn.functional as F + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + # Multi-head self-attention + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # fead-forward + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + # Layer-norm + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + # Dropout + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + # activation + self.activation = 
_get_activation_fn(activation)
+        # other
+        self.normalize_before = normalize_before
+
+    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+        return tensor if pos is None else tensor + pos
+
+    def forward_pre(self, src, src_mask: Optional[Tensor] = None,
+                    src_key_padding_mask: Optional[Tensor] = None,
+                    pos: Optional[Tensor] = None):
+        # self-attention
+        src2 = self.norm1(src)
+        q = k = self.with_pos_embed(src2, pos)
+        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
+                              key_padding_mask=src_key_padding_mask)[0]
+        src = src + self.dropout1(src2)
+        # feed-forward
+        src2 = self.norm2(src)
+        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+        src = src + self.dropout2(src2)
+        return src
+
+    def forward_post(self, src, src_mask: Optional[Tensor] = None,
+                     src_key_padding_mask: Optional[Tensor] = None,
+                     pos: Optional[Tensor] = None):
+        # self-attention
+        q = k = self.with_pos_embed(src, pos)
+        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
+                              key_padding_mask=src_key_padding_mask)[0]
+        src = src + self.dropout1(src2)
+        src = self.norm1(src)
+        # feed-forward
+        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+        src = src + self.dropout2(src2)
+        src = self.norm2(src)
+        return src
+
+    def forward(self, src, src_mask: Optional[Tensor] = None,
+                src_key_padding_mask: Optional[Tensor] = None,
+                pos: Optional[Tensor] = None):
+        if self.normalize_before:
+            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
+        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
+
+
+class TransformerDecoderLayer(nn.Module):
+
+    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                 activation="relu", normalize_before=False):
+        super().__init__()
+        # self-attention over the queries, cross-attention over the encoder memory
+        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+        # feed-forward
+        self.linear1 = nn.Linear(d_model, dim_feedforward)
+        self.dropout = nn.Dropout(dropout)
+        self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+        self.norm1 = nn.LayerNorm(d_model)
+        self.norm2 = nn.LayerNorm(d_model)
+        self.norm3 = nn.LayerNorm(d_model)
+        self.dropout1 = nn.Dropout(dropout)
+        self.dropout2 = nn.Dropout(dropout)
+        self.dropout3 = nn.Dropout(dropout)
+
+        self.activation = _get_activation_fn(activation)
+        self.normalize_before = normalize_before
+
+    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+        return tensor if pos is None else tensor + pos
+
+    def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
+                    memory_mask: Optional[Tensor] = None,
+                    tgt_key_padding_mask: Optional[Tensor] = None,
+                    memory_key_padding_mask: Optional[Tensor] = None,
+                    pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
+        # self-attention
+        tgt2 = self.norm1(tgt)
+        q = k = self.with_pos_embed(tgt2, query_pos)
+        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
+                              key_padding_mask=tgt_key_padding_mask)[0]
+        tgt = tgt + self.dropout1(tgt2)
+        # cross-attention over the encoder memory
+        tgt2 = self.norm2(tgt)
+        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
+                                   key=self.with_pos_embed(memory, pos),
+                                   value=memory, attn_mask=memory_mask,
+                                   key_padding_mask=memory_key_padding_mask)[0]
+        tgt = tgt + self.dropout2(tgt2)
+        # feed-forward
+        tgt2 = self.norm3(tgt)
+        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+        tgt = tgt + self.dropout3(tgt2)
+        return tgt
+
+    def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
+                     memory_mask: Optional[Tensor] = None,
+                     tgt_key_padding_mask: Optional[Tensor] = None,
+                     memory_key_padding_mask: Optional[Tensor] = None,
+                     pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
+        # multihead self-attention
+        q = k = self.with_pos_embed(tgt, query_pos)
+        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
+                              key_padding_mask=tgt_key_padding_mask)[0]
+        tgt = tgt + self.dropout1(tgt2)
+        tgt = self.norm1(tgt)
+        # cross-attention over the encoder memory
+        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
+                                   key=self.with_pos_embed(memory, pos),
+                                   value=memory, attn_mask=memory_mask,
+                                   key_padding_mask=memory_key_padding_mask)[0]
+        tgt = tgt + self.dropout2(tgt2)
+        tgt = self.norm2(tgt)
+        # feed-forward
+        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+        tgt = tgt + self.dropout3(tgt2)
+        tgt = self.norm3(tgt)
+        return tgt
+
+    def forward(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
+                memory_mask: Optional[Tensor] = None,
+                tgt_key_padding_mask: Optional[Tensor] = None,
+                memory_key_padding_mask: Optional[Tensor] = None,
+                pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
+        if self.normalize_before:
+            return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask,
+                                    memory_key_padding_mask, pos, query_pos)
+        return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask,
+                                 memory_key_padding_mask, pos, query_pos)
+
+
+class TransformerEncoder(nn.Module):
+
+    def __init__(self, encoder_layer, num_layers, norm=None):
+        super().__init__()
+        self.layers = _get_clones(encoder_layer, num_layers)
+        self.num_layers = num_layers
+        self.norm = norm
+
+    def forward(self, src, mask: Optional[Tensor] = None,
+                src_key_padding_mask: Optional[Tensor] = None,
+                pos: Optional[Tensor] = None):
+        output = src
+
+        for layer in self.layers:
+            output = layer(output, src_mask=mask,
+                           src_key_padding_mask=src_key_padding_mask, pos=pos)
+
+        if self.norm is not None:
+            output = self.norm(output)
+
+        return output
+
+
+class TransformerDecoder(nn.Module):
+
+    def __init__(self, decoder_layer, num_layers, norm=None,
+                 return_intermediate=False):
+        super().__init__()
+        self.layers = _get_clones(decoder_layer, num_layers)
+        self.norm = norm
+        self.return_intermediate = return_intermediate
+
+    def forward(self, tgt, memory, tgt_mask: Optional[Tensor] = None,
+                memory_mask: Optional[Tensor] = None,
+                tgt_key_padding_mask: Optional[Tensor] = None,
+                memory_key_padding_mask: Optional[Tensor] = None,
+                pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
+        output = tgt
+        intermediate = []
+
+        for layer in self.layers:
+            output = layer(output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask,
+                           tgt_key_padding_mask=tgt_key_padding_mask,
+                           memory_key_padding_mask=memory_key_padding_mask,
+                           pos=pos, query_pos=query_pos)
+            if self.return_intermediate:
+                intermediate.append(self.norm(output))
+
+        if self.norm is not None:
+            output = self.norm(output)
+            if self.return_intermediate:
+                intermediate.pop()
+                intermediate.append(output)
+
+        if self.return_intermediate:
+            return torch.stack(intermediate)
+
+        return output.unsqueeze(0)
+
+
+class Transformer(nn.Module):
+
+    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
+                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
+                 activation="relu", normalize_before=False,
+                 return_intermediate_dec=False):
+        super().__init__()
+
+        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,
+                                                activation, normalize_before)
+        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
+        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
+                                                activation, normalize_before)
+        decoder_norm = nn.LayerNorm(d_model)
+
+        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
+                                          return_intermediate=return_intermediate_dec)
+
+        self.d_model = d_model
+        self.nhead = nhead
+
+        self._reset_parameters()
+
+    def _reset_parameters(self):
+        for p in self.parameters():
+            if p.dim() > 1:
+                nn.init.xavier_uniform_(p)
+
+    def forward(self, src, mask, query_embed, pos_embed):
+        # flatten NxCxHxW feature maps to HWxNxC for the attention layers
+        bs, c, h, w = src.shape
+        src = src.flatten(2).permute(2, 0, 1)
+        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
+        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
+        mask = mask.flatten(1)
+
+        tgt = torch.zeros_like(query_embed)
+        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
+        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
+                          pos=pos_embed, query_pos=query_embed)
+        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
+
+
+def _get_clones(module, N):
+    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+def _get_activation_fn(activation: str):
+    if activation == "relu":
+        return F.relu
+    if activation == "gelu":
+        return F.gelu
+    if activation == "glu":
+        return F.glu
+
+    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
diff --git a/third-party/DETR/.gitignore b/third-party/DETR/.gitignore
deleted file mode 100644
index 7d776a6..0000000
--- a/third-party/DETR/.gitignore
+++ /dev/null
@@ -1,17 +0,0 @@
-.nfs*
-*.ipynb
-*.pyc
-.dumbo.json
-.DS_Store
-.*.swp
-*.pth
-**/__pycache__/**
-.ipynb_checkpoints/
-datasets/data/
-experiment-*
-*.tmp
-*.pkl
-**/.mypy_cache/*
-.mypy_cache/*
-not_tracked_dir/
-.vscode
diff --git a/third-party/DETR/Dockerfile b/third-party/DETR/Dockerfile
deleted file mode 100644
index 3e6da22..0000000
--- a/third-party/DETR/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-RUN apt-get update -qq && \
-    apt-get install -y git vim libgtk2.0-dev && \
-    rm -rf /var/cache/apk/*
-
-RUN pip --no-cache-dir install Cython
-
-COPY requirements.txt /workspace
-
-RUN pip --no-cache-dir install -r /workspace/requirements.txt
diff --git a/third-party/DETR/LICENSE b/third-party/DETR/LICENSE
deleted file mode 100644
index b1395e9..0000000
--- a/third-party/DETR/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2020 - present, Facebook, Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/third-party/DETR/README.md b/third-party/DETR/README.md deleted file mode 100644 index cd8ac02..0000000 --- a/third-party/DETR/README.md +++ /dev/null @@ -1,263 +0,0 @@ -**DEā«¶TR**: End-to-End Object Detection with Transformers -======== -PyTorch training code and pretrained models for **DETR** (**DE**tection **TR**ansformer). 
-We replace the full complex hand-crafted object detection pipeline with a Transformer, and match Faster R-CNN with a ResNet-50, obtaining **42 AP** on COCO using half the computation power (FLOPs) and the same number of parameters. Inference in 50 lines of PyTorch. - -![DETR](.github/DETR.png) - -**What it is**. Unlike traditional computer vision techniques, DETR approaches object detection as a direct set prediction problem. It consists of a set-based global loss, which forces unique predictions via bipartite matching, and a Transformer encoder-decoder architecture. -Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. Due to this parallel nature, DETR is very fast and efficient. - -**About the code**. We believe that object detection should not be more difficult than classification, -and should not require complex libraries for training and inference. -DETR is very simple to implement and experiment with, and we provide a -[standalone Colab Notebook](https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/detr_demo.ipynb) -showing how to do inference with DETR in only a few lines of PyTorch code. -Training code follows this idea - it is not a library, -but simply a [main.py](main.py) importing model and criterion -definitions with standard training loops. - -Additionnally, we provide a Detectron2 wrapper in the d2/ folder. See the readme there for more information. - -For details see [End-to-End Object Detection with Transformers](https://ai.facebook.com/research/publications/end-to-end-object-detection-with-transformers) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. - -# Model Zoo -We provide baseline DETR and DETR-DC5 models, and plan to include more in future. -AP is computed on COCO 2017 val5k, and inference time is over the first 100 val5k COCO images, -with torchscript transformer. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-|   | name     | backbone | schedule | inf_time | box AP | url           | size  |
-|---|----------|----------|----------|----------|--------|---------------|-------|
-| 0 | DETR     | R50      | 500      | 0.036    | 42.0   | model \| logs | 159Mb |
-| 1 | DETR-DC5 | R50      | 500      | 0.083    | 43.3   | model \| logs | 159Mb |
-| 2 | DETR     | R101     | 500      | 0.050    | 43.5   | model \| logs | 232Mb |
-| 3 | DETR-DC5 | R101     | 500      | 0.097    | 44.9   | model \| logs | 232Mb |
-
-COCO val5k evaluation results can be found in this [gist](https://gist.github.com/szagoruyko/9c9ebb8455610958f7deaa27845d7918).
-
-The models are also available via torch hub,
-to load DETR R50 with pretrained weights simply do:
-```python
-model = torch.hub.load('facebookresearch/detr:main', 'detr_resnet50', pretrained=True)
-```
-
-
-COCO panoptic val5k models:
-
-|   | name     | backbone | box AP | segm AP | PQ   | url      | size  |
-|---|----------|----------|--------|---------|------|----------|-------|
-| 0 | DETR     | R50      | 38.8   | 31.1    | 43.4 | download | 165Mb |
-| 1 | DETR-DC5 | R50      | 40.2   | 31.9    | 44.6 | download | 165Mb |
-| 2 | DETR     | R101     | 40.1   | 33      | 45.1 | download | 237Mb |
- -Checkout our [panoptic colab](https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/DETR_panoptic.ipynb) -to see how to use and visualize DETR's panoptic segmentation prediction. - -# Notebooks - -We provide a few notebooks in colab to help you get a grasp on DETR: -* [DETR's hands on Colab Notebook](https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/detr_attention.ipynb): Shows how to load a model from hub, generate predictions, then visualize the attention of the model (similar to the figures of the paper) -* [Standalone Colab Notebook](https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/detr_demo.ipynb): In this notebook, we demonstrate how to implement a simplified version of DETR from the grounds up in 50 lines of Python, then visualize the predictions. It is a good starting point if you want to gain better understanding the architecture and poke around before diving in the codebase. -* [Panoptic Colab Notebook](https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/DETR_panoptic.ipynb): Demonstrates how to use DETR for panoptic segmentation and plot the predictions. - - -# Usage - Object detection -There are no extra compiled components in DETR and package dependencies are minimal, -so the code is very simple to use. We provide instructions how to install dependencies via conda. -First, clone the repository locally: -``` -git clone https://github.com/facebookresearch/detr.git -``` -Then, install PyTorch 1.5+ and torchvision 0.6+: -``` -conda install -c pytorch pytorch torchvision -``` -Install pycocotools (for evaluation on COCO) and scipy (for training): -``` -conda install cython scipy -pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' -``` -That's it, should be good to train and evaluate detection models. - -(optional) to work with panoptic install panopticapi: -``` -pip install git+https://github.com/cocodataset/panopticapi.git -``` - -## Data preparation - -Download and extract COCO 2017 train and val images with annotations from -[http://cocodataset.org](http://cocodataset.org/#download). -We expect the directory structure to be the following: -``` -path/to/coco/ - annotations/ # annotation json files - train2017/ # train images - val2017/ # val images -``` - -## Training -To train baseline DETR on a single node with 8 gpus for 300 epochs run: -``` -python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --coco_path /path/to/coco -``` -A single epoch takes 28 minutes, so 300 epoch training -takes around 6 days on a single machine with 8 V100 cards. -To ease reproduction of our results we provide -[results and training logs](https://gist.github.com/szagoruyko/b4c3b2c3627294fc369b899987385a3f) -for 150 epoch schedule (3 days on a single machine), achieving 39.5/60.3 AP/AP50. - -We train DETR with AdamW setting learning rate in the transformer to 1e-4 and 1e-5 in the backbone. -Horizontal flips, scales and crops are used for augmentation. -Images are rescaled to have min size 800 and max size 1333. -The transformer is trained with dropout of 0.1, and the whole model is trained with grad clip of 0.1. 
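[Editor's note] The optimizer recipe quoted in the training paragraph above (AdamW, lr 1e-4 in the transformer and 1e-5 in the backbone, gradient clipping of 0.1 over the whole model) can be sketched as follows. This is an illustrative sketch only and is not part of this patch; the `build_detr_optimizer` helper and the `"backbone"` parameter-name prefix are assumptions about how the model's parameters are named.

```python
import torch

def build_detr_optimizer(model, lr=1e-4, lr_backbone=1e-5):
    # Two parameter groups: the transformer and heads at 1e-4, the backbone at 1e-5,
    # matching the learning rates quoted in the paragraph above.
    backbone_params = [p for n, p in model.named_parameters()
                       if "backbone" in n and p.requires_grad]
    other_params = [p for n, p in model.named_parameters()
                    if "backbone" not in n and p.requires_grad]
    return torch.optim.AdamW([{"params": other_params, "lr": lr},
                              {"params": backbone_params, "lr": lr_backbone}])

# In the training loop, after loss.backward(), clip gradients over the whole model:
# torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.1)
```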
- - -## Evaluation -To evaluate DETR R50 on COCO val5k with a single GPU run: -``` -python main.py --batch_size 2 --no_aux_loss --eval --resume https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --coco_path /path/to/coco -``` -We provide results for all DETR detection models in this -[gist](https://gist.github.com/szagoruyko/9c9ebb8455610958f7deaa27845d7918). -Note that numbers vary depending on batch size (number of images) per GPU. -Non-DC5 models were trained with batch size 2, and DC5 with 1, -so DC5 models show a significant drop in AP if evaluated with more -than 1 image per GPU. - -## Multinode training -Distributed training is available via Slurm and [submitit](https://github.com/facebookincubator/submitit): -``` -pip install submitit -``` -Train baseline DETR-6-6 model on 4 nodes for 300 epochs: -``` -python run_with_submitit.py --timeout 3000 --coco_path /path/to/coco -``` - -# Usage - Segmentation - -We show that it is relatively straightforward to extend DETR to predict segmentation masks. We mainly demonstrate strong panoptic segmentation results. - -## Data preparation - -For panoptic segmentation, you need the panoptic annotations additionally to the coco dataset (see above for the coco dataset). You need to download and extract the [annotations](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip). -We expect the directory structure to be the following: -``` -path/to/coco_panoptic/ - annotations/ # annotation json files - panoptic_train2017/ # train panoptic annotations - panoptic_val2017/ # val panoptic annotations -``` - -## Training - -We recommend training segmentation in two stages: first train DETR to detect all the boxes, and then train the segmentation head. -For panoptic segmentation, DETR must learn to detect boxes for both stuff and things classes. You can train it on a single node with 8 gpus for 300 epochs with: -``` -python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --coco_path /path/to/coco --coco_panoptic_path /path/to/coco_panoptic --dataset_file coco_panoptic --output_dir /output/path/box_model -``` -For instance segmentation, you can simply train a normal box model (or used a pre-trained one we provide). - -Once you have a box model checkpoint, you need to freeze it, and train the segmentation head in isolation. -For panoptic segmentation you can train on a single node with 8 gpus for 25 epochs: -``` -python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --masks --epochs 25 --lr_drop 15 --coco_path /path/to/coco --coco_panoptic_path /path/to/coco_panoptic --dataset_file coco_panoptic --frozen_weights /output/path/box_model/checkpoint.pth --output_dir /output/path/segm_model -``` -For instance segmentation only, simply remove the `dataset_file` and `coco_panoptic_path` arguments from the above command line. - -# License -DETR is released under the Apache 2.0 license. Please see the [LICENSE](LICENSE) file for more information. - -# Contributing -We actively welcome your pull requests! Please see [CONTRIBUTING.md](.github/CONTRIBUTING.md) and [CODE_OF_CONDUCT.md](.github/CODE_OF_CONDUCT.md) for more info. diff --git a/third-party/DETR/d2/README.md b/third-party/DETR/d2/README.md deleted file mode 100644 index 7f1d753..0000000 --- a/third-party/DETR/d2/README.md +++ /dev/null @@ -1,39 +0,0 @@ -Detectron2 wrapper for DETR -======= - -We provide a Detectron2 wrapper for DETR, thus providing a way to better integrate it in the existing detection ecosystem. 
It can be used for example to easily leverage datasets or backbones provided in Detectron2. - -This wrapper currently supports only box detection, and is intended to be as close as possible to the original implementation, and we checked that it indeed match the results. Some notable facts and caveats: -- The data augmentation matches DETR's original data augmentation. This required patching the RandomCrop augmentation from Detectron2, so you'll need a version from the master branch from June 24th 2020 or more recent. -- To match DETR's original backbone initialization, we use the weights of a ResNet50 trained on imagenet using torchvision. This network uses a different pixel mean and std than most of the backbones available in Detectron2 by default, so extra care must be taken when switching to another one. Note that no other torchvision models are available in Detectron2 as of now, though it may change in the future. -- The gradient clipping mode is "full_model", which is not the default in Detectron2. - -# Usage - -To install Detectron2, please follow the [official installation instructions](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). - -## Evaluating a model - -For convenience, we provide a conversion script to convert models trained by the main DETR training loop into the format of this wrapper. To download and convert the main Resnet50 model, simply do: - -``` -python converter.py --source_model https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --output_model converted_model.pth -``` - -You can then evaluate it using: -``` -python train_net.py --eval-only --config configs/detr_256_6_6_torchvision.yaml MODEL.WEIGHTS "converted_model.pth" -``` - - -## Training - -To train DETR on a single node with 8 gpus, simply use: -``` -python train_net.py --config configs/detr_256_6_6_torchvision.yaml --num-gpus 8 -``` - -To fine-tune DETR for instance segmentation on a single node with 8 gpus, simply use: -``` -python train_net.py --config configs/detr_segm_256_6_6_torchvision.yaml --num-gpus 8 MODEL.DETR.FROZEN_WEIGHTS -``` diff --git a/third-party/DETR/d2/configs/detr_256_6_6_torchvision.yaml b/third-party/DETR/d2/configs/detr_256_6_6_torchvision.yaml deleted file mode 100644 index 25d6418..0000000 --- a/third-party/DETR/d2/configs/detr_256_6_6_torchvision.yaml +++ /dev/null @@ -1,45 +0,0 @@ -MODEL: - META_ARCHITECTURE: "Detr" - WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" - PIXEL_MEAN: [123.675, 116.280, 103.530] - PIXEL_STD: [58.395, 57.120, 57.375] - MASK_ON: False - RESNETS: - DEPTH: 50 - STRIDE_IN_1X1: False - OUT_FEATURES: ["res2", "res3", "res4", "res5"] - DETR: - GIOU_WEIGHT: 2.0 - L1_WEIGHT: 5.0 - NUM_OBJECT_QUERIES: 100 -DATASETS: - TRAIN: ("coco_2017_train",) - TEST: ("coco_2017_val",) -SOLVER: - IMS_PER_BATCH: 64 - BASE_LR: 0.0001 - STEPS: (369600,) - MAX_ITER: 554400 - WARMUP_FACTOR: 1.0 - WARMUP_ITERS: 10 - WEIGHT_DECAY: 0.0001 - OPTIMIZER: "ADAMW" - BACKBONE_MULTIPLIER: 0.1 - CLIP_GRADIENTS: - ENABLED: True - CLIP_TYPE: "full_model" - CLIP_VALUE: 0.01 - NORM_TYPE: 2.0 -INPUT: - MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) - CROP: - ENABLED: True - TYPE: "absolute_range" - SIZE: (384, 600) - FORMAT: "RGB" -TEST: - EVAL_PERIOD: 4000 -DATALOADER: - FILTER_EMPTY_ANNOTATIONS: False - NUM_WORKERS: 4 -VERSION: 2 diff --git a/third-party/DETR/d2/configs/detr_segm_256_6_6_torchvision.yaml b/third-party/DETR/d2/configs/detr_segm_256_6_6_torchvision.yaml deleted file mode 100644 index ade490e..0000000 --- 
a/third-party/DETR/d2/configs/detr_segm_256_6_6_torchvision.yaml +++ /dev/null @@ -1,46 +0,0 @@ -MODEL: - META_ARCHITECTURE: "Detr" -# WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" - PIXEL_MEAN: [123.675, 116.280, 103.530] - PIXEL_STD: [58.395, 57.120, 57.375] - MASK_ON: True - RESNETS: - DEPTH: 50 - STRIDE_IN_1X1: False - OUT_FEATURES: ["res2", "res3", "res4", "res5"] - DETR: - GIOU_WEIGHT: 2.0 - L1_WEIGHT: 5.0 - NUM_OBJECT_QUERIES: 100 - FROZEN_WEIGHTS: '' -DATASETS: - TRAIN: ("coco_2017_train",) - TEST: ("coco_2017_val",) -SOLVER: - IMS_PER_BATCH: 64 - BASE_LR: 0.0001 - STEPS: (55440,) - MAX_ITER: 92400 - WARMUP_FACTOR: 1.0 - WARMUP_ITERS: 10 - WEIGHT_DECAY: 0.0001 - OPTIMIZER: "ADAMW" - BACKBONE_MULTIPLIER: 0.1 - CLIP_GRADIENTS: - ENABLED: True - CLIP_TYPE: "full_model" - CLIP_VALUE: 0.01 - NORM_TYPE: 2.0 -INPUT: - MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) - CROP: - ENABLED: True - TYPE: "absolute_range" - SIZE: (384, 600) - FORMAT: "RGB" -TEST: - EVAL_PERIOD: 4000 -DATALOADER: - FILTER_EMPTY_ANNOTATIONS: False - NUM_WORKERS: 4 -VERSION: 2 diff --git a/third-party/DETR/d2/converter.py b/third-party/DETR/d2/converter.py deleted file mode 100644 index 6fa5ff4..0000000 --- a/third-party/DETR/d2/converter.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version. -""" -import json -import argparse - -import numpy as np -import torch - - -def parse_args(): - parser = argparse.ArgumentParser("D2 model converter") - - parser.add_argument("--source_model", default="", type=str, help="Path or url to the DETR model to convert") - parser.add_argument("--output_model", default="", type=str, help="Path where to save the converted model") - return parser.parse_args() - - -def main(): - args = parse_args() - - # D2 expects contiguous classes, so we need to remap the 92 classes from DETR - # fmt: off - coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91] - # fmt: on - - coco_idx = np.array(coco_idx) - - if args.source_model.startswith("https"): - checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True) - else: - checkpoint = torch.load(args.source_model, map_location="cpu") - model_to_convert = checkpoint["model"] - - model_converted = {} - for k in model_to_convert.keys(): - old_k = k - if "backbone" in k: - k = k.replace("backbone.0.body.", "") - if "layer" not in k: - k = "stem." + k - for t in [1, 2, 3, 4]: - k = k.replace(f"layer{t}", f"res{t + 1}") - for t in [1, 2, 3]: - k = k.replace(f"bn{t}", f"conv{t}.norm") - k = k.replace("downsample.0", "shortcut") - k = k.replace("downsample.1", "shortcut.norm") - k = "backbone.0.backbone." + k - k = "detr." 
+ k - print(old_k, "->", k) - if "class_embed" in old_k: - v = model_to_convert[old_k].detach() - if v.shape[0] == 92: - shape_old = v.shape - model_converted[k] = v[coco_idx] - print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape)) - continue - model_converted[k] = model_to_convert[old_k].detach() - - model_to_save = {"model": model_converted} - torch.save(model_to_save, args.output_model) - - -if __name__ == "__main__": - main() diff --git a/third-party/DETR/d2/detr/__init__.py b/third-party/DETR/d2/detr/__init__.py deleted file mode 100644 index a618f82..0000000 --- a/third-party/DETR/d2/detr/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .config import add_detr_config -from .detr import Detr -from .dataset_mapper import DetrDatasetMapper diff --git a/third-party/DETR/d2/detr/config.py b/third-party/DETR/d2/detr/config.py deleted file mode 100644 index 9ea267d..0000000 --- a/third-party/DETR/d2/detr/config.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from detectron2.config import CfgNode as CN - - -def add_detr_config(cfg): - """ - Add config for DETR. - """ - cfg.MODEL.DETR = CN() - cfg.MODEL.DETR.NUM_CLASSES = 80 - - # For Segmentation - cfg.MODEL.DETR.FROZEN_WEIGHTS = '' - - # LOSS - cfg.MODEL.DETR.GIOU_WEIGHT = 2.0 - cfg.MODEL.DETR.L1_WEIGHT = 5.0 - cfg.MODEL.DETR.DEEP_SUPERVISION = True - cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1 - - # TRANSFORMER - cfg.MODEL.DETR.NHEADS = 8 - cfg.MODEL.DETR.DROPOUT = 0.1 - cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048 - cfg.MODEL.DETR.ENC_LAYERS = 6 - cfg.MODEL.DETR.DEC_LAYERS = 6 - cfg.MODEL.DETR.PRE_NORM = False - - cfg.MODEL.DETR.HIDDEN_DIM = 256 - cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100 - - cfg.SOLVER.OPTIMIZER = "ADAMW" - cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1 diff --git a/third-party/DETR/d2/detr/dataset_mapper.py b/third-party/DETR/d2/detr/dataset_mapper.py deleted file mode 100644 index f428a49..0000000 --- a/third-party/DETR/d2/detr/dataset_mapper.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import logging - -import numpy as np -import torch - -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T -from detectron2.data.transforms import TransformGen - -__all__ = ["DetrDatasetMapper"] - - -def build_transform_gen(cfg, is_train): - """ - Create a list of :class:`TransformGen` from config. - Returns: - list[TransformGen] - """ - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN - max_size = cfg.INPUT.MAX_SIZE_TRAIN - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - if sample_style == "range": - assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) - - logger = logging.getLogger(__name__) - tfm_gens = [] - if is_train: - tfm_gens.append(T.RandomFlip()) - tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) - if is_train: - logger.info("TransformGens used in training: " + str(tfm_gens)) - return tfm_gens - - -class DetrDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by DETR. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. 
Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - def __init__(self, cfg, is_train=True): - if cfg.INPUT.CROP.ENABLED and is_train: - self.crop_gen = [ - T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), - T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), - ] - else: - self.crop_gen = None - - self.mask_on = cfg.MODEL.MASK_ON - self.tfm_gens = build_transform_gen(cfg, is_train) - logging.getLogger(__name__).info( - "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) - ) - - self.img_format = cfg.INPUT.FORMAT - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - if self.crop_gen is None: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - if np.random.rand() > 0.5: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - image, transforms = T.apply_transform_gens( - self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image - ) - - image_shape = image.shape[:2] # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations(obj, transforms, image_shape) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - dataset_dict["instances"] = utils.filter_empty_instances(instances) - return dataset_dict diff --git a/third-party/DETR/d2/detr/detr.py b/third-party/DETR/d2/detr/detr.py deleted file mode 100644 index 95f89df..0000000 --- a/third-party/DETR/d2/detr/detr.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import logging -import math -from typing import List - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn.functional as F -from scipy.optimize import linear_sum_assignment -from torch import nn - -from detectron2.layers import ShapeSpec -from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess -from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks -from detectron2.utils.logger import log_first_n -from fvcore.nn import giou_loss, smooth_l1_loss -from models.backbone import Joiner -from models.detr import DETR, SetCriterion -from models.matcher import HungarianMatcher -from models.position_encoding import PositionEmbeddingSine -from models.transformer import Transformer -from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm -from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh -from util.misc import NestedTensor -from datasets.coco import convert_coco_poly_to_mask - -__all__ = ["Detr"] - - -class MaskedBackbone(nn.Module): - """ This is a thin wrapper around D2's backbone to provide padding masking""" - - def __init__(self, cfg): - super().__init__() - self.backbone = build_backbone(cfg) - backbone_shape = self.backbone.output_shape() - self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()] - self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels - - def forward(self, images): - features = self.backbone(images.tensor) - masks = self.mask_out_padding( - [features_per_level.shape for features_per_level in features.values()], - images.image_sizes, - images.tensor.device, - ) - assert len(features) == len(masks) - for i, k in enumerate(features.keys()): - features[k] = NestedTensor(features[k], masks[i]) - return features - - def mask_out_padding(self, feature_shapes, image_sizes, device): - masks = [] - assert len(feature_shapes) == len(self.feature_strides) - for idx, shape in enumerate(feature_shapes): - N, _, H, W = shape - masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device) - for img_idx, (h, w) in enumerate(image_sizes): - masks_per_feature_level[ - img_idx, - : int(np.ceil(float(h) / self.feature_strides[idx])), - : int(np.ceil(float(w) / self.feature_strides[idx])), - ] = 0 - masks.append(masks_per_feature_level) - return masks - - -@META_ARCH_REGISTRY.register() -class Detr(nn.Module): - """ - Implement Detr - """ - - def __init__(self, cfg): - super().__init__() - - self.device = torch.device(cfg.MODEL.DEVICE) - - self.num_classes = cfg.MODEL.DETR.NUM_CLASSES - self.mask_on = cfg.MODEL.MASK_ON - hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM - num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES - # Transformer parameters: - nheads = cfg.MODEL.DETR.NHEADS - dropout = cfg.MODEL.DETR.DROPOUT - dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD - enc_layers = cfg.MODEL.DETR.ENC_LAYERS - dec_layers = cfg.MODEL.DETR.DEC_LAYERS - pre_norm = cfg.MODEL.DETR.PRE_NORM - - # Loss parameters: - giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT - l1_weight = cfg.MODEL.DETR.L1_WEIGHT - deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT - - N_steps = hidden_dim // 2 - d2_backbone = MaskedBackbone(cfg) - backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True)) - backbone.num_channels = d2_backbone.num_channels - - transformer = Transformer( - d_model=hidden_dim, - dropout=dropout, - nhead=nheads, - 
dim_feedforward=dim_feedforward, - num_encoder_layers=enc_layers, - num_decoder_layers=dec_layers, - normalize_before=pre_norm, - return_intermediate_dec=deep_supervision, - ) - - self.detr = DETR( - backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision - ) - if self.mask_on: - frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS - if frozen_weights != '': - print("LOAD pre-trained weights") - weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model'] - new_weight = {} - for k, v in weight.items(): - if 'detr.' in k: - new_weight[k.replace('detr.', '')] = v - else: - print(f"Skipping loading weight {k} from frozen model") - del weight - self.detr.load_state_dict(new_weight) - del new_weight - self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != '')) - self.seg_postprocess = PostProcessSegm - - self.detr.to(self.device) - - # building criterion - matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight) - weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight} - weight_dict["loss_giou"] = giou_weight - if deep_supervision: - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - losses = ["labels", "boxes", "cardinality"] - if self.mask_on: - losses += ["masks"] - self.criterion = SetCriterion( - self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, - ) - self.criterion.to(self.device) - - pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) - pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) - self.normalizer = lambda x: (x - pixel_mean) / pixel_std - self.to(self.device) - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper` . - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - - * image: Tensor, image in (C, H, W) format. - * instances: Instances - - Other information that's included in the original dicts, such as: - - * "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - Returns: - dict[str: Tensor]: - mapping from a named loss to a tensor storing the loss. Used during training only. 
- """ - images = self.preprocess_image(batched_inputs) - output = self.detr(images) - - if self.training: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - - targets = self.prepare_targets(gt_instances) - loss_dict = self.criterion(output, targets) - weight_dict = self.criterion.weight_dict - for k in loss_dict.keys(): - if k in weight_dict: - loss_dict[k] *= weight_dict[k] - return loss_dict - else: - box_cls = output["pred_logits"] - box_pred = output["pred_boxes"] - mask_pred = output["pred_masks"] if self.mask_on else None - results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes) - processed_results = [] - for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = detector_postprocess(results_per_image, height, width) - processed_results.append({"instances": r}) - return processed_results - - def prepare_targets(self, targets): - new_targets = [] - for targets_per_image in targets: - h, w = targets_per_image.image_size - image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device) - gt_classes = targets_per_image.gt_classes - gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy - gt_boxes = box_xyxy_to_cxcywh(gt_boxes) - new_targets.append({"labels": gt_classes, "boxes": gt_boxes}) - if self.mask_on and hasattr(targets_per_image, 'gt_masks'): - gt_masks = targets_per_image.gt_masks - gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w) - new_targets[-1].update({'masks': gt_masks}) - return new_targets - - def inference(self, box_cls, box_pred, mask_pred, image_sizes): - """ - Arguments: - box_cls (Tensor): tensor of shape (batch_size, num_queries, K). - The tensor predicts the classification probability for each query. - box_pred (Tensor): tensors of shape (batch_size, num_queries, 4). - The tensor predicts 4-vector (x,y,w,h) box - regression values for every queryx - image_sizes (List[torch.Size]): the input image sizes - - Returns: - results (List[Instances]): a list of #images elements. - """ - assert len(box_cls) == len(image_sizes) - results = [] - - # For each box we assign the best class or the second best if the best on is `no_object`. - scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1) - - for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip( - scores, labels, box_pred, image_sizes - )): - result = Instances(image_size) - result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image)) - - result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0]) - if self.mask_on: - mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False) - mask = mask[0].sigmoid() > 0.5 - B, N, H, W = mask_pred.shape - mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32) - result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device) - - result.scores = scores_per_image - result.pred_classes = labels_per_image - results.append(result) - return results - - def preprocess_image(self, batched_inputs): - """ - Normalize, pad and batch the input images. 
- """ - images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs] - images = ImageList.from_tensors(images) - return images diff --git a/third-party/DETR/d2/train_net.py b/third-party/DETR/d2/train_net.py deleted file mode 100644 index 82f6929..0000000 --- a/third-party/DETR/d2/train_net.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Training Script. - -This script is a simplified version of the training script in detectron2/tools. -""" -import os -import sys -import itertools - -# fmt: off -sys.path.insert(1, os.path.join(sys.path[0], '..')) -# fmt: on - -import time -from typing import Any, Dict, List, Set - -import torch - -import detectron2.utils.comm as comm -from d2.detr import DetrDatasetMapper, add_detr_config -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg -from detectron2.data import MetadataCatalog, build_detection_train_loader -from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch -from detectron2.evaluation import COCOEvaluator, verify_results - -from detectron2.solver.build import maybe_add_gradient_clipping - - -class Trainer(DefaultTrainer): - """ - Extension of the Trainer class adapted to DETR. - """ - - @classmethod - def build_evaluator(cls, cfg, dataset_name, output_folder=None): - """ - Create evaluator(s) for a given dataset. - This uses the special metadata "evaluator_type" associated with each builtin dataset. - For your own dataset, you can simply create an evaluator manually in your - script and do not have to worry about the hacky if-else logic here. - """ - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - return COCOEvaluator(dataset_name, cfg, True, output_folder) - - @classmethod - def build_train_loader(cls, cfg): - if "Detr" == cfg.MODEL.META_ARCHITECTURE: - mapper = DetrDatasetMapper(cfg, True) - else: - mapper = None - return build_detection_train_loader(cfg, mapper=mapper) - - @classmethod - def build_optimizer(cls, cfg, model): - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - for key, value in model.named_parameters(recurse=True): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - lr = cfg.SOLVER.BASE_LR - weight_decay = cfg.SOLVER.WEIGHT_DECAY - if "backbone" in key: - lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER - params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] - - def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class - # detectron2 doesn't have full model gradient clipping now - clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE - enable = ( - cfg.SOLVER.CLIP_GRADIENTS.ENABLED - and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" - and clip_norm_val > 0.0 - ) - - class FullModelGradientClippingOptimizer(optim): - def step(self, closure=None): - all_params = itertools.chain(*[x["params"] for x in self.param_groups]) - torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) - super().step(closure=closure) - - return FullModelGradientClippingOptimizer if enable else optim - - optimizer_type = cfg.SOLVER.OPTIMIZER - if optimizer_type == "SGD": - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( - params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM - ) - elif optimizer_type == "ADAMW": - optimizer = 
maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( - params, cfg.SOLVER.BASE_LR - ) - else: - raise NotImplementedError(f"no optimizer type {optimizer_type}") - if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": - optimizer = maybe_add_gradient_clipping(cfg, optimizer) - return optimizer - - -def setup(args): - """ - Create configs and perform basic setups. - """ - cfg = get_cfg() - add_detr_config(cfg) - cfg.merge_from_file(args.config_file) - cfg.merge_from_list(args.opts) - cfg.freeze() - default_setup(cfg, args) - return cfg - - -def main(args): - cfg = setup(args) - - if args.eval_only: - model = Trainer.build_model(cfg) - DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume) - res = Trainer.test(cfg, model) - if comm.is_main_process(): - verify_results(cfg, res) - return res - - trainer = Trainer(cfg) - trainer.resume_or_load(resume=args.resume) - return trainer.train() - - -if __name__ == "__main__": - args = default_argument_parser().parse_args() - print("Command Line Args:", args) - launch( - main, - args.num_gpus, - num_machines=args.num_machines, - machine_rank=args.machine_rank, - dist_url=args.dist_url, - args=(args,), - ) diff --git a/third-party/DETR/datasets/__init__.py b/third-party/DETR/datasets/__init__.py deleted file mode 100644 index 571b126..0000000 --- a/third-party/DETR/datasets/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import torch.utils.data -import torchvision - -from .coco import build as build_coco - - -def get_coco_api_from_dataset(dataset): - for _ in range(10): - # if isinstance(dataset, torchvision.datasets.CocoDetection): - # break - if isinstance(dataset, torch.utils.data.Subset): - dataset = dataset.dataset - if isinstance(dataset, torchvision.datasets.CocoDetection): - return dataset.coco - - -def build_dataset(image_set, args): - if args.dataset_file == 'coco': - return build_coco(image_set, args) - if args.dataset_file == 'coco_panoptic': - # to avoid making panopticapi required for coco - from .coco_panoptic import build as build_coco_panoptic - return build_coco_panoptic(image_set, args) - raise ValueError(f'dataset {args.dataset_file} not supported') diff --git a/third-party/DETR/datasets/coco.py b/third-party/DETR/datasets/coco.py deleted file mode 100644 index 93a436b..0000000 --- a/third-party/DETR/datasets/coco.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -COCO dataset which returns image_id for evaluation. 
- -Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py -""" -from pathlib import Path - -import torch -import torch.utils.data -import torchvision -from pycocotools import mask as coco_mask - -import datasets.transforms as T - - -class CocoDetection(torchvision.datasets.CocoDetection): - def __init__(self, img_folder, ann_file, transforms, return_masks): - super(CocoDetection, self).__init__(img_folder, ann_file) - self._transforms = transforms - self.prepare = ConvertCocoPolysToMask(return_masks) - - def __getitem__(self, idx): - img, target = super(CocoDetection, self).__getitem__(idx) - image_id = self.ids[idx] - target = {'image_id': image_id, 'annotations': target} - img, target = self.prepare(img, target) - if self._transforms is not None: - img, target = self._transforms(img, target) - return img, target - - -def convert_coco_poly_to_mask(segmentations, height, width): - masks = [] - for polygons in segmentations: - rles = coco_mask.frPyObjects(polygons, height, width) - mask = coco_mask.decode(rles) - if len(mask.shape) < 3: - mask = mask[..., None] - mask = torch.as_tensor(mask, dtype=torch.uint8) - mask = mask.any(dim=2) - masks.append(mask) - if masks: - masks = torch.stack(masks, dim=0) - else: - masks = torch.zeros((0, height, width), dtype=torch.uint8) - return masks - - -class ConvertCocoPolysToMask(object): - def __init__(self, return_masks=False): - self.return_masks = return_masks - - def __call__(self, image, target): - w, h = image.size - - image_id = target["image_id"] - image_id = torch.tensor([image_id]) - - anno = target["annotations"] - - anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] - - boxes = [obj["bbox"] for obj in anno] - # guard against no boxes via resizing - boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) - boxes[:, 2:] += boxes[:, :2] - boxes[:, 0::2].clamp_(min=0, max=w) - boxes[:, 1::2].clamp_(min=0, max=h) - - classes = [obj["category_id"] for obj in anno] - classes = torch.tensor(classes, dtype=torch.int64) - - if self.return_masks: - segmentations = [obj["segmentation"] for obj in anno] - masks = convert_coco_poly_to_mask(segmentations, h, w) - - keypoints = None - if anno and "keypoints" in anno[0]: - keypoints = [obj["keypoints"] for obj in anno] - keypoints = torch.as_tensor(keypoints, dtype=torch.float32) - num_keypoints = keypoints.shape[0] - if num_keypoints: - keypoints = keypoints.view(num_keypoints, -1, 3) - - keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) - boxes = boxes[keep] - classes = classes[keep] - if self.return_masks: - masks = masks[keep] - if keypoints is not None: - keypoints = keypoints[keep] - - target = {} - target["boxes"] = boxes - target["labels"] = classes - if self.return_masks: - target["masks"] = masks - target["image_id"] = image_id - if keypoints is not None: - target["keypoints"] = keypoints - - # for conversion to coco api - area = torch.tensor([obj["area"] for obj in anno]) - iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]) - target["area"] = area[keep] - target["iscrowd"] = iscrowd[keep] - - target["orig_size"] = torch.as_tensor([int(h), int(w)]) - target["size"] = torch.as_tensor([int(h), int(w)]) - - return image, target - - -def make_coco_transforms(image_set): - - normalize = T.Compose([ - T.ToTensor(), - T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - - scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800] - - if image_set == 
'train': - return T.Compose([ - T.RandomHorizontalFlip(), - T.RandomSelect( - T.RandomResize(scales, max_size=1333), - T.Compose([ - T.RandomResize([400, 500, 600]), - T.RandomSizeCrop(384, 600), - T.RandomResize(scales, max_size=1333), - ]) - ), - normalize, - ]) - - if image_set == 'val': - return T.Compose([ - T.RandomResize([800], max_size=1333), - normalize, - ]) - - raise ValueError(f'unknown {image_set}') - - -def build(image_set, args): - root = Path(args.coco_path) - assert root.exists(), f'provided COCO path {root} does not exist' - mode = 'instances' - PATHS = { - "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), - "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), - } - - img_folder, ann_file = PATHS[image_set] - dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks) - return dataset diff --git a/third-party/DETR/datasets/coco_eval.py b/third-party/DETR/datasets/coco_eval.py deleted file mode 100644 index 9487c08..0000000 --- a/third-party/DETR/datasets/coco_eval.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -COCO evaluator that works in distributed mode. - -Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py -The difference is that there is less copy-pasting from pycocotools -in the end of the file, as python3 can suppress prints with contextlib -""" -import os -import contextlib -import copy -import numpy as np -import torch - -from pycocotools.cocoeval import COCOeval -from pycocotools.coco import COCO -import pycocotools.mask as mask_util - -from util.misc import all_gather - - -class CocoEvaluator(object): - def __init__(self, coco_gt, iou_types): - assert isinstance(iou_types, (list, tuple)) - coco_gt = copy.deepcopy(coco_gt) - self.coco_gt = coco_gt - - self.iou_types = iou_types - self.coco_eval = {} - for iou_type in iou_types: - self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) - - self.img_ids = [] - self.eval_imgs = {k: [] for k in iou_types} - - def update(self, predictions): - img_ids = list(np.unique(list(predictions.keys()))) - self.img_ids.extend(img_ids) - - for iou_type in self.iou_types: - results = self.prepare(predictions, iou_type) - - # suppress pycocotools prints - with open(os.devnull, 'w') as devnull: - with contextlib.redirect_stdout(devnull): - coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() - coco_eval = self.coco_eval[iou_type] - - coco_eval.cocoDt = coco_dt - coco_eval.params.imgIds = list(img_ids) - img_ids, eval_imgs = evaluate(coco_eval) - - self.eval_imgs[iou_type].append(eval_imgs) - - def synchronize_between_processes(self): - for iou_type in self.iou_types: - self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) - create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) - - def accumulate(self): - for coco_eval in self.coco_eval.values(): - coco_eval.accumulate() - - def summarize(self): - for iou_type, coco_eval in self.coco_eval.items(): - print("IoU metric: {}".format(iou_type)) - coco_eval.summarize() - - def prepare(self, predictions, iou_type): - if iou_type == "bbox": - return self.prepare_for_coco_detection(predictions) - elif iou_type == "segm": - return self.prepare_for_coco_segmentation(predictions) - elif iou_type == "keypoints": - return self.prepare_for_coco_keypoint(predictions) - else: - raise ValueError("Unknown 
iou type {}".format(iou_type)) - - def prepare_for_coco_detection(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return coco_results - - def prepare_for_coco_segmentation(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - scores = prediction["scores"] - labels = prediction["labels"] - masks = prediction["masks"] - - masks = masks > 0.5 - - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - rles = [ - mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] - for mask in masks - ] - for rle in rles: - rle["counts"] = rle["counts"].decode("utf-8") - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "segmentation": rle, - "score": scores[k], - } - for k, rle in enumerate(rles) - ] - ) - return coco_results - - def prepare_for_coco_keypoint(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - keypoints = prediction["keypoints"] - keypoints = keypoints.flatten(start_dim=1).tolist() - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - 'keypoints': keypoint, - "score": scores[k], - } - for k, keypoint in enumerate(keypoints) - ] - ) - return coco_results - - -def convert_to_xywh(boxes): - xmin, ymin, xmax, ymax = boxes.unbind(1) - return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) - - -def merge(img_ids, eval_imgs): - all_img_ids = all_gather(img_ids) - all_eval_imgs = all_gather(eval_imgs) - - merged_img_ids = [] - for p in all_img_ids: - merged_img_ids.extend(p) - - merged_eval_imgs = [] - for p in all_eval_imgs: - merged_eval_imgs.append(p) - - merged_img_ids = np.array(merged_img_ids) - merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) - - # keep only unique (and in sorted order) images - merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) - merged_eval_imgs = merged_eval_imgs[..., idx] - - return merged_img_ids, merged_eval_imgs - - -def create_common_coco_eval(coco_eval, img_ids, eval_imgs): - img_ids, eval_imgs = merge(img_ids, eval_imgs) - img_ids = list(img_ids) - eval_imgs = list(eval_imgs.flatten()) - - coco_eval.evalImgs = eval_imgs - coco_eval.params.imgIds = img_ids - coco_eval._paramsEval = copy.deepcopy(coco_eval.params) - - -################################################################# -# From pycocotools, just removed the prints and fixed -# a Python3 bug about unicode not defined -################################################################# - - -def evaluate(self): - ''' - Run per image evaluation on given images and store results (a list of dict) in self.evalImgs - :return: None - ''' - # tic = time.time() - # print('Running per image evaluation...') - p = self.params - # add backward compatibility if useSegm is specified in params - if p.useSegm is not None: - p.iouType = 'segm' 
if p.useSegm == 1 else 'bbox' - print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType)) - # print('Evaluate annotation type *{}*'.format(p.iouType)) - p.imgIds = list(np.unique(p.imgIds)) - if p.useCats: - p.catIds = list(np.unique(p.catIds)) - p.maxDets = sorted(p.maxDets) - self.params = p - - self._prepare() - # loop through images, area range, max detection number - catIds = p.catIds if p.useCats else [-1] - - if p.iouType == 'segm' or p.iouType == 'bbox': - computeIoU = self.computeIoU - elif p.iouType == 'keypoints': - computeIoU = self.computeOks - self.ious = { - (imgId, catId): computeIoU(imgId, catId) - for imgId in p.imgIds - for catId in catIds} - - evaluateImg = self.evaluateImg - maxDet = p.maxDets[-1] - evalImgs = [ - evaluateImg(imgId, catId, areaRng, maxDet) - for catId in catIds - for areaRng in p.areaRng - for imgId in p.imgIds - ] - # this is NOT in the pycocotools code, but could be done outside - evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) - self._paramsEval = copy.deepcopy(self.params) - # toc = time.time() - # print('DONE (t={:0.2f}s).'.format(toc-tic)) - return p.imgIds, evalImgs - -################################################################# -# end of straight copy from pycocotools, just removing the prints -################################################################# diff --git a/third-party/DETR/datasets/coco_panoptic.py b/third-party/DETR/datasets/coco_panoptic.py deleted file mode 100644 index b24f615..0000000 --- a/third-party/DETR/datasets/coco_panoptic.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import json -from pathlib import Path - -import numpy as np -import torch -from PIL import Image - -from panopticapi.utils import rgb2id -from util.box_ops import masks_to_boxes - -from .coco import make_coco_transforms - - -class CocoPanoptic: - def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): - with open(ann_file, 'r') as f: - self.coco = json.load(f) - - # sort 'images' field so that they are aligned with 'annotations' - # i.e., in alphabetical order - self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) - # sanity check - if "annotations" in self.coco: - for img, ann in zip(self.coco['images'], self.coco['annotations']): - assert img['file_name'][:-4] == ann['file_name'][:-4] - - self.img_folder = img_folder - self.ann_folder = ann_folder - self.ann_file = ann_file - self.transforms = transforms - self.return_masks = return_masks - - def __getitem__(self, idx): - ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] - img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') - ann_path = Path(self.ann_folder) / ann_info['file_name'] - - img = Image.open(img_path).convert('RGB') - w, h = img.size - if "segments_info" in ann_info: - masks = np.asarray(Image.open(ann_path), dtype=np.uint32) - masks = rgb2id(masks) - - ids = np.array([ann['id'] for ann in ann_info['segments_info']]) - masks = masks == ids[:, None, None] - - masks = torch.as_tensor(masks, dtype=torch.uint8) - labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) - - target = {} - target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) - if self.return_masks: - target['masks'] = masks - target['labels'] = labels - - target["boxes"] = 
masks_to_boxes(masks) - - target['size'] = torch.as_tensor([int(h), int(w)]) - target['orig_size'] = torch.as_tensor([int(h), int(w)]) - if "segments_info" in ann_info: - for name in ['iscrowd', 'area']: - target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) - - if self.transforms is not None: - img, target = self.transforms(img, target) - - return img, target - - def __len__(self): - return len(self.coco['images']) - - def get_height_and_width(self, idx): - img_info = self.coco['images'][idx] - height = img_info['height'] - width = img_info['width'] - return height, width - - -def build(image_set, args): - img_folder_root = Path(args.coco_path) - ann_folder_root = Path(args.coco_panoptic_path) - assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' - assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' - mode = 'panoptic' - PATHS = { - "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), - "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'), - } - - img_folder, ann_file = PATHS[image_set] - img_folder_path = img_folder_root / img_folder - ann_folder = ann_folder_root / f'{mode}_{img_folder}' - ann_file = ann_folder_root / ann_file - - dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, - transforms=make_coco_transforms(image_set), return_masks=args.masks) - - return dataset diff --git a/third-party/DETR/datasets/panoptic_eval.py b/third-party/DETR/datasets/panoptic_eval.py deleted file mode 100644 index 9cb4f83..0000000 --- a/third-party/DETR/datasets/panoptic_eval.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import json -import os - -import util.misc as utils - -try: - from panopticapi.evaluation import pq_compute -except ImportError: - pass - - -class PanopticEvaluator(object): - def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): - self.gt_json = ann_file - self.gt_folder = ann_folder - if utils.is_main_process(): - if not os.path.exists(output_dir): - os.mkdir(output_dir) - self.output_dir = output_dir - self.predictions = [] - - def update(self, predictions): - for p in predictions: - with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: - f.write(p.pop("png_string")) - - self.predictions += predictions - - def synchronize_between_processes(self): - all_predictions = utils.all_gather(self.predictions) - merged_predictions = [] - for p in all_predictions: - merged_predictions += p - self.predictions = merged_predictions - - def summarize(self): - if utils.is_main_process(): - json_data = {"annotations": self.predictions} - predictions_json = os.path.join(self.output_dir, "predictions.json") - with open(predictions_json, "w") as f: - f.write(json.dumps(json_data)) - return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) - return None diff --git a/third-party/DETR/datasets/transforms.py b/third-party/DETR/datasets/transforms.py deleted file mode 100644 index 0635857..0000000 --- a/third-party/DETR/datasets/transforms.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. 
-""" -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from util.box_ops import box_xyxy_to_cxcywh -from util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? - target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - target["boxes"] = boxes - - if "masks" in target: - target['masks'] = target['masks'].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target['masks'] = interpolate( 
- target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? - target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int): - self.min_size = min_size - self.max_size = max_size - - def __call__(self, img: PIL.Image.Image, target: dict): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - return crop(img, target, region) - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - 
return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/third-party/DETR/engine.py b/third-party/DETR/engine.py deleted file mode 100644 index ac5ea6f..0000000 --- a/third-party/DETR/engine.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Train and eval functions used in main.py -""" -import math -import os -import sys -from typing import Iterable - -import torch - -import util.misc as utils -from datasets.coco_eval import CocoEvaluator -from datasets.panoptic_eval import PanopticEvaluator - - -def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, - data_loader: Iterable, optimizer: torch.optim.Optimizer, - device: torch.device, epoch: int, max_norm: float = 0): - model.train() - criterion.train() - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Epoch: [{}]'.format(epoch) - print_freq = 10 - - for samples, targets in metric_logger.log_every(data_loader, print_freq, header): - samples = samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - outputs = model(samples) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - - # reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) - - loss_value = losses_reduced_scaled.item() - - if not math.isfinite(loss_value): - print("Loss is {}, stopping training".format(loss_value)) - print(loss_dict_reduced) - sys.exit(1) - - optimizer.zero_grad() - losses.backward() - if max_norm > 0: - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) - optimizer.step() - - metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - return {k: meter.global_avg for k, meter in metric_logger.meters.items()} - - -@torch.no_grad() -def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir): - model.eval() - criterion.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Test:' - - iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) - coco_evaluator = CocoEvaluator(base_ds, iou_types) - # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] - - 
panoptic_evaluator = None - if 'panoptic' in postprocessors.keys(): - panoptic_evaluator = PanopticEvaluator( - data_loader.dataset.ann_file, - data_loader.dataset.ann_folder, - output_dir=os.path.join(output_dir, "panoptic_eval"), - ) - - for samples, targets in metric_logger.log_every(data_loader, 10, header): - samples = samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - outputs = model(samples) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - - # reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), - **loss_dict_reduced_scaled, - **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - - orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) - results = postprocessors['bbox'](outputs, orig_target_sizes) - if 'segm' in postprocessors.keys(): - target_sizes = torch.stack([t["size"] for t in targets], dim=0) - results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) - res = {target['image_id'].item(): output for target, output in zip(targets, results)} - if coco_evaluator is not None: - coco_evaluator.update(res) - - if panoptic_evaluator is not None: - res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) - for i, target in enumerate(targets): - image_id = target["image_id"].item() - file_name = f"{image_id:012d}.png" - res_pano[i]["image_id"] = image_id - res_pano[i]["file_name"] = file_name - - panoptic_evaluator.update(res_pano) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - if coco_evaluator is not None: - coco_evaluator.synchronize_between_processes() - if panoptic_evaluator is not None: - panoptic_evaluator.synchronize_between_processes() - - # accumulate predictions from all images - if coco_evaluator is not None: - coco_evaluator.accumulate() - coco_evaluator.summarize() - panoptic_res = None - if panoptic_evaluator is not None: - panoptic_res = panoptic_evaluator.summarize() - stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} - if coco_evaluator is not None: - if 'bbox' in postprocessors.keys(): - stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() - if 'segm' in postprocessors.keys(): - stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() - if panoptic_res is not None: - stats['PQ_all'] = panoptic_res["All"] - stats['PQ_th'] = panoptic_res["Things"] - stats['PQ_st'] = panoptic_res["Stuff"] - return stats, coco_evaluator diff --git a/third-party/DETR/hubconf.py b/third-party/DETR/hubconf.py deleted file mode 100644 index 328c330..0000000 --- a/third-party/DETR/hubconf.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import torch - -from models.backbone import Backbone, Joiner -from models.detr import DETR, PostProcess -from models.position_encoding import PositionEmbeddingSine -from models.segmentation import DETRsegm, PostProcessPanoptic -from models.transformer import Transformer - -dependencies = ["torch", "torchvision"] - - -def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False): - hidden_dim = 256 - backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation) - pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True) - backbone_with_pos_enc = Joiner(backbone, pos_enc) - backbone_with_pos_enc.num_channels = backbone.num_channels - transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True) - detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100) - if mask: - return DETRsegm(detr) - return detr - - -def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR R50 with 6 encoder and 6 decoder layers. - - Achieves 42/62.4 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet50", dilation=False, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R50 with 6 encoder and 6 decoder layers. - - The last block of ResNet-50 has dilation to increase - output resolution. - Achieves 43.3/63.1 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet50", dilation=True, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - Achieves 43.5/63.8 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet101", dilation=False, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - The last block of ResNet-101 has dilation to increase - output resolution. - Achieves 44.9/64.7 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet101", dilation=True, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet50_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR R50 with 6 encoder and 6 decoder layers. 
- Achieves 43.4 PQ on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model - - -def detr_resnet50_dc5_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR-DC5 R50 with 6 encoder and 6 decoder layers. - - The last block of ResNet-50 has dilation to increase - output resolution. - Achieves 44.6 on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model - - -def detr_resnet101_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - Achieves 45.1 PQ on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model diff --git a/third-party/DETR/main.py b/third-party/DETR/main.py deleted file mode 100644 index e5f9eff..0000000 --- a/third-party/DETR/main.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import argparse -import datetime -import json -import random -import time -from pathlib import Path - -import numpy as np -import torch -from torch.utils.data import DataLoader, DistributedSampler - -import datasets -import util.misc as utils -from datasets import build_dataset, get_coco_api_from_dataset -from engine import evaluate, train_one_epoch -from models import build_model - - -def get_args_parser(): - parser = argparse.ArgumentParser('Set transformer detector', add_help=False) - parser.add_argument('--lr', default=1e-4, type=float) - parser.add_argument('--lr_backbone', default=1e-5, type=float) - parser.add_argument('--batch_size', default=2, type=int) - parser.add_argument('--weight_decay', default=1e-4, type=float) - parser.add_argument('--epochs', default=300, type=int) - parser.add_argument('--lr_drop', default=200, type=int) - parser.add_argument('--clip_max_norm', default=0.1, type=float, - help='gradient clipping max norm') - - # Model parameters - parser.add_argument('--frozen_weights', type=str, default=None, - help="Path to the pretrained model. If set, only the mask head will be trained") - # * Backbone - parser.add_argument('--backbone', default='resnet50', type=str, - help="Name of the convolutional backbone to use") - parser.add_argument('--dilation', action='store_true', - help="If true, we replace stride with dilation in the last convolutional block (DC5)") - parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), - help="Type of positional embedding to use on top of the image features") - - # * Transformer - parser.add_argument('--enc_layers', default=6, type=int, - help="Number of encoding layers in the transformer") - parser.add_argument('--dec_layers', default=6, type=int, - help="Number of decoding layers in the transformer") - parser.add_argument('--dim_feedforward', default=2048, type=int, - help="Intermediate size of the feedforward layers in the transformer blocks") - parser.add_argument('--hidden_dim', default=256, type=int, - help="Size of the embeddings (dimension of the transformer)") - parser.add_argument('--dropout', default=0.1, type=float, - help="Dropout applied in the transformer") - parser.add_argument('--nheads', default=8, type=int, - help="Number of attention heads inside the transformer's attentions") - parser.add_argument('--num_queries', default=100, type=int, - help="Number of query slots") - parser.add_argument('--pre_norm', action='store_true') - - # * Segmentation - parser.add_argument('--masks', action='store_true', - help="Train segmentation head if the flag is provided") - - # Loss - parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', - help="Disables auxiliary decoding losses (loss at each layer)") - # * Matcher - parser.add_argument('--set_cost_class', default=1, type=float, - help="Class coefficient in the matching cost") - parser.add_argument('--set_cost_bbox', default=5, type=float, - help="L1 box coefficient in the matching cost") - parser.add_argument('--set_cost_giou', default=2, type=float, - help="giou box coefficient in the matching cost") - # * Loss coefficients - parser.add_argument('--mask_loss_coef', default=1, type=float) - parser.add_argument('--dice_loss_coef', default=1, type=float) - parser.add_argument('--bbox_loss_coef', default=5, type=float) - parser.add_argument('--giou_loss_coef', default=2, type=float) - parser.add_argument('--eos_coef', default=0.1, type=float, - help="Relative classification weight of the no-object 
class") - - # dataset parameters - parser.add_argument('--dataset_file', default='coco') - parser.add_argument('--coco_path', type=str) - parser.add_argument('--coco_panoptic_path', type=str) - parser.add_argument('--remove_difficult', action='store_true') - - parser.add_argument('--output_dir', default='', - help='path where to save, empty for no saving') - parser.add_argument('--device', default='cuda', - help='device to use for training / testing') - parser.add_argument('--seed', default=42, type=int) - parser.add_argument('--resume', default='', help='resume from checkpoint') - parser.add_argument('--start_epoch', default=0, type=int, metavar='N', - help='start epoch') - parser.add_argument('--eval', action='store_true') - parser.add_argument('--num_workers', default=2, type=int) - - # distributed training parameters - parser.add_argument('--world_size', default=1, type=int, - help='number of distributed processes') - parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') - return parser - - -def main(args): - utils.init_distributed_mode(args) - print("git:\n {}\n".format(utils.get_sha())) - - if args.frozen_weights is not None: - assert args.masks, "Frozen training is meant for segmentation only" - print(args) - - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - - model, criterion, postprocessors = build_model(args) - model.to(device) - - model_without_ddp = model - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - model_without_ddp = model.module - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print('number of params:', n_parameters) - - param_dicts = [ - {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]}, - { - "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], - "lr": args.lr_backbone, - }, - ] - optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, - weight_decay=args.weight_decay) - lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) - - dataset_train = build_dataset(image_set='train', args=args) - dataset_val = build_dataset(image_set='val', args=args) - - if args.distributed: - sampler_train = DistributedSampler(dataset_train) - sampler_val = DistributedSampler(dataset_val, shuffle=False) - else: - sampler_train = torch.utils.data.RandomSampler(dataset_train) - sampler_val = torch.utils.data.SequentialSampler(dataset_val) - - batch_sampler_train = torch.utils.data.BatchSampler( - sampler_train, args.batch_size, drop_last=True) - - data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, - collate_fn=utils.collate_fn, num_workers=args.num_workers) - data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, - drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) - - if args.dataset_file == "coco_panoptic": - # We also evaluate AP during panoptic training, on original coco DS - coco_val = datasets.coco.build("val", args) - base_ds = get_coco_api_from_dataset(coco_val) - else: - base_ds = get_coco_api_from_dataset(dataset_val) - - if args.frozen_weights is not None: - checkpoint = torch.load(args.frozen_weights, map_location='cpu') - model_without_ddp.detr.load_state_dict(checkpoint['model']) - - output_dir = 
Path(args.output_dir) - if args.resume: - if args.resume.startswith('https'): - checkpoint = torch.hub.load_state_dict_from_url( - args.resume, map_location='cpu', check_hash=True) - else: - checkpoint = torch.load(args.resume, map_location='cpu') - model_without_ddp.load_state_dict(checkpoint['model']) - if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: - optimizer.load_state_dict(checkpoint['optimizer']) - lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) - args.start_epoch = checkpoint['epoch'] + 1 - - if args.eval: - test_stats, coco_evaluator = evaluate(model, criterion, postprocessors, - data_loader_val, base_ds, device, args.output_dir) - if args.output_dir: - utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") - return - - print("Start training") - start_time = time.time() - for epoch in range(args.start_epoch, args.epochs): - if args.distributed: - sampler_train.set_epoch(epoch) - train_stats = train_one_epoch( - model, criterion, data_loader_train, optimizer, device, epoch, - args.clip_max_norm) - lr_scheduler.step() - if args.output_dir: - checkpoint_paths = [output_dir / 'checkpoint.pth'] - # extra checkpoint before LR drop and every 100 epochs - if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0: - checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') - for checkpoint_path in checkpoint_paths: - utils.save_on_master({ - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'lr_scheduler': lr_scheduler.state_dict(), - 'epoch': epoch, - 'args': args, - }, checkpoint_path) - - test_stats, coco_evaluator = evaluate( - model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir - ) - - log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, - **{f'test_{k}': v for k, v in test_stats.items()}, - 'epoch': epoch, - 'n_parameters': n_parameters} - - if args.output_dir and utils.is_main_process(): - with (output_dir / "log.txt").open("a") as f: - f.write(json.dumps(log_stats) + "\n") - - # for evaluation logs - if coco_evaluator is not None: - (output_dir / 'eval').mkdir(exist_ok=True) - if "bbox" in coco_evaluator.coco_eval: - filenames = ['latest.pth'] - if epoch % 50 == 0: - filenames.append(f'{epoch:03}.pth') - for name in filenames: - torch.save(coco_evaluator.coco_eval["bbox"].eval, - output_dir / "eval" / name) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Training time {}'.format(total_time_str)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) - args = parser.parse_args() - if args.output_dir: - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - main(args) diff --git a/third-party/DETR/models/__init__.py b/third-party/DETR/models/__init__.py deleted file mode 100644 index a3f2653..0000000 --- a/third-party/DETR/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .detr import build - - -def build_model(args): - return build(args) diff --git a/third-party/DETR/models/backbone.py b/third-party/DETR/models/backbone.py deleted file mode 100644 index 9668093..0000000 --- a/third-party/DETR/models/backbone.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Backbone modules. 
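For reference, the two-group optimizer setup used in the training script above (a reduced learning rate for the backbone, the base rate for everything else, AdamW plus a StepLR drop) can be sketched with stand-in modules; the module names and sizes here are toy placeholders, not the real DETR components:

    import torch
    from torch import nn

    # Stand-ins for the real backbone and transformer/heads.
    model = nn.ModuleDict({"backbone": nn.Linear(8, 8), "head": nn.Linear(8, 4)})

    # Backbone parameters train with the smaller lr_backbone; the rest use the base lr,
    # mirroring the param_dicts construction above.
    param_dicts = [
        {"params": [p for n, p in model.named_parameters()
                    if "backbone" not in n and p.requires_grad]},
        {"params": [p for n, p in model.named_parameters()
                    if "backbone" in n and p.requires_grad],
         "lr": 1e-5},
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=1e-4, weight_decay=1e-4)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200)  # lr_drop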
-""" -from collections import OrderedDict - -import torch -import torch.nn.functional as F -import torchvision -from torch import nn -from torchvision.models._utils import IntermediateLayerGetter -from typing import Dict, List - -from util.misc import NestedTensor, is_main_process - -from .position_encoding import build_position_encoding - - -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - Copy-paste from torchvision.misc.ops with added eps before rqsrt, - without which any other models than torchvision.models.resnet[18,34,50,101] - produce nans. - """ - - def __init__(self, n): - super(FrozenBatchNorm2d, self).__init__() - self.register_buffer("weight", torch.ones(n)) - self.register_buffer("bias", torch.zeros(n)) - self.register_buffer("running_mean", torch.zeros(n)) - self.register_buffer("running_var", torch.ones(n)) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - num_batches_tracked_key = prefix + 'num_batches_tracked' - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super(FrozenBatchNorm2d, self)._load_from_state_dict( - state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs) - - def forward(self, x): - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - eps = 1e-5 - scale = w * (rv + eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - -class BackboneBase(nn.Module): - - def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): - super().__init__() - for name, parameter in backbone.named_parameters(): - if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: - parameter.requires_grad_(False) - if return_interm_layers: - return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} - else: - return_layers = {'layer4': "0"} - self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) - self.num_channels = num_channels - - def forward(self, tensor_list: NestedTensor): - xs = self.body(tensor_list.tensors) - out: Dict[str, NestedTensor] = {} - for name, x in xs.items(): - m = tensor_list.mask - assert m is not None - mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] - out[name] = NestedTensor(x, mask) - return out - - -class Backbone(BackboneBase): - """ResNet backbone with frozen BatchNorm.""" - def __init__(self, name: str, - train_backbone: bool, - return_interm_layers: bool, - dilation: bool): - backbone = getattr(torchvision.models, name)( - replace_stride_with_dilation=[False, False, dilation], - pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) - num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 - super().__init__(backbone, train_backbone, num_channels, return_interm_layers) - - -class Joiner(nn.Sequential): - def __init__(self, backbone, position_embedding): - super().__init__(backbone, position_embedding) - - def forward(self, tensor_list: NestedTensor): - xs = self[0](tensor_list) - out: List[NestedTensor] = [] - pos = [] - for name, x in xs.items(): - out.append(x) - # position encoding - pos.append(self[1](x).to(x.tensors.dtype)) - - return out, pos - - -def build_backbone(args): - 
position_embedding = build_position_encoding(args) - train_backbone = args.lr_backbone > 0 - return_interm_layers = args.masks - backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) - model = Joiner(backbone, position_embedding) - model.num_channels = backbone.num_channels - return model diff --git a/third-party/DETR/models/detr.py b/third-party/DETR/models/detr.py deleted file mode 100644 index 23c2376..0000000 --- a/third-party/DETR/models/detr.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR model and criterion classes. -""" -import torch -import torch.nn.functional as F -from torch import nn - -from util import box_ops -from util.misc import (NestedTensor, nested_tensor_from_tensor_list, - accuracy, get_world_size, interpolate, - is_dist_avail_and_initialized) - -from .backbone import build_backbone -from .matcher import build_matcher -from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, - dice_loss, sigmoid_focal_loss) -from .transformer import build_transformer - - -class DETR(nn.Module): - """ This is the DETR module that performs object detection """ - def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False): - """ Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_classes: number of object classes - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. - """ - super().__init__() - self.num_queries = num_queries - self.transformer = transformer - hidden_dim = transformer.d_model - self.class_embed = nn.Linear(hidden_dim, num_classes + 1) - self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - self.query_embed = nn.Embedding(num_queries, hidden_dim) - self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) - self.backbone = backbone - self.aux_loss = aux_loss - - def forward(self, samples: NestedTensor): - """Ā The forward expects a NestedTensor, which consists of: - - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels - - It returns a dict with the following elements: - - "pred_logits": the classification logits (including no-object) for all queries. - Shape= [batch_size x num_queries x (num_classes + 1)] - - "pred_boxes": The normalized boxes coordinates for all queries, represented as - (center_x, center_y, height, width). These values are normalized in [0, 1], - relative to the size of each individual image (disregarding possible padding). - See PostProcess for information on how to retrieve the unnormalized bounding box. - - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of - dictionnaries containing the two above keys for each decoder layer. 
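The "pred_logits"/"pred_boxes" dictionary described above can be decoded into per-image detections by dropping the no-object column and rescaling the normalized center-format boxes; a minimal sketch on toy tensors (plain PyTorch, not the repo's PostProcess module):

    import torch

    def decode_outputs(pred_logits, pred_boxes, img_h, img_w):
        # pred_logits: [num_queries, num_classes + 1], pred_boxes: [num_queries, 4]
        prob = pred_logits.softmax(-1)
        scores, labels = prob[:, :-1].max(-1)          # last column is "no object"
        cx, cy, w, h = pred_boxes.unbind(-1)           # normalized center format
        boxes = torch.stack([cx - 0.5 * w, cy - 0.5 * h,
                             cx + 0.5 * w, cy + 0.5 * h], dim=-1)
        boxes = boxes * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float)
        return scores, labels, boxes

    scores, labels, boxes = decode_outputs(torch.randn(100, 92), torch.rand(100, 4), 480, 640)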
- """ - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.backbone(samples) - - src, mask = features[-1].decompose() - assert mask is not None - hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0] - - outputs_class = self.class_embed(hs) - outputs_coord = self.bbox_embed(hs).sigmoid() - out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} - if self.aux_loss: - out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_coord): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - return [{'pred_logits': a, 'pred_boxes': b} - for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] - - -class SetCriterion(nn.Module): - """ This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): - """ Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. - """ - super().__init__() - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.eos_coef = eos_coef - self.losses = losses - empty_weight = torch.ones(self.num_classes + 1) - empty_weight[-1] = self.eos_coef - self.register_buffer('empty_weight', empty_weight) - - def loss_labels(self, outputs, targets, indices, num_boxes, log=True): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - assert 'pred_logits' in outputs - src_logits = outputs['pred_logits'] - - idx = self._get_src_permutation_idx(indices) - target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) - target_classes = torch.full(src_logits.shape[:2], self.num_classes, - dtype=torch.int64, device=src_logits.device) - target_classes[idx] = target_classes_o - - loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) - losses = {'loss_ce': loss_ce} - - if log: - # TODO this should probably be a separate loss, not hacked in this one here - losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] - return losses - - @torch.no_grad() - def loss_cardinality(self, outputs, targets, indices, num_boxes): - """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes - This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients - """ - pred_logits = outputs['pred_logits'] - device = pred_logits.device - tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) - # Count the number of predictions that are NOT "no-object" (which is the last class) - card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) - card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) - losses = {'cardinality_error': card_err} - return losses - - def loss_boxes(self, outputs, targets, indices, num_boxes): - """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss - targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] - The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. - """ - assert 'pred_boxes' in outputs - idx = self._get_src_permutation_idx(indices) - src_boxes = outputs['pred_boxes'][idx] - target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) - - loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') - - losses = {} - losses['loss_bbox'] = loss_bbox.sum() / num_boxes - - loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( - box_ops.box_cxcywh_to_xyxy(src_boxes), - box_ops.box_cxcywh_to_xyxy(target_boxes))) - losses['loss_giou'] = loss_giou.sum() / num_boxes - return losses - - def loss_masks(self, outputs, targets, indices, num_boxes): - """Compute the losses related to the masks: the focal loss and the dice loss. - targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] - """ - assert "pred_masks" in outputs - - src_idx = self._get_src_permutation_idx(indices) - tgt_idx = self._get_tgt_permutation_idx(indices) - src_masks = outputs["pred_masks"] - src_masks = src_masks[src_idx] - masks = [t["masks"] for t in targets] - # TODO use valid to mask invalid areas due to padding in loss - target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() - target_masks = target_masks.to(src_masks) - target_masks = target_masks[tgt_idx] - - # upsample predictions to the target size - src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], - mode="bilinear", align_corners=False) - src_masks = src_masks[:, 0].flatten(1) - - target_masks = target_masks.flatten(1) - target_masks = target_masks.view(src_masks.shape) - losses = { - "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), - "loss_dice": dice_loss(src_masks, target_masks, num_boxes), - } - return losses - - def _get_src_permutation_idx(self, indices): - # permute predictions following indices - batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) - src_idx = torch.cat([src for (src, _) in indices]) - return batch_idx, src_idx - - def _get_tgt_permutation_idx(self, indices): - # permute targets following indices - batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) - tgt_idx = torch.cat([tgt for (_, tgt) in indices]) - return batch_idx, tgt_idx - - def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): - loss_map = { - 'labels': self.loss_labels, - 'cardinality': self.loss_cardinality, - 'boxes': self.loss_boxes, - 'masks': self.loss_masks - } - assert loss in loss_map, f'do you really want to compute {loss} loss?' - return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) - - def forward(self, outputs, targets): - """ This performs the loss computation. 
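The two box terms above (per-pair L1 plus GIoU, both normalized by the number of target boxes) can be reproduced in a few lines; this sketch assumes a recent torchvision for box_convert and generalized_box_iou rather than the repo's util.box_ops, and uses random stand-in boxes:

    import torch
    import torch.nn.functional as F
    from torchvision.ops import box_convert, generalized_box_iou

    num_boxes = 7
    src_boxes = torch.rand(num_boxes, 4)      # matched predictions, normalized cxcywh
    target_boxes = torch.rand(num_boxes, 4)   # matched targets, normalized cxcywh

    loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction="none").sum() / num_boxes
    giou = generalized_box_iou(box_convert(src_boxes, "cxcywh", "xyxy"),
                               box_convert(target_boxes, "cxcywh", "xyxy"))
    loss_giou = (1 - torch.diag(giou)).sum() / num_boxes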
- Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} - - # Retrieve the matching between the outputs of the last layer and the targets - indices = self.matcher(outputs_without_aux, targets) - - # Compute the average number of target boxes accross all nodes, for normalization purposes - num_boxes = sum(len(t["labels"]) for t in targets) - num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_boxes) - num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() - - # Compute all the requested losses - losses = {} - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) - - # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. - if 'aux_outputs' in outputs: - for i, aux_outputs in enumerate(outputs['aux_outputs']): - indices = self.matcher(aux_outputs, targets) - for loss in self.losses: - if loss == 'masks': - # Intermediate masks losses are too costly to compute, we ignore them. - continue - kwargs = {} - if loss == 'labels': - # Logging is enabled only for the last layer - kwargs = {'log': False} - l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) - l_dict = {k + f'_{i}': v for k, v in l_dict.items()} - losses.update(l_dict) - - return losses - - -class PostProcess(nn.Module): - """ This module converts the model's output into the format expected by the coco api""" - @torch.no_grad() - def forward(self, outputs, target_sizes): - """ Perform the computation - Parameters: - outputs: raw outputs of the model - target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch - For evaluation, this must be the original image size (before any data augmentation) - For visualization, this should be the image size after data augment, but before padding - """ - out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] - - assert len(out_logits) == len(target_sizes) - assert target_sizes.shape[1] == 2 - - prob = F.softmax(out_logits, -1) - scores, labels = prob[..., :-1].max(-1) - - # convert to [x0, y0, x1, y1] format - boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) - # and from relative [0, 1] to absolute [0, height] coordinates - img_h, img_w = target_sizes.unbind(1) - scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) - boxes = boxes * scale_fct[:, None, :] - - results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] - - return results - - -class MLP(nn.Module): - """ Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def build(args): - # the `num_classes` naming here is somewhat misleading. 
- # it indeed corresponds to `max_obj_id + 1`, where max_obj_id - # is the maximum id for a class in your dataset. For example, - # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. - # As another example, for a dataset that has a single class with id 1, - # you should pass `num_classes` to be 2 (max_obj_id + 1). - # For more details on this, check the following discussion - # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 - num_classes = 20 if args.dataset_file != 'coco' else 91 - if args.dataset_file == "coco_panoptic": - # for panoptic, we just add a num_classes that is large enough to hold - # max_obj_id + 1, but the exact value doesn't really matter - num_classes = 250 - device = torch.device(args.device) - - backbone = build_backbone(args) - - transformer = build_transformer(args) - - model = DETR( - backbone, - transformer, - num_classes=num_classes, - num_queries=args.num_queries, - aux_loss=args.aux_loss, - ) - if args.masks: - model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) - matcher = build_matcher(args) - weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef} - weight_dict['loss_giou'] = args.giou_loss_coef - if args.masks: - weight_dict["loss_mask"] = args.mask_loss_coef - weight_dict["loss_dice"] = args.dice_loss_coef - # TODO this is a hack - if args.aux_loss: - aux_weight_dict = {} - for i in range(args.dec_layers - 1): - aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ['labels', 'boxes', 'cardinality'] - if args.masks: - losses += ["masks"] - criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, - eos_coef=args.eos_coef, losses=losses) - criterion.to(device) - postprocessors = {'bbox': PostProcess()} - if args.masks: - postprocessors['segm'] = PostProcessSegm() - if args.dataset_file == "coco_panoptic": - is_thing_map = {i: i <= 90 for i in range(201)} - postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85) - - return model, criterion, postprocessors diff --git a/third-party/DETR/models/matcher.py b/third-party/DETR/models/matcher.py deleted file mode 100644 index 0c29147..0000000 --- a/third-party/DETR/models/matcher.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Modules to compute the matching cost and solve the corresponding LSAP. -""" -import torch -from scipy.optimize import linear_sum_assignment -from torch import nn - -from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou - - -class HungarianMatcher(nn.Module): - """This class computes an assignment between the targets and the predictions of the network - - For efficiency reasons, the targets don't include the no_object. Because of this, in general, - there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, - while the others are un-matched (and thus treated as non-objects). 
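A stripped-down version of that matching step, with the GIoU term omitted for brevity and the default cost weights (class 1, bbox 5) from the argument parser above; tensors are toy placeholders and SciPy's linear_sum_assignment solves the assignment, as in the code that follows:

    import torch
    from scipy.optimize import linear_sum_assignment

    num_queries, num_targets = 5, 3
    out_prob = torch.rand(num_queries, 92).softmax(-1)   # per-query class probabilities
    out_bbox = torch.rand(num_queries, 4)                # per-query boxes (cxcywh)
    tgt_ids = torch.tensor([3, 17, 42])
    tgt_bbox = torch.rand(num_targets, 4)

    cost_class = -out_prob[:, tgt_ids]                   # approx. 1 - p(target class)
    cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)     # pairwise L1 between boxes
    C = 1.0 * cost_class + 5.0 * cost_bbox               # GIoU term omitted here
    row_ind, col_ind = linear_sum_assignment(C.numpy())  # prediction row_ind[i] -> target col_ind[i]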
- """ - - def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): - """Creates the matcher - - Params: - cost_class: This is the relative weight of the classification error in the matching cost - cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost - cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost - """ - super().__init__() - self.cost_class = cost_class - self.cost_bbox = cost_bbox - self.cost_giou = cost_giou - assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" - - @torch.no_grad() - def forward(self, outputs, targets): - """ Performs the matching - - Params: - outputs: This is a dict that contains at least these entries: - "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits - "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates - - targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: - "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth - objects in the target) containing the class labels - "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates - - Returns: - A list of size batch_size, containing tuples of (index_i, index_j) where: - - index_i is the indices of the selected predictions (in order) - - index_j is the indices of the corresponding selected targets (in order) - For each batch element, it holds: - len(index_i) = len(index_j) = min(num_queries, num_target_boxes) - """ - bs, num_queries = outputs["pred_logits"].shape[:2] - - # We flatten to compute the cost matrices in a batch - out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] - out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] - - # Also concat the target labels and boxes - tgt_ids = torch.cat([v["labels"] for v in targets]) - tgt_bbox = torch.cat([v["boxes"] for v in targets]) - - # Compute the classification cost. Contrary to the loss, we don't use the NLL, - # but approximate it in 1 - proba[target class]. - # The 1 is a constant that doesn't change the matching, it can be ommitted. - cost_class = -out_prob[:, tgt_ids] - - # Compute the L1 cost between boxes - cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) - - # Compute the giou cost betwen boxes - cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) - - # Final cost matrix - C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou - C = C.view(bs, num_queries, -1).cpu() - - sizes = [len(v["boxes"]) for v in targets] - indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] - return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] - - -def build_matcher(args): - return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) diff --git a/third-party/DETR/models/position_encoding.py b/third-party/DETR/models/position_encoding.py deleted file mode 100644 index 73ae39e..0000000 --- a/third-party/DETR/models/position_encoding.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Various positional encodings for the transformer. 
-""" -import math -import torch -from torch import nn - -from util.misc import NestedTensor - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - mask = tensor_list.mask - assert mask is not None - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -class PositionEmbeddingLearned(nn.Module): - """ - Absolute pos embedding, learned. - """ - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(50, num_pos_feats) - self.col_embed = nn.Embedding(50, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i) - y_emb = self.row_embed(j) - pos = torch.cat([ - x_emb.unsqueeze(0).repeat(h, 1, 1), - y_emb.unsqueeze(1).repeat(1, w, 1), - ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) - return pos - - -def build_position_encoding(args): - N_steps = args.hidden_dim // 2 - if args.position_embedding in ('v2', 'sine'): - # TODO find a better way of exposing other arguments - position_embedding = PositionEmbeddingSine(N_steps, normalize=True) - elif args.position_embedding in ('v3', 'learned'): - position_embedding = PositionEmbeddingLearned(N_steps) - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/third-party/DETR/models/segmentation.py b/third-party/DETR/models/segmentation.py deleted file mode 100644 index 01faa88..0000000 --- a/third-party/DETR/models/segmentation.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -""" -This file provides the definition of the convolutional heads used to predict masks, as well as the losses -""" -import io -from collections import defaultdict -from typing import List, Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor -from PIL import Image - -import util.box_ops as box_ops -from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list - -try: - from panopticapi.utils import id2rgb, rgb2id -except ImportError: - pass - - -class DETRsegm(nn.Module): - def __init__(self, detr, freeze_detr=False): - super().__init__() - self.detr = detr - - if freeze_detr: - for p in self.parameters(): - p.requires_grad_(False) - - hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead - self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) - self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) - - def forward(self, samples: NestedTensor): - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.detr.backbone(samples) - - bs = features[-1].tensors.shape[0] - - src, mask = features[-1].decompose() - assert mask is not None - src_proj = self.detr.input_proj(src) - hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) - - outputs_class = self.detr.class_embed(hs) - outputs_coord = self.detr.bbox_embed(hs).sigmoid() - out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} - if self.detr.aux_loss: - out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) - - # FIXME h_boxes takes the last one computed, keep this in mind - bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) - - seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) - outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) - - out["pred_masks"] = outputs_seg_masks - return out - - -def _expand(tensor, length: int): - return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) - - -class MaskHeadSmallConv(nn.Module): - """ - Simple convolutional head, using group norm. 
- Upsampling is done using a FPN approach - """ - - def __init__(self, dim, fpn_dims, context_dim): - super().__init__() - - inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] - self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) - self.gn1 = torch.nn.GroupNorm(8, dim) - self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) - self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) - self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) - self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) - self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) - self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) - self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) - self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) - self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) - - self.dim = dim - - self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) - self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) - self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - nn.init.constant_(m.bias, 0) - - def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): - x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) - - x = self.lay1(x) - x = self.gn1(x) - x = F.relu(x) - x = self.lay2(x) - x = self.gn2(x) - x = F.relu(x) - - cur_fpn = self.adapter1(fpns[0]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay3(x) - x = self.gn3(x) - x = F.relu(x) - - cur_fpn = self.adapter2(fpns[1]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay4(x) - x = self.gn4(x) - x = F.relu(x) - - cur_fpn = self.adapter3(fpns[2]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay5(x) - x = self.gn5(x) - x = F.relu(x) - - x = self.out_lay(x) - return x - - -class MHAttentionMap(nn.Module): - """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" - - def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): - super().__init__() - self.num_heads = num_heads - self.hidden_dim = hidden_dim - self.dropout = nn.Dropout(dropout) - - self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - - nn.init.zeros_(self.k_linear.bias) - nn.init.zeros_(self.q_linear.bias) - nn.init.xavier_uniform_(self.k_linear.weight) - nn.init.xavier_uniform_(self.q_linear.weight) - self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 - - def forward(self, q, k, mask: Optional[Tensor] = None): - q = self.q_linear(q) - k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) - qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) - kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) - weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) - - if mask is not None: - weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) - weights 
= F.softmax(weights.flatten(2), dim=-1).view(weights.size()) - weights = self.dropout(weights) - return weights - - -def dice_loss(inputs, targets, num_boxes): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_boxes - - -def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_boxes - - -class PostProcessSegm(nn.Module): - def __init__(self, threshold=0.5): - super().__init__() - self.threshold = threshold - - @torch.no_grad() - def forward(self, results, outputs, orig_target_sizes, max_target_sizes): - assert len(orig_target_sizes) == len(max_target_sizes) - max_h, max_w = max_target_sizes.max(0)[0].tolist() - outputs_masks = outputs["pred_masks"].squeeze(2) - outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) - outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() - - for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): - img_h, img_w = t[0], t[1] - results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) - results[i]["masks"] = F.interpolate( - results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" - ).byte() - - return results - - -class PostProcessPanoptic(nn.Module): - """This class converts the output of the model to the final panoptic result, in the format expected by the - coco panoptic API """ - - def __init__(self, is_thing_map, threshold=0.85): - """ - Parameters: - is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether - the class is a thing (True) or a stuff (False) class - threshold: confidence threshold: segments with confidence lower than this will be deleted - """ - super().__init__() - self.threshold = threshold - self.is_thing_map = is_thing_map - - def forward(self, outputs, processed_sizes, target_sizes=None): - """ This function computes the panoptic prediction from the model's predictions. - Parameters: - outputs: This is a dict coming directly from the model. See the model doc for the content. 
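The two mask losses defined above can be exercised on toy data with compact re-statements of the same math (flattened masks, both terms normalized by the number of boxes):

    import torch
    import torch.nn.functional as F

    def dice_loss(inputs, targets, num_boxes):
        inputs = inputs.sigmoid().flatten(1)
        numerator = 2 * (inputs * targets).sum(1)
        denominator = inputs.sum(-1) + targets.sum(-1)
        return (1 - (numerator + 1) / (denominator + 1)).sum() / num_boxes

    def sigmoid_focal_loss(inputs, targets, num_boxes, alpha=0.25, gamma=2.0):
        prob = inputs.sigmoid()
        ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
        p_t = prob * targets + (1 - prob) * (1 - targets)
        loss = ce * ((1 - p_t) ** gamma)
        loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
        return loss.mean(1).sum() / num_boxes

    logits = torch.randn(3, 64 * 64)                       # 3 predicted masks, flattened
    targets = torch.randint(0, 2, (3, 64 * 64)).float()    # binary ground-truth masks
    print(dice_loss(logits, targets, 3), sigmoid_focal_loss(logits, targets, 3))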
- processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the - model, ie the size after data augmentation but before batching. - target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size - of each prediction. If left to None, it will default to the processed_sizes - """ - if target_sizes is None: - target_sizes = processed_sizes - assert len(processed_sizes) == len(target_sizes) - out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] - assert len(out_logits) == len(raw_masks) == len(target_sizes) - preds = [] - - def to_tuple(tup): - if isinstance(tup, tuple): - return tup - return tuple(tup.cpu().tolist()) - - for cur_logits, cur_masks, cur_boxes, size, target_size in zip( - out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes - ): - # we filter empty queries and detection below threshold - scores, labels = cur_logits.softmax(-1).max(-1) - keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) - cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) - cur_scores = cur_scores[keep] - cur_classes = cur_classes[keep] - cur_masks = cur_masks[keep] - cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) - cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) - - h, w = cur_masks.shape[-2:] - assert len(cur_boxes) == len(cur_classes) - - # It may be that we have several predicted masks for the same stuff class. - # In the following, we track the list of masks ids for each stuff class (they are merged later on) - cur_masks = cur_masks.flatten(1) - stuff_equiv_classes = defaultdict(lambda: []) - for k, label in enumerate(cur_classes): - if not self.is_thing_map[label.item()]: - stuff_equiv_classes[label.item()].append(k) - - def get_ids_area(masks, scores, dedup=False): - # This helper function creates the final panoptic segmentation image - # It also returns the area of the masks that appears on the image - - m_id = masks.transpose(0, 1).softmax(-1) - - if m_id.shape[-1] == 0: - # We didn't detect any mask :( - m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) - else: - m_id = m_id.argmax(-1).view(h, w) - - if dedup: - # Merge the masks corresponding to the same stuff class - for equiv in stuff_equiv_classes.values(): - if len(equiv) > 1: - for eq_id in equiv: - m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) - - final_h, final_w = to_tuple(target_size) - - seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) - seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) - - np_seg_img = ( - torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() - ) - m_id = torch.from_numpy(rgb2id(np_seg_img)) - - area = [] - for i in range(len(scores)): - area.append(m_id.eq(i).sum().item()) - return area, seg_img - - area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) - if cur_classes.numel() > 0: - # We know filter empty masks as long as we find some - while True: - filtered_small = torch.as_tensor( - [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device - ) - if filtered_small.any().item(): - cur_scores = cur_scores[~filtered_small] - cur_classes = cur_classes[~filtered_small] - cur_masks = cur_masks[~filtered_small] - area, seg_img = get_ids_area(cur_masks, cur_scores) - else: - break - - else: - cur_classes = torch.ones(1, dtype=torch.long, 
device=cur_classes.device) - - segments_info = [] - for i, a in enumerate(area): - cat = cur_classes[i].item() - segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) - del cur_classes - - with io.BytesIO() as out: - seg_img.save(out, format="PNG") - predictions = {"png_string": out.getvalue(), "segments_info": segments_info} - preds.append(predictions) - return preds diff --git a/third-party/DETR/requirements.txt b/third-party/DETR/requirements.txt deleted file mode 100644 index bb8f782..0000000 --- a/third-party/DETR/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -cython -git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools -submitit -torch>=1.5.0 -torchvision>=0.6.0 -git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi -scipy -onnx -onnxruntime diff --git a/third-party/DETR/run_with_submitit.py b/third-party/DETR/run_with_submitit.py deleted file mode 100644 index b6780de..0000000 --- a/third-party/DETR/run_with_submitit.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -A script to run multinode training with submitit. -""" -import argparse -import os -import uuid -from pathlib import Path - -import main as detection -import submitit - - -def parse_args(): - detection_parser = detection.get_args_parser() - parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser]) - parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") - parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request") - parser.add_argument("--timeout", default=60, type=int, help="Duration of the job") - parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") - return parser.parse_args() - - -def get_shared_folder() -> Path: - user = os.getenv("USER") - if Path("/checkpoint/").is_dir(): - p = Path(f"/checkpoint/{user}/experiments") - p.mkdir(exist_ok=True) - return p - raise RuntimeError("No shared folder available") - - -def get_init_file(): - # Init file must not exist, but it's parent dir must exist. 
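The final "png_string" packaging above amounts to rendering the segment-id map to an image and keeping the PNG bytes in memory; a minimal sketch that skips the panopticapi id2rgb encoding and uses a toy uint8 id map:

    import io
    import numpy as np
    from PIL import Image

    m_id = np.zeros((200, 200), dtype=np.uint8)   # toy segment-id map
    m_id[50:150, 50:150] = 1
    seg_img = Image.fromarray(m_id)               # single-channel image of segment ids

    with io.BytesIO() as out:
        seg_img.save(out, format="PNG")
        png_bytes = out.getvalue()
    prediction = {"png_string": png_bytes,
                  "segments_info": [{"id": 1, "category_id": 7}]}  # illustrative entry only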
- os.makedirs(str(get_shared_folder()), exist_ok=True) - init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" - if init_file.exists(): - os.remove(str(init_file)) - return init_file - - -class Trainer(object): - def __init__(self, args): - self.args = args - - def __call__(self): - import main as detection - - self._setup_gpu_args() - detection.main(self.args) - - def checkpoint(self): - import os - import submitit - from pathlib import Path - - self.args.dist_url = get_init_file().as_uri() - checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") - if os.path.exists(checkpoint_file): - self.args.resume = checkpoint_file - print("Requeuing ", self.args) - empty_trainer = type(self)(self.args) - return submitit.helpers.DelayedSubmission(empty_trainer) - - def _setup_gpu_args(self): - import submitit - from pathlib import Path - - job_env = submitit.JobEnvironment() - self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) - self.args.gpu = job_env.local_rank - self.args.rank = job_env.global_rank - self.args.world_size = job_env.num_tasks - print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") - - -def main(): - args = parse_args() - if args.job_dir == "": - args.job_dir = get_shared_folder() / "%j" - - # Note that the folder will depend on the job_id, to easily track experiments - executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) - - # cluster setup is defined by environment variables - num_gpus_per_node = args.ngpus - nodes = args.nodes - timeout_min = args.timeout - - executor.update_parameters( - mem_gb=40 * num_gpus_per_node, - gpus_per_node=num_gpus_per_node, - tasks_per_node=num_gpus_per_node, # one task per GPU - cpus_per_task=10, - nodes=nodes, - timeout_min=timeout_min, # max is 60 * 72 - ) - - executor.update_parameters(name="detr") - - args.dist_url = get_init_file().as_uri() - args.output_dir = args.job_dir - - trainer = Trainer(args) - job = executor.submit(trainer) - - print("Submitted job_id:", job.job_id) - - -if __name__ == "__main__": - main() diff --git a/third-party/DETR/test_all.py b/third-party/DETR/test_all.py deleted file mode 100644 index 7153892..0000000 --- a/third-party/DETR/test_all.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import io -import unittest - -import torch -from torch import nn, Tensor -from typing import List - -from models.matcher import HungarianMatcher -from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned -from models.backbone import Backbone, Joiner, BackboneBase -from util import box_ops -from util.misc import nested_tensor_from_tensor_list -from hubconf import detr_resnet50, detr_resnet50_panoptic - -# onnxruntime requires python 3.5 or above -try: - import onnxruntime -except ImportError: - onnxruntime = None - - -class Tester(unittest.TestCase): - - def test_box_cxcywh_to_xyxy(self): - t = torch.rand(10, 4) - r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t)) - self.assertLess((t - r).abs().max(), 1e-5) - - @staticmethod - def indices_torch2python(indices): - return [(i.tolist(), j.tolist()) for i, j in indices] - - def test_hungarian(self): - n_queries, n_targets, n_classes = 100, 15, 91 - logits = torch.rand(1, n_queries, n_classes + 1) - boxes = torch.rand(1, n_queries, 4) - tgt_labels = torch.randint(high=n_classes, size=(n_targets,)) - tgt_boxes = torch.rand(n_targets, 4) - matcher = HungarianMatcher() - targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}] - indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets) - indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2) - self.assertEqual(len(indices_single[0][0]), n_targets) - self.assertEqual(len(indices_single[0][1]), n_targets) - self.assertEqual(self.indices_torch2python(indices_single), - self.indices_torch2python([indices_batched[0]])) - self.assertEqual(self.indices_torch2python(indices_single), - self.indices_torch2python([indices_batched[1]])) - - # test with empty targets - tgt_labels_empty = torch.randint(high=n_classes, size=(0,)) - tgt_boxes_empty = torch.rand(0, 4) - targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}] - indices = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty) - self.assertEqual(len(indices[1][0]), 0) - indices = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2) - self.assertEqual(len(indices[0][0]), 0) - - def test_position_encoding_script(self): - m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned() - mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa - - def test_backbone_script(self): - backbone = Backbone('resnet50', True, False, False) - torch.jit.script(backbone) # noqa - - def test_model_script_detection(self): - model = detr_resnet50(pretrained=False).eval() - scripted_model = torch.jit.script(model) - x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - - def test_model_script_panoptic(self): - model = detr_resnet50_panoptic(pretrained=False).eval() - scripted_model = torch.jit.script(model) - x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"])) - - def 
test_model_detection_different_inputs(self): - model = detr_resnet50(pretrained=False).eval() - # support NestedTensor - x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - self.assertIn('pred_logits', out) - # and 4d Tensor - x = torch.rand(1, 3, 200, 200) - out = model(x) - self.assertIn('pred_logits', out) - # and List[Tensor[C, H, W]] - x = torch.rand(3, 200, 200) - out = model([x]) - self.assertIn('pred_logits', out) - - def test_warpped_model_script_detection(self): - class WrappedDETR(nn.Module): - def __init__(self, model): - super().__init__() - self.model = model - - def forward(self, inputs: List[Tensor]): - sample = nested_tensor_from_tensor_list(inputs) - return self.model(sample) - - model = detr_resnet50(pretrained=False) - wrapped_model = WrappedDETR(model) - wrapped_model.eval() - scripted_model = torch.jit.script(wrapped_model) - x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)] - out = wrapped_model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - - -@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable') -class ONNXExporterTester(unittest.TestCase): - @classmethod - def setUpClass(cls): - torch.manual_seed(123) - - def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None, - output_names=None, input_names=None): - model.eval() - - onnx_io = io.BytesIO() - # export to onnx with the first input - torch.onnx.export(model, inputs_list[0], onnx_io, - do_constant_folding=do_constant_folding, opset_version=12, - dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names) - # validate the exported model with onnx runtime - for test_inputs in inputs_list: - with torch.no_grad(): - if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list): - test_inputs = (nested_tensor_from_tensor_list(test_inputs),) - test_ouputs = model(*test_inputs) - if isinstance(test_ouputs, torch.Tensor): - test_ouputs = (test_ouputs,) - self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) - - def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): - - inputs, _ = torch.jit._flatten(inputs) - outputs, _ = torch.jit._flatten(outputs) - - def to_numpy(tensor): - if tensor.requires_grad: - return tensor.detach().cpu().numpy() - else: - return tensor.cpu().numpy() - - inputs = list(map(to_numpy, inputs)) - outputs = list(map(to_numpy, outputs)) - - ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) - # compute onnxruntime output prediction - ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) - ort_outs = ort_session.run(None, ort_inputs) - for i, element in enumerate(outputs): - try: - torch.testing.assert_allclose(element, ort_outs[i], rtol=1e-03, atol=1e-05) - except AssertionError as error: - if tolerate_small_mismatch: - self.assertIn("(0.00%)", str(error), str(error)) - else: - raise - - def test_model_onnx_detection(self): - model = detr_resnet50(pretrained=False).eval() - dummy_image = torch.ones(1, 3, 800, 800) * 0.3 - model(dummy_image) - - # Test exported model on images of different size, or dummy input - self.run_model( - model, - [(torch.rand(1, 3, 750, 800),)], - input_names=["inputs"], - output_names=["pred_logits", "pred_boxes"], - tolerate_small_mismatch=True, - ) - - @unittest.skip("CI doesn't have enough 
memory") - def test_model_onnx_detection_panoptic(self): - model = detr_resnet50_panoptic(pretrained=False).eval() - dummy_image = torch.ones(1, 3, 800, 800) * 0.3 - model(dummy_image) - - # Test exported model on images of different size, or dummy input - self.run_model( - model, - [(torch.rand(1, 3, 750, 800),)], - input_names=["inputs"], - output_names=["pred_logits", "pred_boxes", "pred_masks"], - tolerate_small_mismatch=True, - ) - - -if __name__ == '__main__': - unittest.main() diff --git a/third-party/DETR/tox.ini b/third-party/DETR/tox.ini deleted file mode 100644 index 5554a88..0000000 --- a/third-party/DETR/tox.ini +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 120 -ignore = F401,E402,F403,W503,W504 diff --git a/third-party/DETR/util/__init__.py b/third-party/DETR/util/__init__.py deleted file mode 100644 index 168f997..0000000 --- a/third-party/DETR/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/third-party/DETR/util/box_ops.py b/third-party/DETR/util/box_ops.py deleted file mode 100644 index 9c088e5..0000000 --- a/third-party/DETR/util/box_ops.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Utilities for bounding box manipulation and GIoU. -""" -import torch -from torchvision.ops.boxes import box_area - - -def box_cxcywh_to_xyxy(x): - x_c, y_c, w, h = x.unbind(-1) - b = [(x_c - 0.5 * w), (y_c - 0.5 * h), - (x_c + 0.5 * w), (y_c + 0.5 * h)] - return torch.stack(b, dim=-1) - - -def box_xyxy_to_cxcywh(x): - x0, y0, x1, y1 = x.unbind(-1) - b = [(x0 + x1) / 2, (y0 + y1) / 2, - (x1 - x0), (y1 - y0)] - return torch.stack(b, dim=-1) - - -# modified from torchvision to also return the union -def box_iou(boxes1, boxes2): - area1 = box_area(boxes1) - area2 = box_area(boxes2) - - lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] - rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] - - wh = (rb - lt).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - union = area1[:, None] + area2 - inter - - iou = inter / union - return iou, union - - -def generalized_box_iou(boxes1, boxes2): - """ - Generalized IoU from https://giou.stanford.edu/ - - The boxes should be in [x0, y0, x1, y1] format - - Returns a [N, M] pairwise matrix, where N = len(boxes1) - and M = len(boxes2) - """ - # degenerate boxes gives inf / nan results - # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() - iou, union = box_iou(boxes1, boxes2) - - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] - - return iou - (area - union) / area - - -def masks_to_boxes(masks): - """Compute the bounding boxes around the provided masks - - The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
- - Returns a [N, 4] tensors, with the boxes in xyxy format - """ - if masks.numel() == 0: - return torch.zeros((0, 4), device=masks.device) - - h, w = masks.shape[-2:] - - y = torch.arange(0, h, dtype=torch.float) - x = torch.arange(0, w, dtype=torch.float) - y, x = torch.meshgrid(y, x) - - x_mask = (masks * x.unsqueeze(0)) - x_max = x_mask.flatten(1).max(-1)[0] - x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - y_mask = (masks * y.unsqueeze(0)) - y_max = y_mask.flatten(1).max(-1)[0] - y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/third-party/DETR/util/misc.py b/third-party/DETR/util/misc.py deleted file mode 100644 index dfa9fb5..0000000 --- a/third-party/DETR/util/misc.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import os -import subprocess -import time -from collections import defaultdict, deque -import datetime -import pickle -from packaging import version -from typing import Optional, List - -import torch -import torch.distributed as dist -from torch import Tensor - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -if version.parse(torchvision.__version__) < version.parse('0.7'): - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. - """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! 
- """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - if torch.cuda.is_available(): - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}', - 'max mem: {memory:.0f}' - ]) - else: - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ]) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() - sha = 'N/A' - diff = "clean" - branch = 'N/A' - try: - sha = _run(['git', 'rev-parse', 'HEAD']) - subprocess.check_output(['git', 'diff'], cwd=cwd) - diff = _run(['git', 'diff-index', 'HEAD']) - diff = "has uncommited changes" if diff else "clean" - branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - batch = 
list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], :img.shape[2]] = False - else: - raise ValueError('not supported') - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
-@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - else: - print('Not using distributed mode') - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}): {}'.format( - args.rank, args.dist_url), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty 
batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. - """ - if version.parse(torchvision.__version__) < version.parse('0.7'): - if input.numel() > 0: - return torch.nn.functional.interpolate( - input, size, scale_factor, mode, align_corners - ) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/third-party/DETR/util/plot_utils.py b/third-party/DETR/util/plot_utils.py deleted file mode 100644 index 0f24bed..0000000 --- a/third-party/DETR/util/plot_utils.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Plotting utilities to visualize training logs. -""" -import torch -import pandas as pd -import numpy as np -import seaborn as sns -import matplotlib.pyplot as plt - -from pathlib import Path, PurePath - - -def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): - ''' - Function to plot specific fields from training log(s). Plots both training and test results. - - :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file - - fields = which results to plot from each log file - plots both training and test for each field. - - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots - - log_name = optional, name of log file if different than default 'log.txt'. - - :: Outputs - matplotlib plots of results in fields, color coded for each log file. - - solid lines are training results, dashed lines are test results. - - ''' - func_name = "plot_utils.py::plot_logs" - - # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, - # convert single Path to list to avoid 'not iterable' error - - if not isinstance(logs, list): - if isinstance(logs, PurePath): - logs = [logs] - print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") - else: - raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ - Expect list[Path] or single Path obj, received {type(logs)}") - - # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir - for i, dir in enumerate(logs): - if not isinstance(dir, PurePath): - raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") - if not dir.exists(): - raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") - # verify log_name exists - fn = Path(dir / log_name) - if not fn.exists(): - print(f"-> missing {log_name}. 
Have you gotten to Epoch 1 in training?") - print(f"--> full path of missing log file: {fn}") - return - - # load log file(s) and plot - dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] - - fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) - - for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): - for j, field in enumerate(fields): - if field == 'mAP': - coco_eval = pd.DataFrame( - np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] - ).ewm(com=ewm_col).mean() - axs[j].plot(coco_eval, c=color) - else: - df.interpolate().ewm(com=ewm_col).mean().plot( - y=[f'train_{field}', f'test_{field}'], - ax=axs[j], - color=[color] * 2, - style=['-', '--'] - ) - for ax, field in zip(axs, fields): - ax.legend([Path(p).name for p in logs]) - ax.set_title(field) - - -def plot_precision_recall(files, naming_scheme='iter'): - if naming_scheme == 'exp_id': - # name becomes exp_id - names = [f.parts[-3] for f in files] - elif naming_scheme == 'iter': - names = [f.stem for f in files] - else: - raise ValueError(f'not supported {naming_scheme}') - fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) - for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): - data = torch.load(f) - # precision is n_iou, n_points, n_cat, n_area, max_det - precision = data['precision'] - recall = data['params'].recThrs - scores = data['scores'] - # take precision for all classes, all areas and 100 detections - precision = precision[0, :, :, 0, -1].mean(1) - scores = scores[0, :, :, 0, -1].mean(1) - prec = precision.mean() - rec = data['recall'][0, :, 0, -1].mean() - print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + - f'score={scores.mean():0.3f}, ' + - f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' - ) - axs[0].plot(recall, precision, c=color) - axs[1].plot(recall, scores, c=color) - - axs[0].set_title('Precision / Recall') - axs[0].legend(names) - axs[1].set_title('Scores / Recall') - axs[1].legend(names) - return fig, axs
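For reference, a minimal self-contained sketch of the box-format conversion and generalized IoU helpers that the removed third-party/DETR/util/box_ops.py provided. It assumes torchvision's box_area is available; the names are illustrative, not a drop-in replacement for the deleted module.

# Sketch of cxcywh -> xyxy conversion and pairwise generalized IoU,
# in the spirit of the removed third-party/DETR/util/box_ops.py.
import torch
from torchvision.ops.boxes import box_area


def box_cxcywh_to_xyxy(x: torch.Tensor) -> torch.Tensor:
    # (cx, cy, w, h) -> (x0, y0, x1, y1)
    x_c, y_c, w, h = x.unbind(-1)
    return torch.stack([x_c - 0.5 * w, y_c - 0.5 * h,
                        x_c + 0.5 * w, y_c + 0.5 * h], dim=-1)


def generalized_box_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    # Pairwise GIoU for boxes in (x0, y0, x1, y1) format; returns an [N, M] matrix.
    area1, area2 = box_area(boxes1), box_area(boxes2)
    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])   # [N, M, 2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])   # [N, M, 2]
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    # Smallest enclosing box for the GIoU penalty term.
    lt_enc = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb_enc = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    wh_enc = (rb_enc - lt_enc).clamp(min=0)
    area_enc = wh_enc[..., 0] * wh_enc[..., 1]
    return iou - (area_enc - union) / area_enc


# Usage: giou = generalized_box_iou(box_cxcywh_to_xyxy(pred), box_cxcywh_to_xyxy(target))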
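Likewise, a minimal sketch of batching variable-sized [C, H, W] images into a padded tensor plus boolean padding mask, in the spirit of nested_tensor_from_tensor_list() from the removed third-party/DETR/util/misc.py. The name pad_and_mask is illustrative, and device handling is omitted for brevity.

# Sketch: pad a list of differently sized images into one batch tensor and a
# mask where True marks padded positions, as the deleted NestedTensor helper did.
from typing import List, Tuple
import torch


def pad_and_mask(images: List[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
    # images: list of [C, H, W] tensors that may differ in H and W
    c = images[0].shape[0]
    h = max(img.shape[1] for img in images)
    w = max(img.shape[2] for img in images)
    batch = torch.zeros((len(images), c, h, w), dtype=images[0].dtype)
    mask = torch.ones((len(images), h, w), dtype=torch.bool)  # True = padding
    for img, out, m in zip(images, batch, mask):
        out[:, :img.shape[1], :img.shape[2]].copy_(img)
        m[:img.shape[1], :img.shape[2]] = False
    return batch, mask


# Usage: two images of different widths batched together.
imgs = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]
batch, mask = pad_and_mask(imgs)  # batch: [2, 3, 200, 250], mask: [2, 200, 250]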