# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmcv.runner import BaseModule
from torch import nn

from mmocr.models.builder import NECKS


class UpBlock(BaseModule):
    """Upsample block for DRRG and TextSnake."""

    def __init__(self, in_channels, out_channels, init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        assert isinstance(in_channels, int)
        assert isinstance(out_channels, int)
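
        # A 1x1 conv mixes the (possibly concatenated) input channels, a 3x3
        # conv projects them to out_channels, and a stride-2 deconv doubles
        # the spatial size.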
        self.conv1x1 = nn.Conv2d(
            in_channels, in_channels, kernel_size=1, stride=1, padding=0)
        self.conv3x3 = nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.deconv = nn.ConvTranspose2d(
            out_channels, out_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        x = F.relu(self.conv1x1(x))
        x = F.relu(self.conv3x3(x))
        x = self.deconv(x)
        return x


@NECKS.register_module()
class FPN_UNet(BaseModule):
    """The class for implementing DRRG and TextSnake U-Net-like FPN.

    DRRG: `Deep Relational Reasoning Graph Network for Arbitrary Shape
    Text Detection <https://arxiv.org/abs/2003.07493>`_.

    TextSnake: `A Flexible Representation for Detecting Text of Arbitrary
    Shapes <https://arxiv.org/abs/1807.01544>`_.

    Args:
        in_channels (list[int]): Number of input channels at each scale. The
            length of the list should be 4.
        out_channels (int): The number of output channels.
        init_cfg (dict or list[dict], optional): Initialization configs.
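
    Example:
        A minimal usage sketch; the ResNet-50-style channel counts and
        feature sizes below are assumed, not required.

        >>> import torch
        >>> neck = FPN_UNet(in_channels=[256, 512, 1024, 2048], out_channels=32)
        >>> feats = [
        ...     torch.rand(1, 256, 32, 32),   # C2 (stride 4)
        ...     torch.rand(1, 512, 16, 16),   # C3 (stride 8)
        ...     torch.rand(1, 1024, 8, 8),    # C4 (stride 16)
        ...     torch.rand(1, 2048, 4, 4),    # C5 (stride 32)
        ... ]
        >>> neck(feats).shape
        torch.Size([1, 32, 128, 128])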
"""

    def __init__(self,
                 in_channels,
                 out_channels,
                 init_cfg=dict(
                     type='Xavier',
                     layer=['Conv2d', 'ConvTranspose2d'],
                     distribution='uniform')):
        super().__init__(init_cfg=init_cfg)

        assert len(in_channels) == 4
        assert isinstance(out_channels, int)
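
        # Decoder stage output channels, bottom (up_block0) to top (up4):
        # [C, C, 2C, 4C, 8C] with C = out_channels, each capped at 256.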
        blocks_out_channels = [out_channels] + [
            min(out_channels * 2**i, 256) for i in range(4)
        ]
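        # Stage input channels: up_block1..up_block3 consume the previous
        # stage's output concatenated with the lateral C2..C4 feature, while
        # up4 takes C5 alone and up_block0 takes up_block1's output alone.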
        blocks_in_channels = [blocks_out_channels[1]] + [
            in_channels[i] + blocks_out_channels[i + 2] for i in range(3)
        ] + [in_channels[3]]
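
        # Topmost stage: a plain 2x deconv on C5, applied before any
        # lateral fusion.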
        self.up4 = nn.ConvTranspose2d(
            blocks_in_channels[4],
            blocks_out_channels[4],
            kernel_size=4,
            stride=2,
            padding=1)
        self.up_block3 = UpBlock(blocks_in_channels[3], blocks_out_channels[3])
        self.up_block2 = UpBlock(blocks_in_channels[2], blocks_out_channels[2])
        self.up_block1 = UpBlock(blocks_in_channels[1], blocks_out_channels[1])
        self.up_block0 = UpBlock(blocks_in_channels[0], blocks_out_channels[0])

    def forward(self, x):
        """
        Args:
            x (list[Tensor] | tuple[Tensor]): A list of four tensors of shape
                :math:`(N, C_i, H_i, W_i)`, representing C2, C3, C4, C5
                features respectively. :math:`C_i` should match the number in
                ``in_channels``.

        Returns:
            Tensor: Shape :math:`(N, C, H, W)` where :math:`H = 4H_0` and
            :math:`W = 4W_0`, with :math:`(H_0, W_0)` being the spatial size
            of the C2 feature map.
        """
        c2, c3, c4, c5 = x
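
        # Top-down decoding: each stage doubles the spatial size and fuses
        # the result with the next higher-resolution lateral feature.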
        x = F.relu(self.up4(c5))

        x = torch.cat([x, c4], dim=1)
        x = F.relu(self.up_block3(x))

        x = torch.cat([x, c3], dim=1)
        x = F.relu(self.up_block2(x))

        x = torch.cat([x, c2], dim=1)
        x = F.relu(self.up_block1(x))
        x = self.up_block0(x)
        # the output should be of the same height and width as backbone input
        return x
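

# A minimal config sketch with assumed, illustrative values, showing how this
# neck is typically selected through the NECKS registry in an MMOCR detector
# config (e.g. TextSnake with a ResNet-50 backbone):
#
#     neck=dict(
#         type='FPN_UNet',
#         in_channels=[256, 512, 1024, 2048],
#         out_channels=32)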