|
from typing import Callable, Optional, Union

from segmentation_models_pytorch.encoders import get_encoder
from segmentation_models_pytorch.base import (
    SegmentationModel,
    SegmentationHead,
    ClassificationHead,
)
from .decoder import SegformerDecoder
| 10 | + |
| 11 | + |
class Segformer(SegmentationModel):
    """Segformer is a simple and efficient design for semantic segmentation with Transformers.

    Args:
        encoder_name: Name of the classification model that will be used as an encoder (a.k.a backbone)
            to extract features of different spatial resolution
        encoder_depth: A number of stages used in encoder in range [3, 5]. Each stage generate features
            two times smaller in spatial dimensions than previous one (e.g. for depth 0 we will have features
            with shapes [(N, C, H, W),], for depth 1 - [(N, C, H, W), (N, C, H // 2, W // 2)] and so on).
            Default is 5
        encoder_weights: One of **None** (random initialization), **"imagenet"** (pre-training on ImageNet) and
            other pretrained weights (see table with available weights for each encoder_name)
        decoder_segmentation_channels: A number of convolution filters in segmentation blocks, default is 256
        in_channels: A number of input channels for the model, default is 3 (RGB images)
        classes: A number of classes for output mask (or you can think as a number of channels of output mask)
        activation: An activation function to apply after the final convolution layer.
            Available options are **"sigmoid"**, **"softmax"**, **"logsoftmax"**, **"tanh"**, **"identity"**,
            **callable** and **None**.
            Default is **None**
        aux_params: Dictionary with parameters of the auxiliary output (classification head). Auxiliary output is built
            on top of encoder if **aux_params** is not **None** (default). Supported params:
                - classes (int): A number of classes
                - pooling (str): One of "max", "avg". Default is "avg"
                - dropout (float): Dropout factor in [0, 1)
                - activation (str): An activation function to apply "sigmoid"/"softmax"
                    (could be **None** to return logits)

    Returns:
        ``torch.nn.Module``: **Segformer**

    .. _Segformer:
        https://arxiv.org/abs/2105.15203

    """

    def __init__(
        self,
        encoder_name: str = "resnet34",
        encoder_depth: int = 5,
        encoder_weights: Optional[str] = "imagenet",
        decoder_segmentation_channels: int = 256,
        in_channels: int = 3,
        classes: int = 1,
        # NOTE: annotated with typing.Callable (the builtin ``callable`` is a
        # function, not a type, and is rejected by static type checkers).
        activation: Optional[Union[str, Callable]] = None,
        aux_params: Optional[dict] = None,
    ):
        super().__init__()

        # Backbone: produces a pyramid of multi-scale feature maps.
        self.encoder = get_encoder(
            encoder_name,
            in_channels=in_channels,
            depth=encoder_depth,
            weights=encoder_weights,
        )

        # Decoder fuses the encoder's multi-scale features into a single
        # feature map with ``decoder_segmentation_channels`` channels.
        self.decoder = SegformerDecoder(
            encoder_channels=self.encoder.out_channels,
            encoder_depth=encoder_depth,
            segmentation_channels=decoder_segmentation_channels,
        )

        # Final 3x3 conv projecting decoder features to ``classes`` channels;
        # upsampling=4 restores the decoder output to input resolution.
        self.segmentation_head = SegmentationHead(
            in_channels=decoder_segmentation_channels,
            out_channels=classes,
            activation=activation,
            kernel_size=3,
            upsampling=4,
        )

        # Optional auxiliary classification head on top of the deepest
        # encoder features (only when ``aux_params`` is provided).
        if aux_params is not None:
            self.classification_head = ClassificationHead(
                in_channels=self.encoder.out_channels[-1], **aux_params
            )
        else:
            self.classification_head = None

        self.name = "segformer-{}".format(encoder_name)
        self.initialize()
0 commit comments