# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""Block modules."""

import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from ultralytics.utils.torch_utils import fuse_conv_and_bn

from .conv import Conv, DSConv, DWConv, GhostConv, LightConv, RepConv, autopad
from .transformer import TransformerBlock

__all__ = (
    "DFL", "HGBlock", "HGStem", "SPP", "SPPF", "C1", "C2", "C3", "C2f", "C2fAttn", "ImagePoolingAttn",
    "ContrastiveHead", "BNContrastiveHead", "C3x", "C3TR", "C3Ghost", "GhostBottleneck", "Bottleneck",
    "BottleneckCSP", "Proto", "RepC3", "ResNetLayer", "RepNCSPELAN4", "ELAN1", "ADown", "AConv", "SPPELAN",
    "CBFuse", "CBLinear", "C3k2", "C2fPSA", "C2PSA", "RepVGGDW", "CIB", "C2fCIB", "Attention", "PSA",
    "SCDown", "TorchVision", "HyperACE", "DownsampleConv", "FullPAD_Tunnel", "DSC3k2",
)


class DFL(nn.Module):
    """
    Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
    """

    def __init__(self, c1=16):
        """Initialize a convolutional layer with a given number of input channels."""
        super().__init__()
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        x = torch.arange(c1, dtype=torch.float)
        self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))
        self.c1 = c1

    def forward(self, x):
        """Applies the DFL module to input tensor 'x', decoding box distributions into coordinates."""
        b, _, a = x.shape  # batch, channels, anchors
        return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)
        # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a)
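

# Illustrative sketch (not part of the original module): DFL decodes each box side from
# a discrete distribution over `c1` bins by taking a softmax expectation, implemented as
# a frozen 1x1 conv whose weights are arange(c1). Assuming c1=16 and 8400 anchors:
#
#   >>> dfl = DFL(16)
#   >>> x = torch.randn(2, 64, 8400)  # 4 sides x 16 bins per anchor
#   >>> dfl(x).shape
#   torch.Size([2, 4, 8400])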


class Proto(nn.Module):
    """YOLOv8 mask Proto module for segmentation models."""

    def __init__(self, c1, c_=256, c2=32):
        """
        Initializes the YOLOv8 mask Proto module with specified number of protos and masks.

        Input arguments are ch_in, number of protos, number of masks.
        """
        super().__init__()
        self.cv1 = Conv(c1, c_, k=3)
        self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True)  # nn.Upsample(scale_factor=2, mode='nearest')
        self.cv2 = Conv(c_, c_, k=3)
        self.cv3 = Conv(c_, c2)

    def forward(self, x):
        """Performs a forward pass through layers using an upsampled input image."""
        return self.cv3(self.cv2(self.upsample(self.cv1(x))))


class HGStem(nn.Module):
    """
    StemBlock of PPHGNetV2 with 5 convolutions and one maxpool2d.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2):
        """Initialize the HGStem block with input, middle, and output channels."""
        super().__init__()
        self.stem1 = Conv(c1, cm, 3, 2, act=nn.ReLU())
        self.stem2a = Conv(cm, cm // 2, 2, 1, 0, act=nn.ReLU())
        self.stem2b = Conv(cm // 2, cm, 2, 1, 0, act=nn.ReLU())
        self.stem3 = Conv(cm * 2, cm, 3, 2, act=nn.ReLU())
        self.stem4 = Conv(cm, c2, 1, 1, act=nn.ReLU())
        self.pool = nn.MaxPool2d(kernel_size=2, stride=1, padding=0, ceil_mode=True)

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        x = self.stem1(x)
        x = F.pad(x, [0, 1, 0, 1])
        x2 = self.stem2a(x)
        x2 = F.pad(x2, [0, 1, 0, 1])
        x2 = self.stem2b(x2)
        x1 = self.pool(x)
        x = torch.cat([x1, x2], dim=1)
        x = self.stem3(x)
        x = self.stem4(x)
        return x


class HGBlock(nn.Module):
    """
    HG_Block of PPHGNetV2 with 2 convolutions and LightConv.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/backbones/hgnet_v2.py
    """

    def __init__(self, c1, cm, c2, k=3, n=6, lightconv=False, shortcut=False, act=nn.ReLU()):
        """Initializes HGBlock with the specified channels, kernel size, block count, and conv type."""
        super().__init__()
        block = LightConv if lightconv else Conv
        self.m = nn.ModuleList(block(c1 if i == 0 else cm, cm, k=k, act=act) for i in range(n))
        self.sc = Conv(c1 + n * cm, c2 // 2, 1, 1, act=act)  # squeeze conv
        self.ec = Conv(c2 // 2, c2, 1, 1, act=act)  # excitation conv
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Forward pass of a PPHGNetV2 backbone layer."""
        y = [x]
        y.extend(m(y[-1]) for m in self.m)
        y = self.ec(self.sc(torch.cat(y, 1)))
        return y + x if self.add else y


class SPP(nn.Module):
    """Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        """Initialize the SPP layer with input/output channels and pooling kernel sizes."""
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        """Forward pass of the SPP layer, performing spatial pyramid pooling."""
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher."""

    def __init__(self, c1, c2, k=5):
        """
        Initializes the SPPF layer with given input/output channels and kernel size.

        This module is equivalent to SPP(k=(5, 9, 13)).
        """
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * 4, c2, 1, 1)
        self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        """Forward pass through the SPPF layer, applying three sequential max-pools."""
        y = [self.cv1(x)]
        y.extend(self.m(y[-1]) for _ in range(3))
        return self.cv2(torch.cat(y, 1))
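

# Illustrative sketch (not part of the original module): three chained 5x5 stride-1
# max-pools have effective receptive fields of 5, 9, and 13, which is why SPPF(k=5)
# matches SPP(k=(5, 9, 13)) while reusing intermediate pooling results:
#
#   >>> sppf = SPPF(256, 256, k=5)
#   >>> sppf(torch.randn(1, 256, 20, 20)).shape
#   torch.Size([1, 256, 20, 20])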


class C1(nn.Module):
    """CSP Bottleneck with 1 convolution."""

    def __init__(self, c1, c2, n=1):
        """Initializes the CSP Bottleneck with configurations for 1 convolution with arguments ch_in, ch_out, number."""
        super().__init__()
        self.cv1 = Conv(c1, c2, 1, 1)
        self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n)))

    def forward(self, x):
        """Applies the convolution stack to the input and adds the residual in the C1 module."""
        y = self.cv1(x)
        return self.m(y) + y


class C2(nn.Module):
    """CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes a CSP Bottleneck with 2 convolutions and optional shortcut connection."""
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c2, 1)  # optional act=FReLU(c2)
        # self.attention = ChannelAttention(2 * self.c)  # or SpatialAttention()
        self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 2 convolutions."""
        a, b = self.cv1(x).chunk(2, 1)
        return self.cv2(torch.cat((self.m(a), b), 1))


class C2f(nn.Module):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """Initializes a CSP bottleneck with 2 convolutions and n Bottleneck blocks for faster processing."""
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))

    def forward(self, x):
        """Forward pass through C2f layer."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))

    def forward_split(self, x):
        """Forward pass using split() instead of chunk()."""
        y = self.cv1(x).split((self.c, self.c), 1)
        y = [y[0], y[1]]
        y.extend(m(y[-1]) for m in self.m)
        return self.cv2(torch.cat(y, 1))


class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize the CSP Bottleneck with given channels, number, shortcut, groups, and expansion values."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n)))

    def forward(self, x):
        """Forward pass through the CSP bottleneck with 3 convolutions."""
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


class C3x(C3):
    """C3 module with cross-convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3x instance with cross-convolution bottlenecks and set default parameters."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.c_ = int(c2 * e)
        self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n)))


class RepC3(nn.Module):
    """Rep C3."""

    def __init__(self, c1, c2, n=3, e=1.0):
        """Initialize the RepC3 module with input channels, output channels, and number of RepConv blocks."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.m = nn.Sequential(*[RepConv(c_, c_) for _ in range(n)])
        self.cv3 = Conv(c_, c2, 1, 1) if c_ != c2 else nn.Identity()

    def forward(self, x):
        """Forward pass of the RepC3 (RT-DETR neck) layer."""
        return self.cv3(self.m(self.cv1(x)) + self.cv2(x))
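

# Illustrative sketch (not part of the original module): C2f keeps every intermediate
# branch, so cv2 sees (2 + n) * c hidden channels. With c2=128, e=0.5 (c=64) and n=2,
# cv2 receives 4 * 64 = 256 channels and projects back to 128:
#
#   >>> m = C2f(c1=64, c2=128, n=2)
#   >>> m(torch.randn(1, 64, 40, 40)).shape
#   torch.Size([1, 128, 40, 40])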


class C3TR(C3):
    """C3 module with TransformerBlock()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3TR module with a TransformerBlock in place of the bottleneck stack."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)


class C3Ghost(C3):
    """C3 module with GhostBottleneck()."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initialize C3Ghost module with GhostBottleneck blocks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))


class GhostBottleneck(nn.Module):
    """Ghost Bottleneck https://github.com/huawei-noah/ghostnet."""

    def __init__(self, c1, c2, k=3, s=1):
        """Initializes GhostBottleneck module with arguments ch_in, ch_out, kernel, stride."""
        super().__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, c_, 1, 1),  # pw
            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
            GhostConv(c_, c2, 1, 1, act=False),  # pw-linear
        )
        self.shortcut = (
            nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
        )

    def forward(self, x):
        """Applies the ghost convolution path and adds the shortcut branch."""
        return self.conv(x) + self.shortcut(x)


class Bottleneck(nn.Module):
    """Standard bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Initializes a standard bottleneck module with optional shortcut connection and configurable parameters."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, k[0], 1)
        self.cv2 = Conv(c_, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Applies the two convolutions, with a residual connection if enabled."""
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
    """CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Initializes the CSP Bottleneck given arguments for ch_in, ch_out, number, shortcut, groups, expansion."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.SiLU()
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))

    def forward(self, x):
        """Applies a CSP bottleneck with 4 convolutions."""
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))


class ResNetBlock(nn.Module):
    """ResNet block with standard convolution layers."""

    def __init__(self, c1, c2, s=1, e=4):
        """Initialize convolution with given parameters."""
        super().__init__()
        c3 = e * c2
        self.cv1 = Conv(c1, c2, k=1, s=1, act=True)
        self.cv2 = Conv(c2, c2, k=3, s=s, p=1, act=True)
        self.cv3 = Conv(c2, c3, k=1, act=False)
        self.shortcut = nn.Sequential(Conv(c1, c3, k=1, s=s, act=False)) if s != 1 or c1 != c3 else nn.Identity()

    def forward(self, x):
        """Forward pass through the ResNet block."""
        return F.relu(self.cv3(self.cv2(self.cv1(x))) + self.shortcut(x))


class ResNetLayer(nn.Module):
    """ResNet layer with multiple ResNet blocks."""

    def __init__(self, c1, c2, s=1, is_first=False, n=1, e=4):
        """Initializes the ResNetLayer given arguments."""
        super().__init__()
        self.is_first = is_first

        if self.is_first:
            self.layer = nn.Sequential(
                Conv(c1, c2, k=7, s=2, p=3, act=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
        else:
            blocks = [ResNetBlock(c1, c2, s, e=e)]
            blocks.extend([ResNetBlock(e * c2, c2, 1, e=e) for _ in range(n - 1)])
            self.layer = nn.Sequential(*blocks)

    def forward(self, x):
        """Forward pass through the ResNet layer."""
        return self.layer(x)
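

# Illustrative sketch (not part of the original module): with is_first=True the layer
# acts as a 7x7/stride-2 stem plus 3x3/stride-2 max-pool (4x downsampling); otherwise
# it stacks n ResNetBlocks whose output width is e * c2:
#
#   >>> stem = ResNetLayer(3, 64, is_first=True)
#   >>> stem(torch.randn(1, 3, 224, 224)).shape
#   torch.Size([1, 64, 56, 56])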
"""Initializes C2f module with attention mechanism for enhanced feature extraction and processing.""" super().__init__() self.c = int(c2 * e) # hidden channels self.cv1 = Conv(c1, 2 * self.c, 1, 1) self.cv2 = Conv((3 + n) * self.c, c2, 1) # optional act=FReLU(c2) self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)) self.attn = MaxSigmoidAttnBlock(self.c, self.c, gc=gc, ec=ec, nh=nh) def forward(self, x, guide): """Forward pass through C2f layer.""" y = list(self.cv1(x).chunk(2, 1)) y.extend(m(y[-1]) for m in self.m) y.append(self.attn(y[-1], guide)) return self.cv2(torch.cat(y, 1)) def forward_split(self, x, guide): """Forward pass using split() instead of chunk().""" y = list(self.cv1(x).split((self.c, self.c), 1)) y.extend(m(y[-1]) for m in self.m) y.append(self.attn(y[-1], guide)) return self.cv2(torch.cat(y, 1)) class ImagePoolingAttn(nn.Module): """ImagePoolingAttn: Enhance the text embeddings with image-aware information.""" def __init__(self, ec=256, ch=(), ct=512, nh=8, k=3, scale=False): """Initializes ImagePoolingAttn with specified arguments.""" super().__init__() nf = len(ch) self.query = nn.Sequential(nn.LayerNorm(ct), nn.Linear(ct, ec)) self.key = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec)) self.value = nn.Sequential(nn.LayerNorm(ec), nn.Linear(ec, ec)) self.proj = nn.Linear(ec, ct) self.scale = nn.Parameter(torch.tensor([0.0]), requires_grad=True) if scale else 1.0 self.projections = nn.ModuleList([nn.Conv2d(in_channels, ec, kernel_size=1) for in_channels in ch]) self.im_pools = nn.ModuleList([nn.AdaptiveMaxPool2d((k, k)) for _ in range(nf)]) self.ec = ec self.nh = nh self.nf = nf self.hc = ec // nh self.k = k def forward(self, x, text): """Executes attention mechanism on input tensor x and guide tensor.""" bs = x[0].shape[0] assert len(x) == self.nf num_patches = self.k**2 x = [pool(proj(x)).view(bs, -1, num_patches) for (x, proj, pool) in zip(x, self.projections, self.im_pools)] x = torch.cat(x, dim=-1).transpose(1, 2) q = self.query(text) k = self.key(x) v = self.value(x) # q = q.reshape(1, text.shape[1], self.nh, self.hc).repeat(bs, 1, 1, 1) q = q.reshape(bs, -1, self.nh, self.hc) k = k.reshape(bs, -1, self.nh, self.hc) v = v.reshape(bs, -1, self.nh, self.hc) aw = torch.einsum("bnmc,bkmc->bmnk", q, k) aw = aw / (self.hc**0.5) aw = F.softmax(aw, dim=-1) x = torch.einsum("bmnk,bkmc->bnmc", aw, v) x = self.proj(x.reshape(bs, -1, self.ec)) return x * self.scale + text class ContrastiveHead(nn.Module): """Implements contrastive learning head for region-text similarity in vision-language models.""" def __init__(self): """Initializes ContrastiveHead with specified region-text similarity parameters.""" super().__init__() # NOTE: use -10.0 to keep the init cls loss consistency with other losses self.bias = nn.Parameter(torch.tensor([-10.0])) self.logit_scale = nn.Parameter(torch.ones([]) * torch.tensor(1 / 0.07).log()) def forward(self, x, w): """Forward function of contrastive learning.""" x = F.normalize(x, dim=1, p=2) w = F.normalize(w, dim=-1, p=2) x = torch.einsum("bchw,bkc->bkhw", x, w) return x * self.logit_scale.exp() + self.bias class BNContrastiveHead(nn.Module): """ Batch Norm Contrastive Head for YOLO-World using batch norm instead of l2-normalization. Args: embed_dims (int): Embed dimensions of text and image features. 
""" def __init__(self, embed_dims: int): """Initialize ContrastiveHead with region-text similarity parameters.""" super().__init__() self.norm = nn.BatchNorm2d(embed_dims) # NOTE: use -10.0 to keep the init cls loss consistency with other losses self.bias = nn.Parameter(torch.tensor([-10.0])) # use -1.0 is more stable self.logit_scale = nn.Parameter(-1.0 * torch.ones([])) def forward(self, x, w): """Forward function of contrastive learning.""" x = self.norm(x) w = F.normalize(w, dim=-1, p=2) x = torch.einsum("bchw,bkc->bkhw", x, w) return x * self.logit_scale.exp() + self.bias class RepBottleneck(Bottleneck): """Rep bottleneck.""" def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5): """Initializes a RepBottleneck module with customizable in/out channels, shortcuts, groups and expansion.""" super().__init__(c1, c2, shortcut, g, k, e) c_ = int(c2 * e) # hidden channels self.cv1 = RepConv(c1, c_, k[0], 1) class RepCSP(C3): """Repeatable Cross Stage Partial Network (RepCSP) module for efficient feature extraction.""" def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes RepCSP layer with given channels, repetitions, shortcut, groups and expansion ratio.""" super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) # hidden channels self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) class RepNCSPELAN4(nn.Module): """CSP-ELAN.""" def __init__(self, c1, c2, c3, c4, n=1): """Initializes CSP-ELAN layer with specified channel sizes, repetitions, and convolutions.""" super().__init__() self.c = c3 // 2 self.cv1 = Conv(c1, c3, 1, 1) self.cv2 = nn.Sequential(RepCSP(c3 // 2, c4, n), Conv(c4, c4, 3, 1)) self.cv3 = nn.Sequential(RepCSP(c4, c4, n), Conv(c4, c4, 3, 1)) self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1) def forward(self, x): """Forward pass through RepNCSPELAN4 layer.""" y = list(self.cv1(x).chunk(2, 1)) y.extend((m(y[-1])) for m in [self.cv2, self.cv3]) return self.cv4(torch.cat(y, 1)) def forward_split(self, x): """Forward pass using split() instead of chunk().""" y = list(self.cv1(x).split((self.c, self.c), 1)) y.extend(m(y[-1]) for m in [self.cv2, self.cv3]) return self.cv4(torch.cat(y, 1)) class ELAN1(RepNCSPELAN4): """ELAN1 module with 4 convolutions.""" def __init__(self, c1, c2, c3, c4): """Initializes ELAN1 layer with specified channel sizes.""" super().__init__(c1, c2, c3, c4) self.c = c3 // 2 self.cv1 = Conv(c1, c3, 1, 1) self.cv2 = Conv(c3 // 2, c4, 3, 1) self.cv3 = Conv(c4, c4, 3, 1) self.cv4 = Conv(c3 + (2 * c4), c2, 1, 1) class AConv(nn.Module): """AConv.""" def __init__(self, c1, c2): """Initializes AConv module with convolution layers.""" super().__init__() self.cv1 = Conv(c1, c2, 3, 2, 1) def forward(self, x): """Forward pass through AConv layer.""" x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True) return self.cv1(x) class ADown(nn.Module): """ADown.""" def __init__(self, c1, c2): """Initializes ADown module with convolution layers to downsample input from channels c1 to c2.""" super().__init__() self.c = c2 // 2 self.cv1 = Conv(c1 // 2, self.c, 3, 2, 1) self.cv2 = Conv(c1 // 2, self.c, 1, 1, 0) def forward(self, x): """Forward pass through ADown layer.""" x = torch.nn.functional.avg_pool2d(x, 2, 1, 0, False, True) x1, x2 = x.chunk(2, 1) x1 = self.cv1(x1) x2 = torch.nn.functional.max_pool2d(x2, 3, 2, 1) x2 = self.cv2(x2) return torch.cat((x1, x2), 1) class SPPELAN(nn.Module): """SPP-ELAN.""" def __init__(self, c1, c2, c3, k=5): """Initializes SPP-ELAN block with convolution and max pooling layers for 


class SPPELAN(nn.Module):
    """SPP-ELAN."""

    def __init__(self, c1, c2, c3, k=5):
        """Initializes SPP-ELAN block with convolution and max pooling layers for spatial pyramid pooling."""
        super().__init__()
        self.c = c3
        self.cv1 = Conv(c1, c3, 1, 1)
        self.cv2 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv3 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv4 = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.cv5 = Conv(4 * c3, c2, 1, 1)

    def forward(self, x):
        """Forward pass through SPPELAN layer."""
        y = [self.cv1(x)]
        y.extend(m(y[-1]) for m in [self.cv2, self.cv3, self.cv4])
        return self.cv5(torch.cat(y, 1))


class CBLinear(nn.Module):
    """CBLinear."""

    def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
        """Initializes the CBLinear module, a convolution whose output is split into `c2s` channel groups."""
        super().__init__()
        self.c2s = c2s
        self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)

    def forward(self, x):
        """Forward pass through CBLinear layer."""
        return self.conv(x).split(self.c2s, dim=1)


class CBFuse(nn.Module):
    """CBFuse."""

    def __init__(self, idx):
        """Initializes CBFuse module with layer index for selective feature fusion."""
        super().__init__()
        self.idx = idx

    def forward(self, xs):
        """Forward pass through CBFuse layer."""
        target_size = xs[-1].shape[2:]
        res = [F.interpolate(x[self.idx[i]], size=target_size, mode="nearest") for i, x in enumerate(xs[:-1])]
        return torch.sum(torch.stack(res + xs[-1:]), dim=0)


class C3f(nn.Module):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """
        Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut,
        groups, expansion.
        """
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv((2 + n) * c_, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(c_, c_, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))

    def forward(self, x):
        """Forward pass through C3f layer."""
        y = [self.cv2(x), self.cv1(x)]
        y.extend(m(y[-1]) for m in self.m)
        return self.cv3(torch.cat(y, 1))


class C3k2(C2f):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, c3k=False, e=0.5, g=1, shortcut=True):
        """Initializes the C3k2 module, a faster CSP Bottleneck with 2 convolutions and optional C3k blocks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(
            C3k(self.c, self.c, 2, shortcut, g) if c3k else Bottleneck(self.c, self.c, shortcut, g) for _ in range(n)
        )


class C3k(C3):
    """C3k is a CSP bottleneck module with customizable kernel sizes for feature extraction in neural networks."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, k=3):
        """Initializes the C3k module with specified channels, number of layers, and configurations."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        # self.m = nn.Sequential(*(RepBottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n)))
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=(k, k), e=1.0) for _ in range(n)))
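

# Illustrative sketch (not part of the original module): CBLinear projects one feature
# map into several channel groups, and CBFuse later sums the selected groups (resized
# to the smallest map) with the final-level feature:
#
#   >>> cbl = CBLinear(64, (32, 64))
#   >>> outs = cbl(torch.randn(1, 64, 40, 40))
#   >>> [o.shape[1] for o in outs]
#   [32, 64]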


class RepVGGDW(torch.nn.Module):
    """RepVGGDW is a class that represents a depthwise convolutional block in the RepVGG architecture."""

    def __init__(self, ed) -> None:
        """Initializes RepVGGDW with parallel 7x7 and 3x3 depthwise convolutional layers."""
        super().__init__()
        self.conv = Conv(ed, ed, 7, 1, 3, g=ed, act=False)
        self.conv1 = Conv(ed, ed, 3, 1, 1, g=ed, act=False)
        self.dim = ed
        self.act = nn.SiLU()

    def forward(self, x):
        """
        Performs a forward pass of the RepVGGDW block.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor after applying the depthwise convolutions.
        """
        return self.act(self.conv(x) + self.conv1(x))

    def forward_fuse(self, x):
        """
        Performs a forward pass of the RepVGGDW block using the single fused convolution.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor after applying the depthwise convolution.
        """
        return self.act(self.conv(x))

    @torch.no_grad()
    def fuse(self):
        """
        Fuses the convolutional layers in the RepVGGDW block.

        This method fuses the convolutional layers and updates the weights and biases accordingly.
        """
        conv = fuse_conv_and_bn(self.conv.conv, self.conv.bn)
        conv1 = fuse_conv_and_bn(self.conv1.conv, self.conv1.bn)

        conv_w = conv.weight
        conv_b = conv.bias
        conv1_w = conv1.weight
        conv1_b = conv1.bias

        conv1_w = torch.nn.functional.pad(conv1_w, [2, 2, 2, 2])  # pad the 3x3 kernel to 7x7

        final_conv_w = conv_w + conv1_w
        final_conv_b = conv_b + conv1_b

        conv.weight.data.copy_(final_conv_w)
        conv.bias.data.copy_(final_conv_b)

        self.conv = conv
        del self.conv1


class CIB(nn.Module):
    """
    Conditional Identity Block (CIB) module.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        shortcut (bool, optional): Whether to add a shortcut connection. Defaults to True.
        e (float, optional): Scaling factor for the hidden channels. Defaults to 0.5.
        lk (bool, optional): Whether to use RepVGGDW for the third convolutional layer. Defaults to False.
    """

    def __init__(self, c1, c2, shortcut=True, e=0.5, lk=False):
        """Initializes the custom model with optional shortcut, scaling factor, and RepVGGDW layer."""
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = nn.Sequential(
            Conv(c1, c1, 3, g=c1),
            Conv(c1, 2 * c_, 1),
            RepVGGDW(2 * c_) if lk else Conv(2 * c_, 2 * c_, 3, g=2 * c_),
            Conv(2 * c_, c2, 1),
            Conv(c2, c2, 3, g=c2),
        )
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """
        Forward pass of the CIB module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            (torch.Tensor): Output tensor.
        """
        return x + self.cv1(x) if self.add else self.cv1(x)


class C2fCIB(C2f):
    """
    C2fCIB class represents a convolutional block with C2f and CIB modules.

    Args:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        n (int, optional): Number of CIB modules to stack. Defaults to 1.
        shortcut (bool, optional): Whether to use shortcut connection. Defaults to False.
        lk (bool, optional): Whether to use large-kernel RepVGGDW layers inside the CIB modules. Defaults to False.
        g (int, optional): Number of groups for grouped convolution. Defaults to 1.
        e (float, optional): Expansion ratio for CIB modules. Defaults to 0.5.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, lk=False, g=1, e=0.5):
        """Initializes the module with specified parameters for channel, shortcut, large kernel, groups, and expansion."""
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(CIB(self.c, self.c, shortcut, e=1.0, lk=lk) for _ in range(n))
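

# Illustrative sketch (not part of the original module): fuse() folds the 3x3 branch
# (zero-padded to 7x7) into the 7x7 branch, so the fused single-conv path should match
# the two-branch path up to numerical tolerance (in eval mode, so BatchNorm is frozen):
#
#   >>> m = RepVGGDW(64).eval()
#   >>> x = torch.randn(1, 64, 32, 32)
#   >>> y_ref = m(x)
#   >>> m.fuse()
#   >>> torch.allclose(m.forward_fuse(x), y_ref, atol=1e-4)
#   True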


class Attention(nn.Module):
    """
    Attention module that performs self-attention on the input tensor.

    Args:
        dim (int): The input tensor dimension.
        num_heads (int): The number of attention heads.
        attn_ratio (float): The ratio of the attention key dimension to the head dimension.

    Attributes:
        num_heads (int): The number of attention heads.
        head_dim (int): The dimension of each attention head.
        key_dim (int): The dimension of the attention key.
        scale (float): The scaling factor for the attention scores.
        qkv (Conv): Convolutional layer for computing the query, key, and value.
        proj (Conv): Convolutional layer for projecting the attended values.
        pe (Conv): Convolutional layer for positional encoding.
    """

    def __init__(self, dim, num_heads=8, attn_ratio=0.5):
        """Initializes multi-head attention module with query, key, and value convolutions and positional encoding."""
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.key_dim = int(self.head_dim * attn_ratio)
        self.scale = self.key_dim**-0.5
        nh_kd = self.key_dim * num_heads
        h = dim + nh_kd * 2
        self.qkv = Conv(dim, h, 1, act=False)
        self.proj = Conv(dim, dim, 1, act=False)
        self.pe = Conv(dim, dim, 3, 1, g=dim, act=False)

    def forward(self, x):
        """
        Forward pass of the Attention module.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            (torch.Tensor): The output tensor after self-attention.
        """
        B, C, H, W = x.shape
        N = H * W
        qkv = self.qkv(x)
        q, k, v = qkv.view(B, self.num_heads, self.key_dim * 2 + self.head_dim, N).split(
            [self.key_dim, self.key_dim, self.head_dim], dim=2
        )

        attn = (q.transpose(-2, -1) @ k) * self.scale
        attn = attn.softmax(dim=-1)
        x = (v @ attn.transpose(-2, -1)).view(B, C, H, W) + self.pe(v.reshape(B, C, H, W))
        x = self.proj(x)
        return x


class PSABlock(nn.Module):
    """
    PSABlock class implementing a Position-Sensitive Attention block for neural networks.

    This class encapsulates the functionality for applying multi-head attention and feed-forward neural network
    layers with optional shortcut connections.

    Attributes:
        attn (Attention): Multi-head attention module.
        ffn (nn.Sequential): Feed-forward neural network module.
        add (bool): Flag indicating whether to add shortcut connections.

    Methods:
        forward: Performs a forward pass through the PSABlock, applying attention and feed-forward layers.

    Examples:
        Create a PSABlock and perform a forward pass
        >>> psablock = PSABlock(c=128, attn_ratio=0.5, num_heads=4, shortcut=True)
        >>> input_tensor = torch.randn(1, 128, 32, 32)
        >>> output_tensor = psablock(input_tensor)
    """

    def __init__(self, c, attn_ratio=0.5, num_heads=4, shortcut=True) -> None:
        """Initializes the PSABlock with attention and feed-forward layers for enhanced feature extraction."""
        super().__init__()
        self.attn = Attention(c, attn_ratio=attn_ratio, num_heads=num_heads)
        self.ffn = nn.Sequential(Conv(c, c * 2, 1), Conv(c * 2, c, 1, act=False))
        self.add = shortcut

    def forward(self, x):
        """Executes a forward pass through PSABlock, applying attention and feed-forward layers to the input tensor."""
        x = x + self.attn(x) if self.add else self.attn(x)
        x = x + self.ffn(x) if self.add else self.ffn(x)
        return x
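

# Illustrative sketch (not part of the original module): the single qkv conv emits
# num_heads * (2 * key_dim + head_dim) channels, split per head into q and k (key_dim
# each) and v (head_dim). With dim=128, num_heads=8, attn_ratio=0.5: head_dim=16,
# key_dim=8, so qkv has 8 * (8 + 8 + 16) = 256 channels.
#
#   >>> attn = Attention(dim=128, num_heads=8)
#   >>> attn(torch.randn(1, 128, 32, 32)).shape
#   torch.Size([1, 128, 32, 32])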


class PSA(nn.Module):
    """
    PSA class for implementing Position-Sensitive Attention in neural networks.

    This class encapsulates the functionality for applying position-sensitive attention and feed-forward networks to
    input tensors, enhancing feature extraction and processing capabilities.

    Attributes:
        c (int): Number of hidden channels after applying the initial convolution.
        cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
        cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
        attn (Attention): Attention module for position-sensitive attention.
        ffn (nn.Sequential): Feed-forward network for further processing.

    Methods:
        forward: Applies position-sensitive attention and feed-forward network to the input tensor.

    Examples:
        Create a PSA module and apply it to an input tensor
        >>> psa = PSA(c1=128, c2=128, e=0.5)
        >>> input_tensor = torch.randn(1, 128, 64, 64)
        >>> output_tensor = psa.forward(input_tensor)
    """

    def __init__(self, c1, c2, e=0.5):
        """Initializes the PSA module with input/output channels and attention mechanism for feature extraction."""
        super().__init__()
        assert c1 == c2
        self.c = int(c1 * e)
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c1, 1)

        self.attn = Attention(self.c, attn_ratio=0.5, num_heads=self.c // 64)
        self.ffn = nn.Sequential(Conv(self.c, self.c * 2, 1), Conv(self.c * 2, self.c, 1, act=False))

    def forward(self, x):
        """Executes forward pass in PSA module, applying attention and feed-forward layers to the input tensor."""
        a, b = self.cv1(x).split((self.c, self.c), dim=1)
        b = b + self.attn(b)
        b = b + self.ffn(b)
        return self.cv2(torch.cat((a, b), 1))


class C2PSA(nn.Module):
    """
    C2PSA module with attention mechanism for enhanced feature extraction and processing.

    This module implements a convolutional block with attention mechanisms to enhance feature extraction and
    processing capabilities. It includes a series of PSABlock modules for self-attention and feed-forward operations.

    Attributes:
        c (int): Number of hidden channels.
        cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
        cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
        m (nn.Sequential): Sequential container of PSABlock modules for attention and feed-forward operations.

    Methods:
        forward: Performs a forward pass through the C2PSA module, applying attention and feed-forward operations.

    Notes:
        This module essentially is the same as PSA module, but refactored to allow stacking more PSABlock modules.

    Examples:
        >>> c2psa = C2PSA(c1=256, c2=256, n=3, e=0.5)
        >>> input_tensor = torch.randn(1, 256, 64, 64)
        >>> output_tensor = c2psa(input_tensor)
    """

    def __init__(self, c1, c2, n=1, e=0.5):
        """Initializes the C2PSA module with specified input/output channels, number of layers, and expansion ratio."""
        super().__init__()
        assert c1 == c2
        self.c = int(c1 * e)
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv(2 * self.c, c1, 1)

        self.m = nn.Sequential(*(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n)))

    def forward(self, x):
        """Processes the input tensor 'x' through a series of PSA blocks and returns the transformed tensor."""
        a, b = self.cv1(x).split((self.c, self.c), dim=1)
        b = self.m(b)
        return self.cv2(torch.cat((a, b), 1))


class C2fPSA(C2f):
    """
    C2fPSA module with enhanced feature extraction using PSA blocks.

    This class extends the C2f module by incorporating PSA blocks for improved attention mechanisms and feature
    extraction.

    Attributes:
        c (int): Number of hidden channels.
        cv1 (Conv): 1x1 convolution layer to reduce the number of input channels to 2*c.
        cv2 (Conv): 1x1 convolution layer to reduce the number of output channels to c.
        m (nn.ModuleList): List of PSA blocks for feature extraction.

    Methods:
        forward: Performs a forward pass through the C2fPSA module.
        forward_split: Performs a forward pass using split() instead of chunk().

    Examples:
        >>> import torch
        >>> from ultralytics.nn.modules import C2fPSA
        >>> model = C2fPSA(c1=64, c2=64, n=3, e=0.5)
        >>> x = torch.randn(1, 64, 128, 128)
        >>> output = model(x)
        >>> print(output.shape)
    """

    def __init__(self, c1, c2, n=1, e=0.5):
        """Initializes the C2fPSA module, a variant of C2f with PSA blocks for enhanced feature extraction."""
        assert c1 == c2
        super().__init__(c1, c2, n=n, e=e)
        self.m = nn.ModuleList(PSABlock(self.c, attn_ratio=0.5, num_heads=self.c // 64) for _ in range(n))


class SCDown(nn.Module):
    """
    SCDown module for downsampling with separable convolutions.

    This module performs downsampling using a combination of pointwise and depthwise convolutions, which helps in
    efficiently reducing the spatial dimensions of the input tensor while maintaining the channel information.

    Attributes:
        cv1 (Conv): Pointwise convolution layer that reduces the number of channels.
        cv2 (Conv): Depthwise convolution layer that performs spatial downsampling.

    Methods:
        forward: Applies the SCDown module to the input tensor.

    Examples:
        >>> import torch
        >>> from ultralytics.nn.modules import SCDown
        >>> model = SCDown(c1=64, c2=128, k=3, s=2)
        >>> x = torch.randn(1, 64, 128, 128)
        >>> y = model(x)
        >>> print(y.shape)
        torch.Size([1, 128, 64, 64])
    """

    def __init__(self, c1, c2, k, s):
        """Initializes the SCDown module with specified input/output channels, kernel size, and stride."""
        super().__init__()
        self.cv1 = Conv(c1, c2, 1, 1)
        self.cv2 = Conv(c2, c2, k=k, s=s, g=c2, act=False)

    def forward(self, x):
        """Applies convolution and downsampling to the input tensor in the SCDown module."""
        return self.cv2(self.cv1(x))
""" def __init__(self, c1, c2, model, weights="DEFAULT", unwrap=True, truncate=2, split=False): """Load the model and weights from torchvision.""" import torchvision # scope for faster 'import ultralytics' super().__init__() if hasattr(torchvision.models, "get_model"): self.m = torchvision.models.get_model(model, weights=weights) else: self.m = torchvision.models.__dict__[model](pretrained=bool(weights)) if unwrap: layers = list(self.m.children())[:-truncate] if isinstance(layers[0], nn.Sequential): # Second-level for some models like EfficientNet, Swin layers = [*list(layers[0].children()), *layers[1:]] self.m = nn.Sequential(*layers) self.split = split else: self.split = False self.m.head = self.m.heads = nn.Identity() def forward(self, x): """Forward pass through the model.""" if self.split: y = [x] y.extend(m(y[-1]) for m in self.m) else: y = self.m(x) return y import logging logger = logging.getLogger(__name__) USE_FLASH_ATTN = False try: import torch if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8: # Ampere or newer from flash_attn.flash_attn_interface import flash_attn_func USE_FLASH_ATTN = True else: from torch.nn.functional import scaled_dot_product_attention as sdpa logger.warning("FlashAttention is not available on this device. Using scaled_dot_product_attention instead.") except Exception: from torch.nn.functional import scaled_dot_product_attention as sdpa logger.warning("FlashAttention is not available on this device. Using scaled_dot_product_attention instead.") class AAttn(nn.Module): """ Area-attention module with the requirement of flash attention. Attributes: dim (int): Number of hidden channels; num_heads (int): Number of heads into which the attention mechanism is divided; area (int, optional): Number of areas the feature map is divided. Defaults to 1. Methods: forward: Performs a forward process of input tensor and outputs a tensor after the execution of the area attention mechanism. Examples: >>> import torch >>> from ultralytics.nn.modules import AAttn >>> model = AAttn(dim=64, num_heads=2, area=4) >>> x = torch.randn(2, 64, 128, 128) >>> output = model(x) >>> print(output.shape) Notes: recommend that dim//num_heads be a multiple of 32 or 64. 
""" def __init__(self, dim, num_heads, area=1): """Initializes the area-attention module, a simple yet efficient attention module for YOLO.""" super().__init__() self.area = area self.num_heads = num_heads self.head_dim = head_dim = dim // num_heads all_head_dim = head_dim * self.num_heads self.qk = Conv(dim, all_head_dim * 2, 1, act=False) self.v = Conv(dim, all_head_dim, 1, act=False) self.proj = Conv(all_head_dim, dim, 1, act=False) self.pe = Conv(all_head_dim, dim, 5, 1, 2, g=dim, act=False) def forward(self, x): """Processes the input tensor 'x' through the area-attention""" B, C, H, W = x.shape N = H * W qk = self.qk(x).flatten(2).transpose(1, 2) v = self.v(x) pp = self.pe(v) v = v.flatten(2).transpose(1, 2) if self.area > 1: qk = qk.reshape(B * self.area, N // self.area, C * 2) v = v.reshape(B * self.area, N // self.area, C) B, N, _ = qk.shape q, k = qk.split([C, C], dim=2) if x.is_cuda and USE_FLASH_ATTN: q = q.view(B, N, self.num_heads, self.head_dim) k = k.view(B, N, self.num_heads, self.head_dim) v = v.view(B, N, self.num_heads, self.head_dim) x = flash_attn_func( q.contiguous().half(), k.contiguous().half(), v.contiguous().half() ).to(q.dtype) else: q = q.transpose(1, 2).view(B, self.num_heads, self.head_dim, N) k = k.transpose(1, 2).view(B, self.num_heads, self.head_dim, N) v = v.transpose(1, 2).view(B, self.num_heads, self.head_dim, N) attn = (q.transpose(-2, -1) @ k) * (self.head_dim ** -0.5) max_attn = attn.max(dim=-1, keepdim=True).values exp_attn = torch.exp(attn - max_attn) attn = exp_attn / exp_attn.sum(dim=-1, keepdim=True) x = (v @ attn.transpose(-2, -1)) x = x.permute(0, 3, 1, 2) if self.area > 1: x = x.reshape(B // self.area, N * self.area, C) B, N, _ = x.shape x = x.reshape(B, H, W, C).permute(0, 3, 1, 2) return self.proj(x + pp) class ABlock(nn.Module): """ ABlock class implementing a Area-Attention block with effective feature extraction. This class encapsulates the functionality for applying multi-head attention with feature map are dividing into areas and feed-forward neural network layers. Attributes: dim (int): Number of hidden channels; num_heads (int): Number of heads into which the attention mechanism is divided; mlp_ratio (float, optional): MLP expansion ratio (or MLP hidden dimension ratio). Defaults to 1.2; area (int, optional): Number of areas the feature map is divided. Defaults to 1. Methods: forward: Performs a forward pass through the ABlock, applying area-attention and feed-forward layers. Examples: Create a ABlock and perform a forward pass >>> model = ABlock(dim=64, num_heads=2, mlp_ratio=1.2, area=4) >>> x = torch.randn(2, 64, 128, 128) >>> output = model(x) >>> print(output.shape) Notes: recommend that dim//num_heads be a multiple of 32 or 64. 
""" def __init__(self, dim, num_heads, mlp_ratio=1.2, area=1): """Initializes the ABlock with area-attention and feed-forward layers for faster feature extraction.""" super().__init__() self.attn = AAttn(dim, num_heads=num_heads, area=area) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = nn.Sequential(Conv(dim, mlp_hidden_dim, 1), Conv(mlp_hidden_dim, dim, 1, act=False)) self.apply(self._init_weights) def _init_weights(self, m): """Initialize weights using a truncated normal distribution.""" if isinstance(m, nn.Conv2d): nn.init.trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) def forward(self, x): """Executes a forward pass through ABlock, applying area-attention and feed-forward layers to the input tensor.""" x = x + self.attn(x) x = x + self.mlp(x) return x class A2C2f(nn.Module): """ A2C2f module with residual enhanced feature extraction using ABlock blocks with area-attention. Also known as R-ELAN This class extends the C2f module by incorporating ABlock blocks for fast attention mechanisms and feature extraction. Attributes: c1 (int): Number of input channels; c2 (int): Number of output channels; n (int, optional): Number of 2xABlock modules to stack. Defaults to 1; a2 (bool, optional): Whether use area-attention. Defaults to True; area (int, optional): Number of areas the feature map is divided. Defaults to 1; residual (bool, optional): Whether use the residual (with layer scale). Defaults to False; mlp_ratio (float, optional): MLP expansion ratio (or MLP hidden dimension ratio). Defaults to 1.2; e (float, optional): Expansion ratio for R-ELAN modules. Defaults to 0.5; g (int, optional): Number of groups for grouped convolution. Defaults to 1; shortcut (bool, optional): Whether to use shortcut connection. Defaults to True; Methods: forward: Performs a forward pass through the A2C2f module. Examples: >>> import torch >>> from ultralytics.nn.modules import A2C2f >>> model = A2C2f(c1=64, c2=64, n=2, a2=True, area=4, residual=True, e=0.5) >>> x = torch.randn(2, 64, 128, 128) >>> output = model(x) >>> print(output.shape) """ def __init__(self, c1, c2, n=1, a2=True, area=1, residual=False, mlp_ratio=2.0, e=0.5, g=1, shortcut=True): super().__init__() c_ = int(c2 * e) # hidden channels assert c_ % 32 == 0, "Dimension of ABlock be a multiple of 32." # num_heads = c_ // 64 if c_ // 64 >= 2 else c_ // 32 num_heads = c_ // 32 self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv((1 + n) * c_, c2, 1) # optional act=FReLU(c2) init_values = 0.01 # or smaller self.gamma = nn.Parameter(init_values * torch.ones((c2)), requires_grad=True) if a2 and residual else None self.m = nn.ModuleList( nn.Sequential(*(ABlock(c_, num_heads, mlp_ratio, area) for _ in range(2))) if a2 else C3k(c_, c_, 2, shortcut, g) for _ in range(n) ) def forward(self, x): """Forward pass through R-ELAN layer.""" y = [self.cv1(x)] y.extend(m(y[-1]) for m in self.m) if self.gamma is not None: return x + self.gamma.view(1, -1, 1, 1) * self.cv2(torch.cat(y, 1)) return self.cv2(torch.cat(y, 1)) class DSBottleneck(nn.Module): """ An improved bottleneck block using depthwise separable convolutions (DSConv). This class implements a lightweight bottleneck module that replaces standard convolutions with depthwise separable convolutions to reduce parameters and computational cost. Attributes: c1 (int): Number of input channels. c2 (int): Number of output channels. shortcut (bool, optional): Whether to use a residual shortcut connection. The connection is only added if c1 == c2. Defaults to True. 


class DSBottleneck(nn.Module):
    """
    An improved bottleneck block using depthwise separable convolutions (DSConv).

    This class implements a lightweight bottleneck module that replaces standard convolutions with depthwise
    separable convolutions to reduce parameters and computational cost.

    Attributes:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        shortcut (bool, optional): Whether to use a residual shortcut connection. The connection is only added if
            c1 == c2. Defaults to True.
        e (float, optional): Expansion ratio for the intermediate channels. Defaults to 0.5.
        k1 (int, optional): Kernel size for the first DSConv layer. Defaults to 3.
        k2 (int, optional): Kernel size for the second DSConv layer. Defaults to 5.
        d2 (int, optional): Dilation for the second DSConv layer. Defaults to 1.

    Methods:
        forward: Performs a forward pass through the DSBottleneck module.

    Examples:
        >>> import torch
        >>> model = DSBottleneck(c1=64, c2=64, shortcut=True)
        >>> x = torch.randn(2, 64, 32, 32)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 64, 32, 32])
    """

    def __init__(self, c1, c2, shortcut=True, e=0.5, k1=3, k2=5, d2=1):
        """Initializes the DSBottleneck with two depthwise separable conv layers and an optional shortcut."""
        super().__init__()
        c_ = int(c2 * e)
        self.cv1 = DSConv(c1, c_, k1, s=1, p=None, d=1)
        self.cv2 = DSConv(c_, c2, k2, s=1, p=None, d=d2)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Applies the two DSConv layers and adds the input if the shortcut is enabled."""
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y


class DSC3k(C3):
    """
    An improved C3k module using DSBottleneck blocks for lightweight feature extraction.

    This class extends the C3 module by replacing its standard bottleneck blocks with DSBottleneck blocks, which use
    depthwise separable convolutions.

    Attributes:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        n (int, optional): Number of DSBottleneck blocks to stack. Defaults to 1.
        shortcut (bool, optional): Whether to use shortcut connections within the DSBottlenecks. Defaults to True.
        g (int, optional): Number of groups for grouped convolution (passed to parent C3). Defaults to 1.
        e (float, optional): Expansion ratio for the C3 module's hidden channels. Defaults to 0.5.
        k1 (int, optional): Kernel size for the first DSConv in each DSBottleneck. Defaults to 3.
        k2 (int, optional): Kernel size for the second DSConv in each DSBottleneck. Defaults to 5.
        d2 (int, optional): Dilation for the second DSConv in each DSBottleneck. Defaults to 1.

    Methods:
        forward: Performs a forward pass through the DSC3k module (inherited from C3).

    Examples:
        >>> import torch
        >>> model = DSC3k(c1=128, c2=128, n=2, k1=3, k2=7)
        >>> x = torch.randn(2, 128, 64, 64)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 128, 64, 64])
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, k1=3, k2=5, d2=1):
        """Initializes DSC3k, replacing the standard bottlenecks of C3 with DSBottleneck blocks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = nn.Sequential(
            *(DSBottleneck(c_, c_, shortcut=shortcut, e=1.0, k1=k1, k2=k2, d2=d2) for _ in range(n))
        )


class DSC3k2(C2f):
    """
    An improved C3k2 module that uses lightweight depthwise separable convolution blocks.

    This class redesigns the C3k2 module, replacing its internal processing blocks with either DSBottleneck or DSC3k
    modules.

    Attributes:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        n (int, optional): Number of internal processing blocks to stack. Defaults to 1.
        dsc3k (bool, optional): If True, use DSC3k as the internal block. If False, use DSBottleneck.
            Defaults to False.
        e (float, optional): Expansion ratio for the C2f module's hidden channels. Defaults to 0.5.
        g (int, optional): Number of groups for grouped convolution (passed to parent C2f). Defaults to 1.
        shortcut (bool, optional): Whether to use shortcut connections in the internal blocks. Defaults to True.
        k1 (int, optional): Kernel size for the first DSConv in internal blocks. Defaults to 3.
        k2 (int, optional): Kernel size for the second DSConv in internal blocks. Defaults to 7.
        d2 (int, optional): Dilation for the second DSConv in internal blocks. Defaults to 1.

    Methods:
        forward: Performs a forward pass through the DSC3k2 module (inherited from C2f).

    Examples:
        >>> import torch
        >>> # Using DSBottleneck as internal block
        >>> model1 = DSC3k2(c1=64, c2=64, n=2, dsc3k=False)
        >>> x = torch.randn(2, 64, 128, 128)
        >>> output1 = model1(x)
        >>> print(f"With DSBottleneck: {output1.shape}")
        With DSBottleneck: torch.Size([2, 64, 128, 128])
        >>> # Using DSC3k as internal block
        >>> model2 = DSC3k2(c1=64, c2=64, n=1, dsc3k=True)
        >>> output2 = model2(x)
        >>> print(f"With DSC3k: {output2.shape}")
        With DSC3k: torch.Size([2, 64, 128, 128])
    """

    def __init__(self, c1, c2, n=1, dsc3k=False, e=0.5, g=1, shortcut=True, k1=3, k2=7, d2=1):
        """Initializes DSC3k2 with either DSC3k or DSBottleneck internal blocks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        if dsc3k:
            self.m = nn.ModuleList(
                DSC3k(self.c, self.c, n=2, shortcut=shortcut, g=g, e=1.0, k1=k1, k2=k2, d2=d2) for _ in range(n)
            )
        else:
            self.m = nn.ModuleList(
                DSBottleneck(self.c, self.c, shortcut=shortcut, e=1.0, k1=k1, k2=k2, d2=d2) for _ in range(n)
            )


class AdaHyperedgeGen(nn.Module):
    """
    Generates an adaptive hyperedge participation matrix from a set of vertex features.

    This module implements the Adaptive Hyperedge Generation mechanism. It generates dynamic hyperedge prototypes
    based on the global context of the input nodes and calculates a continuous participation matrix (A) that defines
    the relationship between each vertex and each hyperedge.

    Attributes:
        node_dim (int): The feature dimension of each input node.
        num_hyperedges (int): The number of hyperedges to generate.
        num_heads (int, optional): The number of attention heads for multi-head similarity calculation.
            Defaults to 4.
        dropout (float, optional): The dropout rate applied to the logits. Defaults to 0.1.
        context (str, optional): The type of global context to use ('mean', 'max', or 'both'). Defaults to "both".

    Methods:
        forward: Takes a batch of vertex features and returns the participation matrix A.

    Examples:
        >>> import torch
        >>> model = AdaHyperedgeGen(node_dim=64, num_hyperedges=16, num_heads=4)
        >>> x = torch.randn(2, 100, 64)  # (Batch, Num_Nodes, Node_Dim)
        >>> A = model(x)
        >>> print(A.shape)
        torch.Size([2, 100, 16])
    """

    def __init__(self, node_dim, num_hyperedges, num_heads=4, dropout=0.1, context="both"):
        """Initializes the hyperedge generator with learnable prototypes and a context projection."""
        super().__init__()
        self.num_heads = num_heads
        self.num_hyperedges = num_hyperedges
        self.head_dim = node_dim // num_heads
        self.context = context

        self.prototype_base = nn.Parameter(torch.Tensor(num_hyperedges, node_dim))
        nn.init.xavier_uniform_(self.prototype_base)
        if context in ("mean", "max"):
            self.context_net = nn.Linear(node_dim, num_hyperedges * node_dim)
        elif context == "both":
            self.context_net = nn.Linear(2 * node_dim, num_hyperedges * node_dim)
        else:
            raise ValueError(f"Unsupported context '{context}'. Expected one of: 'mean', 'max', 'both'.")

        self.pre_head_proj = nn.Linear(node_dim, node_dim)
        self.dropout = nn.Dropout(dropout)
        self.scaling = math.sqrt(self.head_dim)

    def forward(self, X):
        """Computes the (B, N, num_hyperedges) participation matrix from vertex features X."""
        B, N, D = X.shape
        if self.context == "mean":
            context_cat = X.mean(dim=1)
        elif self.context == "max":
            context_cat, _ = X.max(dim=1)
        else:
            avg_context = X.mean(dim=1)
            max_context, _ = X.max(dim=1)
            context_cat = torch.cat([avg_context, max_context], dim=-1)
        prototype_offsets = self.context_net(context_cat).view(B, self.num_hyperedges, D)
        prototypes = self.prototype_base.unsqueeze(0) + prototype_offsets

        X_proj = self.pre_head_proj(X)
        X_heads = X_proj.view(B, N, self.num_heads, self.head_dim).transpose(1, 2)
        proto_heads = prototypes.view(B, self.num_hyperedges, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        X_heads_flat = X_heads.reshape(B * self.num_heads, N, self.head_dim)
        proto_heads_flat = proto_heads.reshape(B * self.num_heads, self.num_hyperedges, self.head_dim).transpose(1, 2)

        logits = torch.bmm(X_heads_flat, proto_heads_flat) / self.scaling
        logits = logits.view(B, self.num_heads, N, self.num_hyperedges).mean(dim=1)

        logits = self.dropout(logits)

        return F.softmax(logits, dim=1)
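

# Illustrative sketch (not part of the original module): the returned participation
# matrix A has shape (B, N, M) with softmax taken over the node axis (dim=1), so each
# hyperedge's column is a distribution over the N vertices:
#
#   >>> gen = AdaHyperedgeGen(node_dim=64, num_hyperedges=16, num_heads=4)
#   >>> A = gen(torch.randn(2, 100, 64))
#   >>> torch.allclose(A.sum(dim=1), torch.ones(2, 16), atol=1e-5)
#   True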


class AdaHGConv(nn.Module):
    """
    Performs the adaptive hypergraph convolution.

    This module contains the hyperedge generation step followed by the two-stage message passing of hypergraph
    convolution:
    1. Generates an adaptive participation matrix using AdaHyperedgeGen.
    2. Aggregates vertex features into hyperedge features (vertex-to-edge).
    3. Disseminates hyperedge features back to update vertex features (edge-to-vertex).
    A residual connection is added to the final output.

    Attributes:
        embed_dim (int): The feature dimension of the vertices.
        num_hyperedges (int, optional): The number of hyperedges for the internal generator. Defaults to 16.
        num_heads (int, optional): The number of attention heads for the internal generator. Defaults to 4.
        dropout (float, optional): The dropout rate for the internal generator. Defaults to 0.1.
        context (str, optional): The context type for the internal generator. Defaults to "both".

    Methods:
        forward: Performs the adaptive hypergraph convolution on a batch of vertex features.

    Examples:
        >>> import torch
        >>> model = AdaHGConv(embed_dim=128, num_hyperedges=16, num_heads=8)
        >>> x = torch.randn(2, 256, 128)  # (Batch, Num_Nodes, Dim)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 256, 128])
    """

    def __init__(self, embed_dim, num_hyperedges=16, num_heads=4, dropout=0.1, context="both"):
        """Initializes the adaptive hypergraph convolution with its edge generator and projections."""
        super().__init__()
        self.edge_generator = AdaHyperedgeGen(embed_dim, num_hyperedges, num_heads, dropout, context)
        self.edge_proj = nn.Sequential(nn.Linear(embed_dim, embed_dim), nn.GELU())
        self.node_proj = nn.Sequential(nn.Linear(embed_dim, embed_dim), nn.GELU())

    def forward(self, X):
        """Performs vertex-to-edge and edge-to-vertex message passing with a residual connection."""
        A = self.edge_generator(X)  # (B, N, num_hyperedges)

        He = torch.bmm(A.transpose(1, 2), X)  # vertex-to-edge aggregation
        He = self.edge_proj(He)

        X_new = torch.bmm(A, He)  # edge-to-vertex dissemination
        X_new = self.node_proj(X_new)

        return X_new + X


class AdaHGComputation(nn.Module):
    """
    A wrapper module for applying adaptive hypergraph convolution to 4D feature maps.

    This class makes the hypergraph convolution compatible with standard CNN architectures. It flattens a 4D input
    tensor (B, C, H, W) into a sequence of vertices (tokens), applies the AdaHGConv layer to model high-order
    correlations, and then reshapes the output back into a 4D tensor.

    Attributes:
        embed_dim (int): The feature dimension of the vertices (equivalent to input channels C).
        num_hyperedges (int, optional): The number of hyperedges for the underlying AdaHGConv. Defaults to 16.
        num_heads (int, optional): The number of attention heads for the underlying AdaHGConv. Defaults to 8.
        dropout (float, optional): The dropout rate for the underlying AdaHGConv. Defaults to 0.1.
        context (str, optional): The context type for the underlying AdaHGConv. Defaults to "both".

    Methods:
        forward: Processes a 4D feature map through the adaptive hypergraph computation layer.

    Examples:
        >>> import torch
        >>> model = AdaHGComputation(embed_dim=64, num_hyperedges=8, num_heads=4)
        >>> x = torch.randn(2, 64, 32, 32)  # (B, C, H, W)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 64, 32, 32])
    """

    def __init__(self, embed_dim, num_hyperedges=16, num_heads=8, dropout=0.1, context="both"):
        """Initializes the wrapper with an internal AdaHGConv layer."""
        super().__init__()
        self.embed_dim = embed_dim
        self.hgnn = AdaHGConv(
            embed_dim=embed_dim, num_hyperedges=num_hyperedges, num_heads=num_heads, dropout=dropout, context=context
        )

    def forward(self, x):
        """Flattens the feature map to tokens, applies hypergraph convolution, and restores the 4D shape."""
        B, C, H, W = x.shape
        tokens = x.flatten(2).transpose(1, 2)
        tokens = self.hgnn(tokens)
        x_out = tokens.transpose(1, 2).view(B, C, H, W)
        return x_out
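

# Illustrative sketch (not part of the original module): hypergraph convolution here is
# two bmm steps, He = A^T X (vertex-to-edge aggregation, shape (B, M, D)) followed by
# X' = A He (edge-to-vertex dissemination, shape (B, N, D)), plus a residual connection:
#
#   >>> conv = AdaHGConv(embed_dim=64, num_hyperedges=8, num_heads=4)
#   >>> conv(torch.randn(2, 49, 64)).shape
#   torch.Size([2, 49, 64])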


class C3AH(nn.Module):
    """
    A CSP-style block integrating Adaptive Hypergraph Computation (C3AH).

    The input feature map is split into two paths. One path is processed by the AdaHGComputation module to model
    high-order correlations, while the other serves as a shortcut. The outputs are then concatenated to fuse
    features.

    Attributes:
        c1 (int): Number of input channels.
        c2 (int): Number of output channels.
        e (float, optional): Expansion ratio for the hidden channels. Defaults to 1.0.
        num_hyperedges (int, optional): The number of hyperedges for the internal AdaHGComputation. Defaults to 8.
        context (str, optional): The context type for the internal AdaHGComputation. Defaults to "both".

    Methods:
        forward: Performs a forward pass through the C3AH module.

    Examples:
        >>> import torch
        >>> model = C3AH(c1=64, c2=128, num_hyperedges=8)
        >>> x = torch.randn(2, 64, 32, 32)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 128, 32, 32])
    """

    def __init__(self, c1, c2, e=1.0, num_hyperedges=8, context="both"):
        """Initializes C3AH with an AdaHGComputation branch and a convolutional shortcut branch."""
        super().__init__()
        c_ = int(c2 * e)
        assert c_ % 16 == 0, "Dimension of AdaHGComputation should be a multiple of 16."
        num_heads = c_ // 16
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.m = AdaHGComputation(
            embed_dim=c_, num_hyperedges=num_hyperedges, num_heads=num_heads, dropout=0.1, context=context
        )
        self.cv3 = Conv(2 * c_, c2, 1)

    def forward(self, x):
        """Forward pass through the C3AH module."""
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


class FuseModule(nn.Module):
    """
    A module to fuse multi-scale features for the HyperACE block.

    This module takes a list of three feature maps from different scales, aligns them to a common spatial resolution
    by downsampling the first and upsampling the third, and then concatenates and fuses them with a convolution
    layer.

    Attributes:
        c_in (int): The number of channels of the input feature maps.
        channel_adjust (bool): Whether to adjust the channel count of the concatenated features.

    Methods:
        forward: Fuses a list of three multi-scale feature maps.

    Examples:
        >>> import torch
        >>> model = FuseModule(c_in=64, channel_adjust=False)
        >>> # Input is a list of features from different backbone stages
        >>> x_list = [torch.randn(2, 64, 64, 64), torch.randn(2, 64, 32, 32), torch.randn(2, 64, 16, 16)]
        >>> output = model(x_list)
        >>> print(output.shape)
        torch.Size([2, 64, 32, 32])
    """

    def __init__(self, c_in, channel_adjust):
        """Initializes FuseModule with downsampling, upsampling, and a fusing convolution."""
        super().__init__()
        self.downsample = nn.AvgPool2d(kernel_size=2)
        self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
        if channel_adjust:
            self.conv_out = Conv(4 * c_in, c_in, 1)
        else:
            self.conv_out = Conv(3 * c_in, c_in, 1)

    def forward(self, x):
        """Aligns three multi-scale feature maps to the middle resolution and fuses them."""
        x1_ds = self.downsample(x[0])
        x3_up = self.upsample(x[2])
        x_cat = torch.cat([x1_ds, x[1], x3_up], dim=1)
        out = self.conv_out(x_cat)
        return out


class HyperACE(nn.Module):
    """
    Hypergraph-based Adaptive Correlation Enhancement (HyperACE).

    This is the core module of YOLOv13, designed to model both global high-order correlations and local low-order
    correlations. It first fuses multi-scale features, then processes them through parallel branches: two C3AH
    branches for high-order modeling and a lightweight DSConv-based branch for low-order feature extraction.

    Attributes:
        c1 (int): Number of input channels for the fuse module.
        c2 (int): Number of output channels for the entire block.
        n (int, optional): Number of blocks in the low-order branch. Defaults to 1.
        num_hyperedges (int, optional): Number of hyperedges for the C3AH branches. Defaults to 8.
        dsc3k (bool, optional): If True, use DSC3k in the low-order branch; otherwise, use DSBottleneck.
            Defaults to True.
        shortcut (bool, optional): Whether to use shortcuts in the low-order branch. Defaults to False.
        e1 (float, optional): Expansion ratio for the main hidden channels. Defaults to 0.5.
        e2 (float, optional): Expansion ratio within the C3AH branches. Defaults to 1.
        context (str, optional): Context type for C3AH branches. Defaults to "both".
        channel_adjust (bool, optional): Passed to FuseModule for channel configuration. Defaults to True.

    Methods:
        forward: Performs a forward pass through the HyperACE module.

    Examples:
        >>> import torch
        >>> model = HyperACE(c1=64, c2=256, n=1, num_hyperedges=8, channel_adjust=False)
        >>> x_list = [torch.randn(2, 64, 64, 64), torch.randn(2, 64, 32, 32), torch.randn(2, 64, 16, 16)]
        >>> output = model(x_list)
        >>> print(output.shape)
        torch.Size([2, 256, 32, 32])
    """

    def __init__(
        self, c1, c2, n=1, num_hyperedges=8, dsc3k=True, shortcut=False, e1=0.5, e2=1, context="both",
        channel_adjust=True
    ):
        """Initializes HyperACE with a multi-scale fuse module, two C3AH branches, and a low-order branch."""
        super().__init__()
        self.c = int(c2 * e1)
        self.cv1 = Conv(c1, 3 * self.c, 1, 1)
        self.cv2 = Conv((4 + n) * self.c, c2, 1)
        self.m = nn.ModuleList(
            DSC3k(self.c, self.c, 2, shortcut, k1=3, k2=7) if dsc3k else DSBottleneck(self.c, self.c, shortcut=shortcut)
            for _ in range(n)
        )
        self.fuse = FuseModule(c1, channel_adjust)
        self.branch1 = C3AH(self.c, self.c, e2, num_hyperedges, context)
        self.branch2 = C3AH(self.c, self.c, e2, num_hyperedges, context)

    def forward(self, X):
        """Forward pass through the HyperACE module."""
        x = self.fuse(X)
        y = list(self.cv1(x).chunk(3, 1))
        out1 = self.branch1(y[1])
        out2 = self.branch2(y[1])
        y.extend(m(y[-1]) for m in self.m)
        y[1] = out1
        y.append(out2)
        return self.cv2(torch.cat(y, 1))


class DownsampleConv(nn.Module):
    """
    A simple downsampling block with optional channel adjustment.

    This module uses average pooling to reduce the spatial dimensions (H, W) by a factor of 2. It can optionally
    include a 1x1 convolution to adjust the number of channels, typically doubling them.

    Attributes:
        in_channels (int): The number of input channels.
        channel_adjust (bool, optional): If True, a 1x1 convolution doubles the channel dimension. Defaults to True.

    Methods:
        forward: Performs the downsampling and optional channel adjustment.

    Examples:
        >>> import torch
        >>> model = DownsampleConv(in_channels=64, channel_adjust=True)
        >>> x = torch.randn(2, 64, 32, 32)
        >>> output = model(x)
        >>> print(output.shape)
        torch.Size([2, 128, 16, 16])
    """

    def __init__(self, in_channels, channel_adjust=True):
        """Initializes DownsampleConv with average pooling and an optional 1x1 channel-doubling conv."""
        super().__init__()
        self.downsample = nn.AvgPool2d(kernel_size=2)
        if channel_adjust:
            self.channel_adjust = Conv(in_channels, in_channels * 2, 1)
        else:
            self.channel_adjust = nn.Identity()

    def forward(self, x):
        """Downsamples the input and applies the optional channel adjustment."""
        return self.channel_adjust(self.downsample(x))
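

# Illustrative sketch (not part of the original module): cv1 splits the fused map into
# three chunks; y[1] feeds both C3AH high-order branches (one result replaces y[1], the
# other is appended), while the low-order DSC3k/DSBottleneck chain extends from the last
# chunk, giving cv2 a total of (4 + n) * c channels. channel_adjust=False is assumed
# here to match three equal-channel inputs:
#
#   >>> ace = HyperACE(c1=64, c2=256, n=1, num_hyperedges=8, channel_adjust=False)
#   >>> xs = [torch.randn(2, 64, 64, 64), torch.randn(2, 64, 32, 32), torch.randn(2, 64, 16, 16)]
#   >>> ace(xs).shape
#   torch.Size([2, 256, 32, 32])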


class FullPAD_Tunnel(nn.Module):
    """
    A gated fusion module for the Full-Pipeline Aggregation-and-Distribution (FullPAD) paradigm.

    This module implements a gated residual connection used to fuse features. It takes two inputs: the original
    feature map and a correlation-enhanced feature map. It then computes `output = original + gate * enhanced`,
    where `gate` is a learnable scalar parameter that adaptively balances the contribution of the enhanced features.

    Methods:
        forward: Performs the gated fusion of two input feature maps.

    Examples:
        >>> import torch
        >>> model = FullPAD_Tunnel()
        >>> original_feature = torch.randn(2, 64, 32, 32)
        >>> enhanced_feature = torch.randn(2, 64, 32, 32)
        >>> output = model([original_feature, enhanced_feature])
        >>> print(output.shape)
        torch.Size([2, 64, 32, 32])
    """

    def __init__(self):
        """Initializes FullPAD_Tunnel with a learnable scalar gate initialized to 0."""
        super().__init__()
        self.gate = nn.Parameter(torch.tensor(0.0))

    def forward(self, x):
        """Returns x[0] + gate * x[1], fusing the original and enhanced feature maps."""
        out = x[0] + self.gate * x[1]
        return out