@@ -7,7 +7,7 @@
 #
 import os
 from functools import partial
-from typing import List, Optional, Tuple, Union
+from typing import List, Optional, Tuple, Type, Union
 
 import torch
 import torch.nn as nn
@@ -54,7 +54,7 @@ def __init__(
             use_act: bool = True,
             use_scale_branch: bool = True,
             num_conv_branches: int = 1,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
     ) -> None:
         """Construct a MobileOneBlock module.
 
@@ -426,7 +426,7 @@ def _fuse_bn(
 def convolutional_stem(
         in_chs: int,
         out_chs: int,
-        act_layer: nn.Module = nn.GELU,
+        act_layer: Type[nn.Module] = nn.GELU,
         inference_mode: bool = False
 ) -> nn.Sequential:
     """Build convolutional stem with MobileOne blocks.
@@ -545,7 +545,7 @@ def __init__(
             stride: int,
             in_chs: int,
             embed_dim: int,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             lkc_use_act: bool = False,
             use_se: bool = False,
             inference_mode: bool = False,
@@ -718,7 +718,7 @@ def __init__(
             in_chs: int,
             hidden_channels: Optional[int] = None,
             out_chs: Optional[int] = None,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             drop: float = 0.0,
     ) -> None:
         """Build convolutional FFN module.
@@ -890,7 +890,7 @@ def __init__(
             dim: int,
             kernel_size: int = 3,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             proj_drop: float = 0.0,
             drop_path: float = 0.0,
             layer_scale_init_value: float = 1e-5,
@@ -947,8 +947,8 @@ def __init__(
             self,
             dim: int,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
-            norm_layer: nn.Module = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
             proj_drop: float = 0.0,
             drop_path: float = 0.0,
             layer_scale_init_value: float = 1e-5,
@@ -1007,8 +1007,8 @@ def __init__(
             pos_emb_layer: Optional[nn.Module] = None,
             kernel_size: int = 3,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
-            norm_layer: nn.Module = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
             proj_drop_rate: float = 0.0,
             drop_path_rate: float = 0.0,
             layer_scale_init_value: Optional[float] = 1e-5,
@@ -1121,8 +1121,8 @@ def __init__(
             fork_feat: bool = False,
             cls_ratio: float = 2.0,
             global_pool: str = 'avg',
-            norm_layer: nn.Module = nn.BatchNorm2d,
-            act_layer: nn.Module = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
             inference_mode: bool = False,
     ) -> None:
         super().__init__()