diff --git a/monai/networks/blocks/crossattention.py b/monai/networks/blocks/crossattention.py
index baaa21ed1f..40722ae881 100644
--- a/monai/networks/blocks/crossattention.py
+++ b/monai/networks/blocks/crossattention.py
@@ -63,6 +63,13 @@ def __init__(
             attention_dtype: cast attention operations to this dtype.
             use_flash_attention: if True, use Pytorch's inbuilt flash attention for a memory efficient attention mechanism
                 (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).
+
+        Raises:
+            ValueError: if ``dropout_rate`` is not between 0 and 1.
+            ValueError: if ``hidden_size`` is not divisible by ``num_heads`` when ``dim_head`` is not set.
+            ValueError: if ``causal`` is True and ``sequence_length`` is not provided.
+            ValueError: if both ``save_attn`` and ``use_flash_attention`` are True.
+            ValueError: if ``rel_pos_embedding`` is not None and ``use_flash_attention`` is True.
         """

         super().__init__()
diff --git a/monai/networks/blocks/downsample.py b/monai/networks/blocks/downsample.py
index 9e13a9583e..dcc17d455a 100644
--- a/monai/networks/blocks/downsample.py
+++ b/monai/networks/blocks/downsample.py
@@ -127,6 +127,11 @@ def __init__(
                 Only used in the "maxpool", "avgpool" or "pixelunshuffle" modes.
             post_conv: a conv block applied after downsampling. Defaults to None. Only used in the "maxpool" and "avgpool" modes.
             bias: whether to have a bias term in the default preconv and conv layers. Defaults to True.
+
+        Raises:
+            ValueError: if ``mode`` is ``"conv"`` or ``"convgroup"`` and ``in_channels`` is not specified.
+            ValueError: if ``mode`` is ``"maxpool"`` or ``"avgpool"``, ``pre_conv`` is ``"default"``,
+                ``out_channels != in_channels``, and ``in_channels`` is not specified.
         """

         super().__init__()
@@ -261,6 +266,10 @@ def __init__(
                 When ``conv_block`` is an ``nn.module``,
                 please ensure the input number of channels matches requirements.
             bias: whether to have a bias term in the default conv_block. Defaults to True.
+
+        Raises:
+            ValueError: if ``scale_factor`` is not greater than 0.
+            ValueError: if ``conv_block`` is ``"default"`` and ``in_channels`` is not specified.
         """

         super().__init__()
diff --git a/monai/networks/blocks/localnet_block.py b/monai/networks/blocks/localnet_block.py
index 6e0efc8588..b8b3802bb9 100644
--- a/monai/networks/blocks/localnet_block.py
+++ b/monai/networks/blocks/localnet_block.py
@@ -72,10 +72,23 @@ def get_deconv_block(spatial_dims: int, in_channels: int, out_channels: int) ->


 class ResidualBlock(nn.Module):
+    """
+    A residual block with two convolutional layers and a skip connection.
+    """
+
     def __init__(
         self, spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Sequence[int] | int
     ) -> None:
+        """
+        Args:
+            spatial_dims: number of spatial dimensions.
+            in_channels: number of input channels. Must equal ``out_channels``.
+            out_channels: number of output channels. Must equal ``in_channels``.
+            kernel_size: convolution kernel size.
+
+        Raises:
+            ValueError: if ``in_channels != out_channels``.
+        """
         super().__init__()
         if in_channels != out_channels:
             raise ValueError(
@@ -96,8 +109,20 @@ def forward(self, x) -> torch.Tensor:


 class LocalNetResidualBlock(nn.Module):
+    """
+    A residual block used in LocalNet that adds a mid-level feature to the convolution output.
+    """
+
     def __init__(self, spatial_dims: int, in_channels: int, out_channels: int) -> None:
+        """
+        Args:
+            spatial_dims: number of spatial dimensions.
+            in_channels: number of input channels. Must equal ``out_channels``.
+            out_channels: number of output channels. Must equal ``in_channels``.
+
+        Raises:
+            ValueError: if ``in_channels != out_channels``.
+        """
         super().__init__()
         if in_channels != out_channels:
             raise ValueError(
@@ -270,11 +295,15 @@ def __init__(
     ) -> None:
         """
         Args:
-            spatial_dims: number of spatial dimensions.
-            in_channels: number of input channels.
-            out_channels: number of output channels.
-            act: activation type and arguments. Defaults to ReLU.
-            kernel_initializer: kernel initializer. Defaults to None.
+            spatial_dims: number of spatial dimensions.
+            in_channels: number of input channels.
+            out_channels: number of output channels.
+            act: activation type and arguments. Defaults to ReLU.
+            initializer: kernel initializer, either ``"kaiming_uniform"`` or ``"zeros"``. Defaults to
+                ``"kaiming_uniform"``. Note: ``"kaiming_uniform"`` uses ``nn.init.kaiming_normal_`` internally.
+
+        Raises:
+            ValueError: if ``initializer`` is not ``"kaiming_uniform"`` or ``"zeros"``.
         """
         super().__init__()
         self.conv_block = get_conv_block(
diff --git a/monai/networks/blocks/patchembedding.py b/monai/networks/blocks/patchembedding.py
index a4caae68be..5139fa6e8a 100644
--- a/monai/networks/blocks/patchembedding.py
+++ b/monai/networks/blocks/patchembedding.py
@@ -68,6 +68,12 @@ def __init__(
             spatial_dims: number of spatial dimensions.
             pos_embed_kwargs: additional arguments for position embedding. For `sincos`, it can contain
                 `temperature` and for fourier it can contain `scales`.
+
+        Raises:
+            ValueError: if ``dropout_rate`` is not between 0 and 1.
+            ValueError: if ``hidden_size`` is not divisible by ``num_heads``.
+            ValueError: if any dimension of ``patch_size`` is larger than the corresponding ``img_size`` dimension.
+            ValueError: if ``proj_type`` is ``"perceptron"`` and ``patch_size`` does not evenly divide ``img_size``.
         """

         super().__init__()
diff --git a/monai/networks/blocks/selfattention.py b/monai/networks/blocks/selfattention.py
index 2791d2fb00..d2ad24ac19 100644
--- a/monai/networks/blocks/selfattention.py
+++ b/monai/networks/blocks/selfattention.py
@@ -66,6 +66,13 @@ def __init__(
             use_flash_attention: if True, use Pytorch's inbuilt flash attention for a memory efficient attention mechanism
                 (see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).

+        Raises:
+            ValueError: if ``dropout_rate`` is not between 0 and 1.
+            ValueError: if ``hidden_size`` is not divisible by ``num_heads``.
+            ValueError: if ``causal`` is True and ``sequence_length`` is not provided.
+            ValueError: if both ``save_attn`` and ``use_flash_attention`` are True.
+            ValueError: if ``rel_pos_embedding`` is not None and ``use_flash_attention`` is True.
+
         """

         super().__init__()
diff --git a/monai/networks/blocks/upsample.py b/monai/networks/blocks/upsample.py
index 97a8e7f6e3..90dfae93a1 100644
--- a/monai/networks/blocks/upsample.py
+++ b/monai/networks/blocks/upsample.py
@@ -86,6 +86,11 @@ def __init__(
                 size of `scale_factor` with a stride of 1. See also: :py:class:`monai.networks.blocks.SubpixelUpsample`.
                 Only used in the "pixelshuffle" mode.

+        Raises:
+            ValueError: if ``mode`` is ``"deconv"`` or ``"deconvgroup"`` and ``in_channels`` is not specified.
+            ValueError: if ``mode`` is ``"nontrainable"``, ``pre_conv`` is not set, and
+                ``out_channels != in_channels``.
+
         """
         super().__init__()
         scale_factor_ = ensure_tuple_rep(scale_factor, spatial_dims)
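
For reviewers, a minimal sketch (not part of the patch) of how the newly documented ``ValueError`` paths surface at construction time. It assumes MONAI is installed and that ``SABlock`` and ``ResidualBlock`` keep the constructor signatures shown in the hunks above; the argument values are illustrative only.

```python
import torch

from monai.networks.blocks.localnet_block import ResidualBlock
from monai.networks.blocks.selfattention import SABlock

# dropout_rate outside [0, 1] -> ValueError (see selfattention.py hunk).
try:
    SABlock(hidden_size=128, num_heads=4, dropout_rate=1.5)
except ValueError as err:
    print(f"rejected: {err}")

# hidden_size not divisible by num_heads -> ValueError.
try:
    SABlock(hidden_size=130, num_heads=4)
except ValueError as err:
    print(f"rejected: {err}")

# in_channels != out_channels -> ValueError (see localnet_block.py hunks).
try:
    ResidualBlock(spatial_dims=2, in_channels=8, out_channels=16, kernel_size=3)
except ValueError as err:
    print(f"rejected: {err}")

# A valid configuration constructs and runs as usual.
block = SABlock(hidden_size=128, num_heads=4, dropout_rate=0.1)
out = block(torch.randn(2, 16, 128))  # (batch, sequence_length, hidden_size)
print(out.shape)  # torch.Size([2, 16, 128])
```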