Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions monai/networks/blocks/crossattention.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,13 @@ def __init__(
attention_dtype: cast attention operations to this dtype.
use_flash_attention: if True, use PyTorch's built-in flash attention for a memory efficient attention mechanism
(see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).
Raises:
ValueError: if ``dropout_rate`` is not between 0 and 1.
ValueError: if ``hidden_size`` is not divisible by ``num_heads`` when ``dim_head`` is not set.
ValueError: if ``causal`` is True and ``sequence_length`` is not provided.
ValueError: if both ``save_attn`` and ``use_flash_attention`` are True.
ValueError: if ``rel_pos_embedding`` is not None and ``use_flash_attention`` is True.
"""

super().__init__()
Expand Down
9 changes: 9 additions & 0 deletions monai/networks/blocks/downsample.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,11 @@ def __init__(
Only used in the "maxpool", "avgpool" or "pixelunshuffle" modes.
post_conv: a conv block applied after downsampling. Defaults to None. Only used in the "maxpool" and "avgpool" modes.
bias: whether to have a bias term in the default preconv and conv layers. Defaults to True.

Raises:
ValueError: if ``mode`` is ``"conv"`` or ``"convgroup"`` and ``in_channels`` is not specified.
ValueError: if ``mode`` is ``"maxpool"`` or ``"avgpool"``, ``pre_conv`` is ``"default"``,
``out_channels != in_channels``, and ``in_channels`` is not specified.
"""
super().__init__()

Expand Down Expand Up @@ -261,6 +266,10 @@ def __init__(
When ``conv_block`` is an ``nn.module``,
please ensure the input number of channels matches requirements.
bias: whether to have a bias term in the default conv_block. Defaults to True.

Raises:
ValueError: if ``scale_factor`` is not greater than 0.
ValueError: if ``conv_block`` is ``"default"`` and ``in_channels`` is not specified.
"""
super().__init__()

Expand Down
39 changes: 34 additions & 5 deletions monai/networks/blocks/localnet_block.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,23 @@ def get_deconv_block(spatial_dims: int, in_channels: int, out_channels: int) ->


class ResidualBlock(nn.Module):
"""
A residual block with two convolutional layers and a skip connection.
"""

def __init__(
self, spatial_dims: int, in_channels: int, out_channels: int, kernel_size: Sequence[int] | int
) -> None:
"""
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels. Must equal ``out_channels``.
out_channels: number of output channels. Must equal ``in_channels``.
kernel_size: convolution kernel size.

Raises:
ValueError: if ``in_channels != out_channels``.
"""
super().__init__()
if in_channels != out_channels:
raise ValueError(
Expand All @@ -96,8 +109,20 @@ def forward(self, x) -> torch.Tensor:


class LocalNetResidualBlock(nn.Module):
"""
A residual block used in LocalNet that adds a mid-level feature to the convolution output.
"""

def __init__(self, spatial_dims: int, in_channels: int, out_channels: int) -> None:
"""
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels. Must equal ``out_channels``.
out_channels: number of output channels. Must equal ``in_channels``.

Raises:
ValueError: if ``in_channels != out_channels``.
"""
super().__init__()
if in_channels != out_channels:
raise ValueError(
Expand Down Expand Up @@ -270,11 +295,15 @@ def __init__(
) -> None:
"""
Args:
spatial_dims: number of spatial dimensions.
in_channels: number of input channels.
out_channels: number of output channels.
act: activation type and arguments. Defaults to ReLU.
kernel_initializer: kernel initializer. Defaults to None.
spatial_dims: number of spatial dimensions.
in_channels: number of input channels.
out_channels: number of output channels.
act: activation type and arguments. Defaults to ReLU.
initializer: kernel initializer, either ``"kaiming_uniform"`` or ``"zeros"``. Defaults to
``"kaiming_uniform"``. Note: ``"kaiming_uniform"`` uses ``nn.init.kaiming_normal_`` internally.

Raises:
ValueError: if ``initializer`` is not ``"kaiming_uniform"`` or ``"zeros"``.
"""
super().__init__()
self.conv_block = get_conv_block(
Expand Down
6 changes: 6 additions & 0 deletions monai/networks/blocks/patchembedding.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,12 @@ def __init__(
spatial_dims: number of spatial dimensions.
pos_embed_kwargs: additional arguments for position embedding. For `sincos`, it can contain
`temperature` and for fourier it can contain `scales`.
Raises:
ValueError: if ``dropout_rate`` is not between 0 and 1.
ValueError: if ``hidden_size`` is not divisible by ``num_heads``.
ValueError: if any dimension of ``patch_size`` is larger than the corresponding ``img_size`` dimension.
ValueError: if ``proj_type`` is ``"perceptron"`` and ``patch_size`` does not evenly divide ``img_size``.
"""

super().__init__()
Expand Down
7 changes: 7 additions & 0 deletions monai/networks/blocks/selfattention.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,13 @@ def __init__(
use_flash_attention: if True, use PyTorch's built-in flash attention for a memory efficient attention mechanism
(see https://pytorch.org/docs/2.2/generated/torch.nn.functional.scaled_dot_product_attention.html).

Raises:
ValueError: if ``dropout_rate`` is not between 0 and 1.
ValueError: if ``hidden_size`` is not divisible by ``num_heads``.
ValueError: if ``causal`` is True and ``sequence_length`` is not provided.
ValueError: if both ``save_attn`` and ``use_flash_attention`` are True.
ValueError: if ``rel_pos_embedding`` is not None and ``use_flash_attention`` is True.

"""

super().__init__()
Expand Down
5 changes: 5 additions & 0 deletions monai/networks/blocks/upsample.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,11 @@ def __init__(
size of `scale_factor` with a stride of 1. See also: :py:class:`monai.networks.blocks.SubpixelUpsample`.
Only used in the "pixelshuffle" mode.
Raises:
ValueError: if ``mode`` is ``"deconv"`` or ``"deconvgroup"`` and ``in_channels`` is not specified.
ValueError: if ``mode`` is ``"nontrainable"``, ``pre_conv`` is not set, and
``out_channels != in_channels``.
"""
super().__init__()
scale_factor_ = ensure_tuple_rep(scale_factor, spatial_dims)
Expand Down
Loading