from fastai.vision.models import resnet34

Dynamic UNet
Unet model using PixelShuffle ICNR upsampling that can be built on top of any pretrained architecture
UnetBlock
def UnetBlock(
up_in_c, x_in_c, hook, final_div:bool=True, blur:bool=False, act_cls:type=ReLU, self_attention:bool=False,
init:function=kaiming_normal_, norm_type:NoneType=None, ks:int=3, stride:int=1, padding:NoneType=None,
bias:NoneType=None, ndim:int=2, bn_1st:bool=True, transpose:bool=False, xtra:NoneType=None, bias_std:float=0.01,
dilation:Union=1, groups:int=1, padding_mode:Literal='zeros', device:NoneType=None, dtype:NoneType=None
):
A quasi-UNet block, using PixelShuffle_ICNR upsampling.
ResizeToOrig
def ResizeToOrig(
mode:str='nearest'
):
Resize the output back to the spatial size of the original input, interpolating with the given `mode` (default 'nearest').
DynamicUnet
def DynamicUnet(
encoder, n_out, img_size, blur:bool=False, blur_final:bool=True, self_attention:bool=False,
y_range:NoneType=None, last_cross:bool=True, bottle:bool=False, act_cls:type=ReLU, init:function=kaiming_normal_,
norm_type:NoneType=None, kwargs:VAR_KEYWORD
):
Create a U-Net from a given architecture.
m = resnet34()
m = nn.Sequential(*list(m.children())[:-2])
tst = DynamicUnet(m, 5, (128,128), norm_type=None)
x = cast(torch.randn(2, 3, 128, 128), TensorImage)
y = tst(x)
test_eq(y.shape, [2, 5, 128, 128])

tst = DynamicUnet(m, 5, (128,128), norm_type=None)
x = torch.randn(2, 3, 127, 128)
y = tst(x)