How to use the albumentations.Transpose transform in albumentations

To help you get started, we've selected a few albumentations examples that show popular ways Transpose is used in public projects.

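Before the project examples, here is a minimal, self-contained sketch of what Transpose does (the zero-filled array is just a stand-in for a real image): it swaps an image's height and width axes and leaves the channel axis untouched.

import numpy as np
import albumentations as A

transform = A.Transpose(p=1.0)  # p=1.0 so the transform always fires
image = np.zeros((100, 200, 3), dtype=np.uint8)  # stand-in HWC image
result = transform(image=image)["image"]
print(result.shape)  # (200, 100, 3): height and width swapped, channels intact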

github buriy / dlbench / tests / cpu_preprocess.py (View on Github)
def strong_aug(p=0.5):
    return Compose(
        [
            RandomRotate90(),
            Flip(),
            Transpose(),
            IAAPerspective(),
            OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2),
            OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            OneOf([OpticalDistortion(p=0.3), GridDistortion(p=0.1), IAAPiecewiseAffine(p=0.3)], p=0.2),
            OneOf([CLAHE(clip_limit=2), IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3),
            HueSaturationValue(p=0.3),
            Resize(256, 256, p=1, always_apply=True),
        ],
        p=p,
    )
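A pipeline composed this way is called with keyword arguments and returns a dict of results; a usage sketch, with a random array standing in for a real image:

import numpy as np

aug = strong_aug(p=0.9)
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # stand-in image
augmented = aug(image=image)["image"]  # the final Resize yields 256x256 output

Note that the IAA* transforms in this snippet come from the imgaug integration, which later albumentations releases deprecated and removed (Perspective, Sharpen, Emboss, and PiecewiseAffine are the native replacements), so it needs an older release to run as written.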
github albumentations-team / albumentations / tests / test_augmentations.py (View on Github)
        [Transpose, {}],
        [RandomRotate90, {}],
        [Rotate, {}],
        [ShiftScaleRotate, {}],
        [OpticalDistortion, {}],
        [GridDistortion, {}],
        [ElasticTransform, {}],
        [CenterCrop, {"height": 10, "width": 10}],
        [RandomCrop, {"height": 10, "width": 10}],
        [CropNonEmptyMaskIfExists, {"height": 10, "width": 10}],
        [RandomResizedCrop, {"height": 10, "width": 10}],
        [RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [ISONoise, {}],
        [RandomGridShuffle, {}],
    ],
)
def test_dual_augmentations(augmentation_cls, params, image, mask):
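test_dual_augmentations exercises dual transforms, i.e. transforms that modify the image and its mask in lockstep. A minimal sketch of the property being tested for Transpose (the array shapes are illustrative):

import numpy as np
import albumentations as A

aug = A.Transpose(p=1.0)
out = aug(image=np.zeros((4, 8, 3), np.uint8), mask=np.zeros((4, 8), np.uint8))
print(out["image"].shape, out["mask"].shape)  # (8, 4, 3) (8, 4): same spatial change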
github albumentations-team / albumentations / tests / test_transforms.py (View on Github)
        [A.Transpose, {}],
        [A.RandomRotate90, {}],
        [A.Rotate, {}],
        [A.OpticalDistortion, {}],
        [A.GridDistortion, {}],
        [A.ElasticTransform, {}],
        [A.Normalize, {}],
        [A.ToFloat, {}],
        [A.FromFloat, {}],
        [A.ChannelDropout, {}],
        [A.Solarize, {}],
        [A.Posterize, {}],
        [A.Equalize, {}],
        [A.MultiplicativeNoise, {}],
    ],
)
def test_additional_targets_for_image_only(augmentation_cls, params):
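The test name refers to additional_targets, which lets one pipeline apply the identical random transform to several inputs. A minimal sketch of that mechanism (the image2 name is illustrative, not from the test suite):

import numpy as np
import albumentations as A

aug = A.Compose([A.Transpose(p=1.0)], additional_targets={"image2": "image"})
out = aug(image=np.zeros((2, 3, 3), np.uint8), image2=np.ones((2, 3, 3), np.uint8))
print(out["image"].shape, out["image2"].shape)  # (3, 2, 3) (3, 2, 3): both transposed together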
github mrlzla / kaggle_salt / dataset / albumentations.py (View on Github)
def aug_mega_hardcore(p=.95):
    return Compose([
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(p=.25),
            IAAEmboss(p=.25)
        ], p=.35),
        OneOf([
            IAAAdditiveGaussianNoise(p=.3),
            GaussNoise(p=.7),
        ], p=.5),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.3),
            Blur(blur_limit=3, p=.5),
        ], p=.4),
        OneOf([
            RandomContrast(p=.5),
            RandomBrightness(p=.5),
        ], p=.4),
        ShiftScaleRotate(shift_limit=.0, scale_limit=.45, rotate_limit=45, p=.7),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.2),
            ElasticTransform(p=.2),
            IAAPerspective(p=.2),
            IAAPiecewiseAffine(p=.3),
github BloodAxe / pytorch-toolbelt / examples / segmentation-inria / models / factory.py (View on Github)
A.ElasticTransform(alpha_affine=0, border_mode=cv2.BORDER_CONSTANT),
        ]),

        # Add occasional blur/sharpening
        A.OneOf([
            A.GaussianBlur(),
            A.MotionBlur(),
            A.IAASharpen()
        ]),

        # Crop to desired image size
        A.CenterCrop(image_size[0], image_size[1]),

        # D4 Augmentations
        A.Compose([
            A.Transpose(),
            A.RandomRotate90(),
        ], p=float(use_d4)),
        # In case we don't want to use D4 augmentations, we use flips
        A.HorizontalFlip(p=float(not use_d4)),

        # Spatial-preserving augmentations:
        A.OneOf([
            A.Cutout(),
            A.GaussNoise(),
        ]),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.CLAHE(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.RandomGamma()
github spytensor / pytorch-image-classification / dataset / augmentations.py (View on Github)
A.OneOf([
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),

        A.ChannelDropout(),
        A.RandomGridShuffle(p=0.3),

        # D4
        A.Compose([
            A.RandomRotate90(),
            A.Transpose()
        ])
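The # D4 comment refers to the dihedral group of the square: RandomRotate90 and Transpose together can produce all eight axis-aligned symmetries of an image. A quick NumPy illustration (not from the repo) counts them:

import numpy as np

img = np.arange(9).reshape(3, 3)
variants = {(np.rot90(img, k).T if t else np.rot90(img, k)).tobytes()
            for k in range(4) for t in (False, True)}
print(len(variants))  # 8: the four rotations, each with and without a transpose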
github cswin / AWC / src / train_CADA.py (View on Github)
RandomBrightnessContrast,
    IAASharpen, IAAEmboss
)

from models.unet_fine import UNet
from models.discriminator import FCDiscriminator, EncoderDiscriminator
from dataset.refuge_Vmiccai import REFUGE
from pytorch_utils import (adjust_learning_rate, adjust_learning_rate_D,
                           calc_mse_loss, Weighted_Jaccard_loss, dice_loss)
from models import optim_weight_ema
from arguments import get_arguments


aug_student = Compose([
    OneOf([
        Transpose(p=0.5),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5)], p=0.2),

    OneOf([
        IAAAdditiveGaussianNoise(p=0.5),
        GaussNoise(p=0.5),
    ], p=0.2),

    OneOf([
        CLAHE(clip_limit=2),
        IAASharpen(p=0.5),
        IAAEmboss(p=0.5),
        RandomBrightnessContrast(p=0.5),
    ], p=0.2),
    HueSaturationValue(p=0.2),
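A note on the nested probabilities in this pipeline (an observation, not part of the repo): OneOf treats its children's p values as relative selection weights, so with four spatial transforms each at p=0.5 and the block's own p=0.2, Transpose actually fires on only about 5% of calls (0.2 × 0.5 / 2.0).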
github n01z3 / kaggle-pneumothorax-segmentation / n04_dataset.py (View on Github)
def strong_aug(p=0.5):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.4),
            Transpose(p=0.4),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            # OneOf([
            #     ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            #     GridDistortion(),
            #     OpticalDistortion(distort_limit=2, shift_limit=0.3)
            #     ], p=0.2),
            OneOf(
                [
                    RandomContrast(),
                    RandomGamma(),
                    RandomBrightness()
                    # RandomBrightnessContrast(),
                ],
                p=0.3,
            ),
        ],
github darraghdog / rsna / scripts / efficientnetb0v3 / trainorig.py (View on Github)
png = [os.path.basename(png)[:-4] for png in png]
png = np.array(png)
train = train.set_index('Image').loc[png].reset_index()

# get fold
valdf = train[train['fold']==fold].reset_index(drop=True)
trndf = train[train['fold']!=fold].reset_index(drop=True)
    
# Data loaders
transform_train = Compose([
    #ShiftScaleRotate(),
    #CenterCrop(height = SIZE//10, width = SIZE//10, p=0.3),
    HorizontalFlip(p=0.5),
    ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, 
                         rotate_limit=20, p=0.3, border_mode = cv2.BORDER_REPLICATE),
    Transpose(p=0.5),
    ToTensor()
])

transform_test = Compose([
    ToTensor()
])

trndataset = IntracranialDataset(trndf, path=dir_train_img, transform=transform_train, labels=True)
valdataset = IntracranialDataset(valdf, path=dir_train_img, transform=transform_test, labels=False)
tstdataset = IntracranialDataset(test, path=dir_test_img, transform=transform_test, labels=False)

num_workers = 16
trnloader = DataLoader(trndataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
valloader = DataLoader(valdataset, batch_size=batch_size*4, shuffle=False, num_workers=num_workers)
tstloader = DataLoader(tstdataset, batch_size=batch_size*4, shuffle=False, num_workers=num_workers)
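One detail worth noting here (an observation, not from the repo): Transpose(p=0.5) swaps height and width before ToTensor converts the array from HWC to CHW, so stacking the results into batches only works because every input is presumably resized to the same square shape upstream; with rectangular inputs, half the tensors would come out with their spatial dimensions exchanged.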
github albumentations-team / albumentations / albumentations / pytorch / augmentations / dual / transforms.py (View on Github)
class VerticalFlipTorch(BasicTransformTorch, A.VerticalFlip):
    def apply(self, img, **params):
        return F.vflip(img)


class HorizontalFlipTorch(BasicTransformTorch, A.HorizontalFlip):
    def apply(self, img, **params):
        return F.hflip(img)


class FlipTorch(BasicTransformTorch, A.Flip):
    def apply(self, img, d=0, **params):
        return F.random_flip(img, d)


class TransposeTorch(BasicTransformTorch, A.Transpose):
    def apply(self, img, **params):
        return F.transpose(img)


class LongestMaxSizeTorch(BasicTransformTorch, A.LongestMaxSize):
    def apply(self, img, interpolation="nearest", **params):
        return F.longest_max_size(img, max_size=self.max_size, interpolation=interpolation)


class SmallestMaxSizeTorch(BasicTransformTorch, A.SmallestMaxSize):
    def apply(self, img, interpolation="nearest", **params):
        return F.smallest_max_size(img, max_size=self.max_size, interpolation=interpolation)


class ResizeTorch(BasicTransformTorch, A.Resize):
    def apply(self, img, interpolation="nearest", **params):

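All of these wrappers follow one pattern: mix a torch base class into an existing CPU transform and override apply() to swap in a tensor backend. The same override pattern works against the public API; a hypothetical sketch (LoggingTranspose is not a real albumentations class):

import albumentations as A

class LoggingTranspose(A.Transpose):
    # Hypothetical subclass: reuse Transpose's parameter sampling, wrap apply().
    def apply(self, img, **params):
        print("transposing array of shape", img.shape)
        return super().apply(img, **params)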
albumentations

Fast, flexible, and advanced augmentation library for deep learning, computer vision, and medical imaging. Albumentations offers a wide range of transformations for both 2D (images, masks, bboxes, keypoints) and 3D (volumes, volumetric masks, keypoints) data, with optimized performance and seamless integration into ML workflows.

License: MIT
Latest version published 7 months ago

Package Health Score: 64 / 100