How to use the albumentations.RandomRotate90 function in albumentations

To help you get started, we’ve selected a few albumentations examples based on popular ways the library is used in public projects.
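Before diving into the project snippets below, here is a minimal, self-contained sketch of RandomRotate90 in isolation (the random 8x8 array is just placeholder input):

import albumentations as A
import numpy as np

# RandomRotate90 rotates the input by 0, 90, 180, or 270 degrees,
# chosen uniformly at random; p controls how often it fires at all.
transform = A.Compose([A.RandomRotate90(p=1.0)])

image = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)  # placeholder image
rotated = transform(image=image)["image"]
print(rotated.shape)  # still (8, 8, 3): rotating a square by 90 degrees preserves its shape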


github albumentations-team / albumentations / tests / test_serialization.py
        [A.RandomRotate90, {}],
        [A.Rotate, {}],
        [A.ShiftScaleRotate, {}],
        [A.CenterCrop, {"height": 10, "width": 10}],
        [A.RandomCrop, {"height": 10, "width": 10}],
        [A.RandomSizedCrop, {"min_max_height": (4, 8), "height": 10, "width": 10}],
        [A.Crop, {"x_max": 64, "y_max": 64}],
        [A.FromFloat, {}],
        [A.ToFloat, {}],
        [A.Normalize, {}],
        [A.RandomBrightness, {}],
        [A.RandomContrast, {}],
        [A.RandomScale, {}],
        [A.Resize, {"height": 64, "width": 64}],
        [A.SmallestMaxSize, {}],
        [A.LongestMaxSize, {}],
        [A.RandomSizedBBoxSafeCrop, {"height": 50, "width": 50}],
github albumentations-team / albumentations / tests / test_serialization.py
def test_transform_pipeline_serialization_with_bboxes(seed, image, bboxes, bbox_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        bbox_params={"format": bbox_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, bboxes=bboxes, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=bboxes, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
github albumentations-team / albumentations / tests / test_serialization.py
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"])
github catalyst-team / catalyst / examples / finetune / data.py
def train_transform(image_size=224):
    transforms = [
        LongestMaxSize(max_size=image_size),
        PadIfNeeded(image_size, image_size, border_mode=cv2.BORDER_CONSTANT),
        ShiftScaleRotate(
            shift_limit=0.1,
            scale_limit=0.1,
            rotate_limit=45,
            border_mode=cv2.BORDER_REFLECT,
            p=0.5
        ),
        RandomRotate90(),
        JpegCompression(quality_lower=50),
        post_transform()
    ]
    transforms = Compose(transforms)
    return transforms
github BloodAxe / Catalyst-Inria-Segmentation-Example / inria / augmentations.py
def hard_augmentations():
    return A.Compose(
        [
            A.RandomRotate90(),
            A.Transpose(),
            A.RandomGridShuffle(),
            A.ShiftScaleRotate(
                scale_limit=0.1, rotate_limit=45, border_mode=cv2.BORDER_CONSTANT, mask_value=0, value=0
            ),
            A.ElasticTransform(border_mode=cv2.BORDER_CONSTANT, alpha_affine=5, mask_value=0, value=0),
            # Occasionally add blur or noise
            A.OneOf([A.GaussianBlur(), A.GaussNoise(), A.IAAAdditiveGaussianNoise(), A.NoOp()]),
            # Dropout-style occlusions
            A.OneOf([A.CoarseDropout(), A.MaskDropout(max_objects=10), A.NoOp()]),
            # Spatial-preserving augmentations:
            A.OneOf(
                [
                    A.RandomBrightnessContrast(brightness_by_max=True),
                    A.CLAHE(),
                    A.HueSaturationValue(),
github spytensor / pytorch-image-classification / dataset / augmentations.py
            A.ToGray(p=0.2),
            A.NoOp()
        ]),


        A.OneOf([
            A.ChannelDropout(p=0.2),
            A.CoarseDropout(p=0.1, max_holes=2, max_width=256, max_height=256, min_height=16, min_width=16),
            A.NoOp()
        ]),

        A.RandomGridShuffle(p=0.3),

        # D4
        A.Compose([
            A.RandomRotate90(),
            A.Transpose()
        ])
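The # D4 comments in this and other snippets refer to the dihedral group D4: RandomRotate90 (rotations by multiples of 90 degrees) combined with Transpose generates all eight axis-aligned symmetries of a square image, a cheap and lossless augmentation for tasks with no preferred orientation (satellite or medical imagery, for example).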
github catalyst-team / catalyst / catalyst / contrib / mixin / rotate.py
        targets_key: str = None,
        rotate_probability: float = 1.,
        hflip_probability: float = 0.5,
        one_hot_classes: int = None
    ):
        """
        Args:
            input_key (str): input key to use from annotation dict
            output_key (str): output key to use to store the result
        """
        self.input_key = input_key
        self.output_key = output_key
        self.targets_key = targets_key
        self.rotate_probability = rotate_probability
        self.hflip_probability = hflip_probability
        self.rotate = A.RandomRotate90()
        self.hflip = A.HorizontalFlip()
        self.one_hot_classes = one_hot_classes * 8 \
            if one_hot_classes is not None \
            else None
github BloodAxe / pytorch-toolbelt / examples / canny-edge-detector-in-cnn / example_canny_cnn.py
def __init__(self, images, image_size=(224, 224), training=True):
        self.images = images
        self.transform = A.Compose([
            A.Compose([
                A.PadIfNeeded(256, 256),
                A.RandomSizedCrop((128, 256), image_size[0], image_size[1]),
                A.RandomRotate90(),
                A.RandomBrightnessContrast(),
                A.GaussNoise(),
                A.Cutout(),
                A.ElasticTransform()
            ], p=float(training)),
            A.Compose([
                A.PadIfNeeded(image_size[0], image_size[1]),
                A.CenterCrop(image_size[0], image_size[1])
            ], p=float(not training))
        ])
        self.normalize = A.Normalize()
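Note the p=float(training) / p=float(not training) trick: each composed branch is switched on or off wholesale, so a single pipeline object serves both the training path (random crop, rotation, noise) and the evaluation path (deterministic pad and center crop).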
github mrlzla / kaggle_salt / dataset / albumentations.py
def aug_mega_hardcore(p=.95):
    return Compose([
        OneOf([
            CLAHE(clip_limit=2),
            IAASharpen(p=.25),
            IAAEmboss(p=.25)
        ], p=.35),
        OneOf([
            IAAAdditiveGaussianNoise(p=.3),
            GaussNoise(p=.7),
        ], p=.5),
        RandomRotate90(),
        Flip(),
        Transpose(),
        OneOf([
            MotionBlur(p=.2),
            MedianBlur(blur_limit=3, p=.3),
            Blur(blur_limit=3, p=.5),
        ], p=.4),
        OneOf([
            RandomContrast(p=.5),
            RandomBrightness(p=.5),
        ], p=.4),
        ShiftScaleRotate(shift_limit=.0, scale_limit=.45, rotate_limit=45, p=.7),
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.2),
            ElasticTransform(p=.2),
github BloodAxe / pytorch-toolbelt / examples / segmentation-inria / models / factory.py
        ]),

        # Add occasion blur/sharpening
        A.OneOf([
            A.GaussianBlur(),
            A.MotionBlur(),
            A.IAASharpen()
        ]),

        # Crop to desired image size
        A.CenterCrop(image_size[0], image_size[1]),

        # D4 Augmentations
        A.Compose([
            A.Transpose(),
            A.RandomRotate90(),
        ], p=float(use_d4)),
        # In case we don't want to use D4 augmentations, we use flips
        A.HorizontalFlip(p=float(not use_d4)),

        # Spatial-preserving augmentations:
        A.OneOf([
            A.Cutout(),
            A.GaussNoise(),
        ]),
        A.OneOf([
            A.RandomBrightnessContrast(),
            A.CLAHE(),
            A.HueSaturationValue(),
            A.RGBShift(),
            A.RandomGamma()
        ]),

albumentations

Fast, flexible, and advanced augmentation library for deep learning, computer vision, and medical imaging. Albumentations offers a wide range of transformations for both 2D (images, masks, bboxes, keypoints) and 3D (volumes, volumetric masks, keypoints) data, with optimized performance and seamless integration into ML workflows.
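As a quick illustration of the multi-target support described above, a minimal sketch (with placeholder arrays) applying one pipeline to an image and its segmentation mask:

import albumentations as A
import numpy as np

# Spatial transforms are applied consistently to every target,
# so the mask stays aligned with the image after each rotation/flip.
aug = A.Compose([A.RandomRotate90(p=1.0), A.HorizontalFlip(p=0.5)])

image = np.zeros((64, 64, 3), dtype=np.uint8)  # placeholder image
mask = np.zeros((64, 64), dtype=np.uint8)      # placeholder mask
result = aug(image=image, mask=mask)
assert result["image"].shape[:2] == result["mask"].shape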

License: MIT