How to use the albumentations.RandomGamma transform in albumentations

To help you get started, we've selected a few albumentations.RandomGamma examples drawn from popular ways it is used in public projects.
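For a minimal starting point, RandomGamma is an image-only transform that is usually wrapped in a Compose pipeline and called on a dict with an image key. The sketch below uses a random dummy image and the library's default gamma_limit of (80, 120); the values are illustrative rather than taken from the projects listed further down.

import numpy as np
import albumentations as A

# Minimal sketch: apply RandomGamma to a dummy uint8 image.
# p=1 forces the transform to run; gamma_limit=(80, 120) is the library default.
transform = A.Compose([A.RandomGamma(gamma_limit=(80, 120), p=1)])

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
augmented = transform(image=image)["image"]
print(augmented.shape, augmented.dtype)  # (256, 256, 3) uint8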


github albumentations-team / albumentations / tests / pytorch / test_image_only_transforms.py
            [A.RandomGamma(), ATorch.RandomGammaTorch()],
            [A.ToFloat(), ATorch.ToFloatTorch()],
            [A.FromFloat("uint8"), ATorch.FromFloatTorch()],
        ],
    ),
)
def test_image_transforms_grayscale(images, augs):
    image, image_torch = images
    dtype = image.dtype

    aug_cpu, aug_torch = augs
    aug_cpu.p = 1
    aug_torch.p = 1

    aug_cpu = A.Compose([aug_cpu])
    aug_torch = A.Compose([aug_torch])
github albumentations-team / albumentations / tests / test_transforms.py
        [A.RandomGamma, {}],
        [A.ToGray, {}],
        [A.VerticalFlip, {}],
        [A.HorizontalFlip, {}],
        [A.Flip, {}],
        [A.Transpose, {}],
        [A.RandomRotate90, {}],
        [A.Rotate, {}],
        [A.OpticalDistortion, {}],
        [A.GridDistortion, {}],
        [A.ElasticTransform, {}],
        [A.Normalize, {}],
        [A.ToFloat, {}],
        [A.FromFloat, {}],
        [A.ChannelDropout, {}],
        [A.Solarize, {}],
        [A.Posterize, {}],
github albumentations-team / albumentations / tests / test_serialization.py
        [A.RandomGamma, {"gamma_limit": (10, 90)}],
        [A.Cutout, {"num_holes": 4, "max_h_size": 4, "max_w_size": 4}],
        [A.CoarseDropout, {"max_holes": 4, "max_height": 4, "max_width": 4}],
        [A.RandomSnow, {"snow_point_lower": 0.2, "snow_point_upper": 0.4, "brightness_coeff": 4}],
        [
            A.RandomRain,
            {
                "slant_lower": -5,
                "slant_upper": 5,
                "drop_length": 15,
                "drop_width": 2,
                "drop_color": (100, 100, 100),
                "blur_value": 3,
                "brightness_coefficient": 0.5,
                "rain_type": "heavy",
            },
        ],
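In the serialization test above, RandomGamma is configured with gamma_limit=(10, 90). As far as I can tell, albumentations divides the sampled limit by 100, so this corresponds to gamma values of roughly 0.1 to 0.9, and gamma below 1 brightens mid-tones. The short sketch below is illustrative and not taken from the test suite.

import numpy as np
import albumentations as A

# Illustrative sketch (assumption: gamma_limit values are divided by 100, so
# (10, 90) samples gamma in roughly [0.1, 0.9]; gamma < 1 brightens the image).
aug = A.Compose([A.RandomGamma(gamma_limit=(10, 90), p=1)])

image = np.full((32, 32, 3), 64, dtype=np.uint8)  # a flat dark-gray image
result = aug(image=image)["image"]
print(image.mean(), "->", result.mean())  # the mean intensity should increase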
github ternaus / iglovikov_segmentation / configs / fpn_se_resnext101_32x4d_cityscapes_2V100a.py
train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(
            border_mode=cv2.BORDER_CONSTANT, rotate_limit=10, scale_limit=0, p=0.5, mask_value=ignore_index
        ),
        albu.RandomBrightnessContrast(p=0.5),
        albu.RandomGamma(p=0.5),
        albu.ImageCompression(quality_lower=20, quality_upper=100, p=0.5),
        albu.GaussNoise(p=0.5),
        albu.Blur(p=0.5),
        albu.CoarseDropout(p=0.5, max_height=26, max_width=16),
        albu.OneOf([albu.HueSaturationValue(p=0.5), albu.RGBShift(p=0.5)], p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
        albu.PadIfNeeded(
            min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
        ),
        normalization,
    ],
    p=1,
)
github spytensor / pytorch-image-classification / dataset / augmentations.py
A.RandomSizedCrop(min_max_height=(int(image_size[0] * 0.75), image_size[0]),
                              height=image_size[0],
                              width=image_size[1], p=0.3),
            A.NoOp()
        ]),

        A.ISONoise(p=0.5),
        A.JpegCompression(p=0.3, quality_lower=75),

        # Brightness/contrast augmentations
        A.OneOf([
            A.RandomBrightnessContrast(brightness_limit=0.5,
                                       contrast_limit=0.4),
            IndependentRandomBrightnessContrast(brightness_limit=0.25,
                                                contrast_limit=0.24),
            A.RandomGamma(gamma_limit=(50, 150)),
            A.NoOp()
        ]),

        A.OneOf([
            A.RGBShift(r_shift_limit=40, b_shift_limit=30, g_shift_limit=30),
            A.HueSaturationValue(hue_shift_limit=10,
                                 sat_shift_limit=10),
            A.ToGray(p=0.2),
            A.NoOp()
        ]),


        A.OneOf([
            A.ChannelDropout(p=0.2),
            A.CoarseDropout(p=0.1, max_holes=2, max_width=256, max_height=256, min_height=16, min_width=16),
            A.NoOp()
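In the snippet above, RandomGamma(gamma_limit=(50, 150)) sits inside A.OneOf together with two brightness/contrast variants and A.NoOp. OneOf applies at most one of its children per call, weighting the choice by the children's own p values. The block below is an illustrative reconstruction of just that brightness/contrast group, with my own probability values; IndependentRandomBrightnessContrast is not part of albumentations itself and is left out here.

import numpy as np
import albumentations as A

# Illustrative: OneOf picks exactly one child (with p=1.0 it always fires), so each
# image gets either a brightness/contrast shift, a gamma shift, or no change.
brightness_block = A.OneOf(
    [
        A.RandomBrightnessContrast(brightness_limit=0.5, contrast_limit=0.4),
        A.RandomGamma(gamma_limit=(50, 150)),
        A.NoOp(),
    ],
    p=1.0,
)

pipeline = A.Compose([brightness_block])
image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
augmented = pipeline(image=image)["image"]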
github ternaus / iglovikov_segmentation / configs / fpn_resnext50_32x4d_cityscapes_2gpu_d.py
train_augmentations = albu.Compose(
    [
        albu.RandomSizedCrop(
            min_max_height=(
                int(0.5 * (train_parameters["height_crop_size"])),
                int(2 * (train_parameters["height_crop_size"])),
            ),
            height=train_parameters["height_crop_size"],
            width=train_parameters["width_crop_size"],
            w2h_ratio=1.0,
            p=1,
        ),
        albu.ShiftScaleRotate(rotate_limit=20, scale_limit=0, p=0.5),
        albu.RandomBrightnessContrast(p=0.5),
        albu.RandomGamma(p=0.5),
        albu.HueSaturationValue(p=0.5),
        albu.HorizontalFlip(p=0.5),
        normalization,
    ],
    p=1,
)

val_augmentations = albu.Compose(
    [
        albu.PadIfNeeded(
            min_height=1024, min_width=2048, border_mode=cv2.BORDER_CONSTANT, mask_value=ignore_index, p=1
        ),
        normalization,
    ],
    p=1,
)
github catalyst-team / segmentation / src / transforms.py
scale_limit=0.1,
            rotate_limit=15,
            border_mode=cv2.BORDER_REFLECT,
            p=p,
        ),
        IAAPerspective(scale=(0.02, 0.05), p=p),
        OneOf([
            HueSaturationValue(p=p),
            ToGray(p=p),
            RGBShift(p=p),
            ChannelShuffle(p=p),
        ]),
        RandomBrightnessContrast(
            brightness_limit=0.5, contrast_limit=0.5, p=p
        ),
        RandomGamma(p=p),
        CLAHE(p=p),
        JpegCompression(quality_lower=50, p=p),
    ])
    return transforms
github n01z3 / kaggle-pneumothorax-segmentation / n04_dataset.py
def strong_aug(p=0.5):
    return Compose(
        [
            HorizontalFlip(p=0.5),
            RandomRotate90(p=0.4),
            Transpose(p=0.4),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
            # OneOf([
            #     ElasticTransform(alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
            #     GridDistortion(),
            #     OpticalDistortion(distort_limit=2, shift_limit=0.3)
            #     ], p=0.2),
            OneOf(
                [
                    RandomContrast(),
                    RandomGamma(),
                    RandomBrightness()
                    # RandomBrightnessContrast(),
                ],
                p=0.3,
            ),
        ],
        p=p,
    )
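The strong_aug snippet above is a complete pipeline factory but does not show its imports. With an albumentations release contemporary with that code (RandomContrast and RandomBrightness were later deprecated in favor of RandomBrightnessContrast), it can be used roughly as follows; the image and mask shapes here are my own illustration.

import numpy as np
from albumentations import (
    Compose, HorizontalFlip, OneOf, RandomBrightness, RandomContrast,
    RandomGamma, RandomRotate90, ShiftScaleRotate, Transpose,
)

# Assumes the strong_aug() definition from the snippet above is in scope.
# Compose pipelines accept image and mask together, so spatial transforms
# (flips, rotations, shifts) are applied consistently to both targets.
aug = strong_aug(p=0.9)
image = np.random.randint(0, 256, (512, 512), dtype=np.uint8)
mask = np.zeros((512, 512), dtype=np.uint8)
augmented = aug(image=image, mask=mask)
image_aug, mask_aug = augmented["image"], augmented["mask"]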