How to use the fastai.vision.imagenet_stats constant in fastai

To help you get started, we’ve selected a few fastai examples based on popular ways it is used in public projects.
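In fastai v1, imagenet_stats is a module-level constant rather than a function: a (mean, std) tuple of the per-channel ImageNet statistics that pretrained models expect. It is almost always passed to a DataBunch's normalize method, as in the examples below. A quick look at what it contains:

from fastai.vision import imagenet_stats

# (mean, std) per-channel ImageNet statistics used for input normalization.
mean, std = imagenet_stats
print(mean)  # [0.485, 0.456, 0.406]
print(std)   # [0.229, 0.224, 0.225]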


github microsoft / computervision-recipes / tests / unit / classification / test_classification_model.py
def tiny_ic_data(tiny_ic_data_path):
    """ Returns tiny ic data bunch """
    return (
        ImageList.from_folder(tiny_ic_data_path)
        .split_by_rand_pct(valid_pct=0.2, seed=10)
        .label_from_folder()
        .transform(size=299)
        .databunch(bs=16)
        .normalize(imagenet_stats)
    )
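For context, a test in the same module can request this fixture by name; a short, hypothetical sketch (assuming tiny_ic_data is registered with pytest's @pytest.fixture decorator, whose line is not shown in this excerpt):

def test_tiny_ic_data(tiny_ic_data):
    # The fixture returns a fastai v1 ImageDataBunch; sanity-check the labels
    # and the 20% validation split created above.
    assert len(tiny_ic_data.classes) > 0
    assert len(tiny_ic_data.valid_ds) > 0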
github gurvindersingh / mlapp / model.py
    global classes
    model = CONFIG['model_name']
    # Check if we need to download Model file
    if CONFIG[model]['url'] != "":
        try:
            logging.info(f"Downloading model file from: {CONFIG[model]['url']}")
            urllib.request.urlretrieve(CONFIG[model]['url'], f"models/{model}.pth")
            logging.info(f"Downloaded model file and stored at path: models/{model}.pth")
        except HTTPError as e:
            logging.critical(f"Failed in downloading file from: {CONFIG[model]['url']}, Exception: '{e}'")
            sys.exit(4)

    init_data = ImageDataBunch.single_from_classes(
                                    path, CONFIG[model]['classes'], tfms=get_transforms(),
                                    size=CONFIG[model]['size']
                                ).normalize(imagenet_stats)
    classes = CONFIG[model]['classes']
    logging.info(f"Loading model: {CONFIG['model_name']}, architecture: {CONFIG[model]['arch']}, file: models/{model}.pth")
    learn = create_cnn(init_data, eval(f"models.{CONFIG[model]['arch']}"))
    learn.load(model, device=CONFIG[model]['device'])

    # Create directory to get feedback for this model
    Path.mkdir(Path(path_to(FEEDBACK_DIR, model)), parents=True, exist_ok=True)
github microsoft / MLOps / examples / Fastai / Image-classification / train.py
from fastai.vision import (ImageDataBunch, get_transforms, cnn_learner, models, imagenet_stats, accuracy)
from pathlib import Path 
from azureml.core.run import Run 
import numpy as np

# get the Azure ML run object
run = Run.get_context()

# get images
path = Path('data')
np.random.seed(2)
data = ImageDataBunch.from_folder(path,
                                  train=".",
                                  valid_pct=0.2,
                                  ds_tfms=get_transforms(),
                                  size=224).normalize(imagenet_stats)

# build estimator based on ResNet 34
learn = cnn_learner(data, models.resnet34, metrics=accuracy)
learn.fit_one_cycle(2)

# do test time augmentation and get accuracy
acc = accuracy(*learn.TTA())


# log the accuracy to run
run.log('Accuracy', float(acc))
print("Accuracy: ", float(acc))
github developmentseed / fastai-serving / benchmark / make_fixture_models.py
import torchvision
from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats

data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats)
learner = cnn_learner(data, torchvision.models.resnet34)
learner.export()

data = (SegmentationItemList.from_folder('fixtures/segmentation/images')
    .split_none()
    .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2])
    .databunch()
    .normalize(imagenet_stats))
learner = unet_learner(data, torchvision.models.resnet50)
learner.export('../export.pkl')
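Once exported, these fixture models can be loaded back for inference with fastai v1's load_learner; a minimal sketch, assuming the export paths used above:

from fastai.vision import load_learner

# Reads the pickled Learner written by learner.export() (default file name export.pkl).
classifier = load_learner('fixtures/classification')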
github microsoft / computervision-recipes / utils_cv / classification / model.py
    Args:
        model (nn.Module): Base ImageNet model. E.g. models.resnet18()
        im_size (int): Image size the model will expect to have.

    Returns:
        Learner: a model trainer for prediction
    """

    # Currently, the fast.ai API requires passing a DataBunch to create a model trainer (learner).
    # To use the learner for prediction tasks without retraining, we have to pass an empty DataBunch.
    # single_from_classes is deprecated, but it is the easiest workaround for now.
    # Create ImageNet data spec as an empty DataBunch.
    # Related thread: https://forums.fast.ai/t/solved-get-prediction-from-the-imagenet-model-without-creating-a-databunch/36197/5
    empty_data = ImageDataBunch.single_from_classes(
        "", classes=imagenet_labels(), size=im_size
    ).normalize(imagenet_stats)

    return Learner(empty_data, model)
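The enclosing def is not shown in this excerpt, but in the repo this helper is model_to_learner; calling it yields a Learner that can classify single images against the ImageNet labels without any training. A hypothetical usage sketch (the image path is illustrative):

from fastai.vision import models, open_image
from utils_cv.classification.model import model_to_learner

# Wrap a pretrained ImageNet ResNet in a Learner backed by the empty DataBunch.
learn = model_to_learner(models.resnet18(pretrained=True), im_size=224)
pred_class, pred_idx, probs = learn.predict(open_image("example.jpg"))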
github microsoft / computervision-recipes / classification / python / 02_multilabel_classification.py
# __Loading data__
#
# Now that we know the structure of our data, let's use fast.ai's data block API to create our databunch so that we can easily load mini-batches of data from our file system into our trainer.

# In[7]:


np.random.seed(42)
data = (
    ImageList.from_csv(path, "labels.csv", folder="images")
    .split_by_rand_pct(0.2, seed=10)
    .label_from_df(label_delim=" ")
    .transform(size=IM_SIZE)
    .databunch(bs=BATCH_SIZE)
    .normalize(imagenet_stats)
)


# Let's break down the code:
#
# The first thing we need to do is create an `ImageList`, which we build from a csv file (`from_csv`). Then we want a random split (`split_by_rand_pct`) so that we have a validation set. For this method, we've also set a random seed (`np.random.seed(42)`) so that our validation set is consistent. Finally, we get our labels from the dataframe (`label_from_df`) that we built from the csv file. Since our labels are space-separated in the csv file, we specify that they are delimited by a space (`label_delim=' '`).
#
# In the second part, we take the `ImageList` we created and apply a transformation to it (`transform`) so that all images are resized to 299x299. Then we turn it into a databunch, which is the kind of object fast.ai's trainer uses to load mini-batches of data. Finally, we normalize the databunch (`normalize(imagenet_stats)`) with the ImageNet statistics.

# __Inspect data__
#
# To make sure our data is correctly loaded, let's print out the number of classes, and each of the class labels.

# In[8]:
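The In[8] cell is cut off in this excerpt; given the prose above, it prints the class count and labels, which with the databunch built earlier would look roughly like this (a sketch, not the notebook's exact code):

print(f"Number of classes: {data.c}")  # data.c is the number of classes
print(data.classes)                    # the class labels themselves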
github microsoft / computervision-recipes / utils_cv / classification / parameter_sweeper.py
            path (Union[Path, str]): path to data to create databunch with
            transform (bool): a flag to set fastai default transformations (get_transforms())
            im_size (int): image size of databunch
            bs (int): batch size of databunch
        Returns:
            ImageDataBunch
        """
        path = path if type(path) is Path else Path(path)
        tfms = get_transforms() if transform else None
        return (
            ImageList.from_folder(path)
            .split_by_rand_pct(valid_pct=0.33)
            .label_from_folder()
            .transform(tfms=tfms, size=im_size)
            .databunch(bs=bs, num_workers=db_num_workers())
            .normalize(imagenet_stats)
        )
github microsoft / computervision-recipes / similarity / notebooks / script_exploring_hyperparameters.py
        DROPOUT_RATE = 0.5
        BATCH_SIZE = 4  # 16; batch size has to be lower than the number of training examples
        ARCHITECTURE = models.resnet18
        IM_SIZE = 30  # 300

        # Load images into fast.ai's ImageDataBunch object
        random.seed(642)
        data = (
            ImageList.from_folder(DATA_PATH)
            .split_by_rand_pct(valid_pct=0.5, seed=20)
            .label_from_folder()
            .transform(size=IM_SIZE)
            .databunch(bs=BATCH_SIZE)
            .normalize(imagenet_stats)
        )
        print(
            f"""Training set: {len(data.train_ds.x)} images\nValidation set: {len(data.valid_ds.x)} images"""
        )

        # Init learner
        learn = cnn_learner(
            data,
            ARCHITECTURE,
            metrics=[accuracy],
            callback_fns=[partial(TrainMetricsRecorder, show_graph=True)],
            ps=DROPOUT_RATE,
        )

        # Train the last layer
        learn.fit_one_cycle(EPOCHS_HEAD, LEARNING_RATE)
github henripal / maps_webapp / model_backend / cities.py
import numpy as np


async def get_bytes(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.read()


app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['*'])

path = Path("./")
classes = ['Brazil', 'Armenia', 'Thailand', 'Algeria', 'China', 'United+Kingdom', 'South+Korea', 'Hungary', 'Central+African+Republic', 'Poland', 'Venezuela', 'Germany', 'France', 'Argentina', 'Russia', 'Cuba', 'Spain', 'Mexico', 'Japan', 'Philippines', 'Ukraine', 'Italy', 'Macedonia', 'India', 'Portugal', 'Ghana', 'Madagascar', 'United+States', 'Indonesia', 'Slovakia', 'Malaysia', 'Czech+Republic', 'Romania', 'Sri+Lanka', 'Bulgaria', 'Guinea', 'Colombia', 'Vietnam', 'Turkey', 'Seychelles', 'Pakistan', 'Chad', 'Cambodia', 'Iraq', 'Finland', 'Belarus', 'Afghanistan', 'Burkina+Faso', 'South+Africa', 'Australia', 'Peru', 'Kenya', 'Tunisia', 'Denmark', 'Canada', 'Namibia', 'Nigeria', 'Zambia', 'Iran', 'El+Salvador', 'Myanmar', 'Greece', 'Ethiopia', 'Sweden', 'Oman', 'Egypt', 'Bhutan', 'Belgium', 'Azerbaijan', 'Mozambique', 'Libya', 'Ivory+Coast', 'Croatia', 'East+Timor', 'Niger', 'Netherlands', 'Albania', 'Dominican+Republic', 'Somalia', 'Tanzania', 'Kosovo', 'French+Guiana', 'Ireland', 'Cameroon', 'Norway', 'French+Polynesia', 'Ecuador', 'Botswana', 'Angola', 'Syria', 'Costa+Rica', 'Morocco', 'Bangladesh', 'Yemen', 'Serbia', 'Lithuania', 'Uganda', 'Taiwan', 'Bosnia+and+Herzegovina', 'North+Korea', 'Israel', 'Benin', 'Slovenia', 'Papua+New+Guinea', 'Rwanda', 'Zimbabwe', 'Martinique', 'Moldova', 'Senegal', 'Gambia', 'Kuwait', 'Sudan', 'Malawi', 'Paraguay', 'Austria', 'Georgia', 'Wallis+and+Futuna', 'Kazakhstan', 'Panama', 'Mongolia', 'New+Zealand', 'Tajikistan', 'Switzerland', 'Sierra+Leone', 'Uzbekistan', 'Guam', 'Mauritius', 'Uruguay', 'Mayotte', 'Democratic+Republic+of+the+Congo', 'Guatemala', 'Jordan', 'Bolivia', 'Puerto+Rico', 'Honduras', 'Chile', 'Republic+of+the+Congo', 'Nicaragua', 'Guyana', 'Malta', 'Liberia', 'Mali', 'Trinidad+and+Tobago', 'Togo', 'Qatar', '"Bonaire,+Saint+Eustatius+and+Saba"', 'Lebanon', 'Faroe+Islands', 'Maldives', 'United+Arab+Emirates', 'Gabon', 'Laos', 'Estonia', 'Turkmenistan', 'Mauritania', 'Iceland', 'Saudi+Arabia', 'Hong+Kong', 'Haiti', 'Greenland', 'Brunei', 'Nepal', 'Bahrain', 'Cayman+Islands', 'Cape+Verde', 'Fiji', 'Suriname', 'Aruba', 'Pitcairn', 'Guinea-Bissau', 'Cook+Islands', 'Tonga', 'Eritrea', 'Niue', 'Cyprus', 'Palestinian+Territory', 'Gibraltar', 'Kyrgyzstan', 'Djibouti', 'Burundi', 'Saint+Helena', 'Tuvalu', 'Montenegro', 'Guadeloupe', 'South+Sudan', 'Falkland+Islands', 'Jamaica', 'Latvia', 'New+Caledonia', 'Samoa', 'Reunion', 'Curacao', 'San+Marino', 'Comoros', 'Guernsey', 'Lesotho', 'Sao+Tome+and+Principe', 'Northern+Mariana+Islands', 'Belize', 'Swaziland', 'Equatorial+Guinea', 'Palau', 'Barbados', 'Isle+of+Man', 'Cocos+Islands', 'Aland+Islands', 'Saint+Vincent+and+the+Grenadines', 'Luxembourg', 'Western+Sahara', 'Anguilla', 'Liechtenstein', 'Macao', 'Saint+Pierre+and+Miquelon', 'Andorra', 'French+Southern+Territories', 'British+Virgin+Islands', 'Montserrat', 'Nauru', 'Saint+Barthelemy', 'Bahamas', 'South+Georgia+and+the+South+Sandwich+Islands', 'Dominica', 'Singapore', 'Vanuatu', 'Bermuda', 'Jersey', 'Christmas+Island', 'Grenada', '+D.C.",United+States', 'Solomon+Islands', 'Monaco', 'Kiribati', 'Norfolk+Island', 'Vatican', 'Saint+Lucia', 'Saint+Kitts+and+Nevis', 'Turks+and+Caicos+Islands', 'Micronesia', 'U.S.+Virgin+Islands', 'Antigua+and+Barbuda', 'American+Samoa', 'Saint+Martin', 'Svalbard+and+Jan+Mayen', 'Sint+Maarten']

fake_data = ImageDataBunch.single_from_classes(path, classes, tfms=get_transforms(), size=224).normalize(imagenet_stats)
learn = create_cnn(fake_data, models.resnet50)
learn.model.load_state_dict(torch.load('resnet50-big-finetuned-bs64.pth', map_location='cpu'))



@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    s = data["file"]
    return predict_image_from_string(s)

@app.route("/heatmap", methods=["POST"])
async def heatmap(request):
    data = await request.form()
    s = data["file"]
    return heatmap_from_string(s)
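predict_image_from_string and heatmap_from_string are defined elsewhere in this module and are not shown here. A minimal, hypothetical sketch of the prediction helper, assuming the form field carries a base64-encoded image (the encoding and the response shape are assumptions, not the repo's exact code):

import base64
from io import BytesIO
from fastai.vision import open_image
from starlette.responses import JSONResponse

def predict_image_from_string(s):
    # Hypothetical: decode the uploaded payload and classify it with the learner above.
    img = open_image(BytesIO(base64.b64decode(s)))
    pred_class, pred_idx, probs = learn.predict(img)
    return JSONResponse({'country': str(pred_class)})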