How to use the oggm.utils module in oggm

To help you get started, we've selected a few examples of oggm.utils in action, based on popular ways it is used in public projects.
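Most of the excerpts below share the same setup pattern. As a starting point, here is a minimal, self-contained sketch of that pattern; the directory names are illustrative, but every call in it appears in the excerpts further down:

import os

import oggm.cfg as cfg
from oggm import utils

# Read OGGM's default parameter file
cfg.initialize()

# A throwaway working directory (the name is illustrative)
WORKING_DIR = utils.gettempdir('OGGM_utils_demo')
cfg.PATHS['working_dir'] = WORKING_DIR

# utils.mkdir creates the directory if it does not exist yet
utils.mkdir(os.path.join(WORKING_DIR, 'plots'))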


OGGM/oggm: oggm/sandbox/run_demtests.py (view on GitHub)
import glob
import os

import numpy as np
import salem

import oggm.cfg as cfg
from oggm import graphics, utils

# Initialize OGGM
cfg.initialize()

# Local paths (where to write output and where to download input)
DATA_DIR = '/home/mowglie/disk/OGGM_INPUT'
WORKING_DIR = '/home/mowglie/disk/OGGM_RUNS/TEST_DEMS'
PLOTS_DIR = os.path.join(WORKING_DIR, 'plots')

cfg.PATHS['working_dir'] = WORKING_DIR
cfg.PATHS['topo_dir'] = os.path.join(DATA_DIR, 'topo')
cfg.PATHS['rgi_dir'] = os.path.join(DATA_DIR, 'rgi')
utils.mkdir(WORKING_DIR)
utils.mkdir(cfg.PATHS['topo_dir'])
utils.mkdir(cfg.PATHS['rgi_dir'])

# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 20
cfg.PARAMS['continue_on_error'] = False

# Read in the RGI file
rgisel = os.path.join(WORKING_DIR, 'rgi_selection.shp')
if not os.path.exists(rgisel):
    rgi_dir = utils.get_rgi_dir()
    regions = ['{:02d}'.format(int(p)) for p in range(1, 20)]
    files = [glob.glob(os.path.join(rgi_dir, '*', r + '_rgi50_*.shp'))[0] for r in regions]
    rgidf = []
    for fs in files:
        sh = salem.read_shapefile(fs, cached=True)  # fs is already a full path
        percs = np.asarray([0, 25, 50, 75, 100])
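The glob-based loop above predates the dedicated RGI helpers used in later excerpts. For a single region, utils.get_rgi_region_file downloads (if needed) and returns the outlines file in one call; a sketch, where the region and version values are illustrative:

import geopandas as gpd
from oggm import utils

# '11' (Central Europe) and version '61' are illustrative choices
path = utils.get_rgi_region_file('11', version='61')
rgidf = gpd.read_file(path)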
OGGM/oggm: oggm/cli/benchmark.py (view on GitHub)
# Excerpt from a function body; it relies on `os`, `time`, `pandas as pd`,
# `geopandas as gpd`, `oggm.cfg as cfg`, `oggm.utils` and a module logger `log`.

# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border

# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True

# For statistics
odf = pd.DataFrame()

if rgi_version is None:
    rgi_version = cfg.PARAMS['rgi_version']
base_dir = os.path.join(output_folder)

# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
    vfile.write(utils.show_versions(logger=log))

# Read RGI
start = time.time()
if test_rgidf is None:
    # Get the RGI file
    rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                    version=rgi_version))
    # We use intersects
    rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                version=rgi_version)
    cfg.set_intersects_db(rgif)
else:
    rgidf = test_rgidf
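utils.show_versions returns the version summary as a string, which is why it can be written straight to package_versions.txt above. Assuming the logger argument is optional, a quick interactive check looks like this:

from oggm import utils

# Multi-line summary of OGGM and its dependencies
print(utils.show_versions())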
OGGM/oggm: oggm/core/flowline.py (view on GitHub)
# Excerpt from the flowline inversion code; variables like `a`, `b`, `cl`,
# `inv`, `gdir`, `section` and `bed_shape` are defined earlier in the function.
with np.errstate(divide='ignore', invalid='ignore'):
    thick = (np.sqrt(b ** 2 + 4 * a * section) - b) / a
ptrap = (lambdas != 0) & np.isfinite(lambdas)
bed_h[ptrap] = cl.surface_h[ptrap] - thick[ptrap]

# For the very last pixels of a glacier, the section might be zero after
# the inversion, and the bedshapes are chaotic. We interpolate from
# the downstream. This is not volume-conserving.
if not gdir.is_tidewater and inv['is_last']:
    dic_ds = gdir.read_pickle('downstream_line')
    bed_shape[-5:] = np.nan

    # Interpolate
    bed_shape = utils.interp_nans(np.append(bed_shape,
                                            dic_ds['bedshapes'][0]))
    bed_shape = utils.clip_min(bed_shape[:-1], min_shape)

    # Correct the section volume
    h = inv['thick']
    section[-5:] = (2 / 3 * h * np.sqrt(4 * h / bed_shape))[-5:]

    # Add the downstream
    bed_shape = np.append(bed_shape, dic_ds['bedshapes'])
    lambdas = np.append(lambdas, dic_ds['bedshapes'] * np.nan)
    section = np.append(section, dic_ds['bedshapes'] * 0.)
    surface_h = np.append(surface_h, dic_ds['surface_h'])
    bed_h = np.append(bed_h, dic_ds['surface_h'])
    widths_m = np.append(widths_m, dic_ds['bedshapes'] * 0.)
    line = dic_ds['full_line']

nfl = MixedBedFlowline(line=line, dx=cl.dx, map_dx=map_dx,
                       surface_h=surface_h, bed_h=bed_h,
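The interpolation step above leans on two small oggm.utils helpers. A sketch of their behaviour, assuming linear interpolation over NaNs and element-wise clipping, which is how the excerpt uses them:

import numpy as np
from oggm import utils

a = np.array([1., np.nan, 3.])
filled = utils.interp_nans(a)          # interior NaNs filled: [1., 2., 3.]
clipped = utils.clip_min(filled, 1.5)  # like np.clip(filled, 1.5, None)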
OGGM/oggm: oggm/cli/prepro_levels.py (view on GitHub)
# Excerpt from a function body; `rgidf`, `rgi_reg`, `border`, `base_dir`,
# `dem_source`, `test_topofile` and the logger `log` come from the caller.
rgidf = rgidf.sort_values('Area', ascending=False)

log.workflow('Starting prepro run for RGI reg: {} '
             'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))

# Input
if test_topofile:
    cfg.PATHS['dem_file'] = test_topofile

# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
    # This is the complex one: just do the job and leave
    log.workflow('Running prepro on ALL sources')
    for i, s in enumerate(utils.DEM_SOURCES):
        rs = i == 0
        rgidf['DEM_SOURCE'] = s
        log.workflow('Running prepro on sources: {}'.format(s))
        gdirs = workflow.init_glacier_regions(rgidf, reset=rs, force=rs)
        workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)

    # Make a GeoTIFF mask of the glaciers; any source will do
    workflow.execute_entity_task(gis.rasterio_glacier_mask,
                                 gdirs, source='ALL')

    # Compress all in output directory
    l_base_dir = os.path.join(base_dir, 'L1')
    workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                                 base_dir=l_base_dir)
    utils.base_dir_to_tar(l_base_dir)
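The last two calls implement a two-stage compression: utils.gdir_to_tar archives each glacier directory individually (here fanned out via execute_entity_task), and utils.base_dir_to_tar then bundles the whole level directory. A minimal sketch, with an illustrative path and an assumed pre-existing `gdirs` list:

from oggm import utils, workflow

# gdirs: glacier directories from an earlier workflow.init_glacier_regions()
l_base_dir = '/tmp/prepro/L1'  # illustrative
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
                             base_dir=l_base_dir)  # one tar per glacier
utils.base_dir_to_tar(l_base_dir)                  # bundle the directory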
OGGM/oggm: oggm/core/centerlines.py (view on GitHub)
for fl, catch_h in zip(fls, catchment_heights):

    # Interpolate widths
    widths = utils.interp_nans(fl.widths)
    widths = utils.clip_min(widths, 0.1)

    # Get topo per catchment and per flowline point
    fhgt = fl.surface_h

    # Max and min for the histogram
    maxh = np.max(fhgt)
    minh = np.min(fhgt)

    # Sometimes, the centerline does not reach as high as each pixel on the
    # glacier (e.g. RGI40-11.00006)
    catch_h = utils.clip_max(catch_h, maxh)
    # Same for min
    if fl.flows_to is None:
        # We clip only for the main flowline (this has reasons)
        catch_h = utils.clip_min(catch_h, minh)

    # Now decide on a binsize which ensures at least N elements per bin
    bsize = cfg.PARAMS['base_binsize']
    while True:
        maxb = utils.nicenumber(maxh, 1)
        minb = utils.nicenumber(minh, 1, lower=True)
        bins = np.arange(minb, maxb + bsize + 0.01, bsize)
        minb = np.min(bins)

        # Ignore the topo pixels below the last bin
        tmp_ght = catch_h[np.where(catch_h >= minb)]
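utils.nicenumber is what keeps the bin edges clean in the loop above. A sketch of its semantics, assuming it rounds to the next multiple of the given base (upwards by default, downwards with lower=True):

from oggm import utils

utils.nicenumber(2764.3, 50)              # -> 2800.0, next multiple of 50 up
utils.nicenumber(2764.3, 50, lower=True)  # -> 2750.0, next multiple down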
OGGM/oggm: oggm/sandbox/gmd_paper/plot_hef_scenarios.py (view on GitHub)
import xarray as xr

import oggm.cfg as cfg
from oggm import tasks, utils

# gdir: an initialized GlacierDirectory (set up earlier in the script)
tasks.compute_downstream_bedshape(gdir)
tasks.catchment_area(gdir)
tasks.catchment_intersections(gdir)
tasks.catchment_width_geom(gdir)
tasks.catchment_width_correction(gdir)
tasks.process_cru_data(gdir)
tasks.mu_candidates(gdir)
tasks.compute_ref_t_stars([gdir])
tasks.distribute_t_stars([gdir])
tasks.apparent_mb(gdir)
tasks.prepare_for_inversion(gdir)
tasks.volume_inversion(gdir, glen_a=cfg.A, fs=0)
tasks.filter_inversion_output(gdir)
tasks.init_present_time_glacier(gdir)

df = utils.glacier_characteristics([gdir], path=False)

reset = False
seed = 0

tasks.random_glacier_evolution(gdir, nyears=800, seed=seed, y0=2000,
                               filesuffix='_2000_def', reset=reset)

tasks.random_glacier_evolution(gdir, nyears=800, seed=seed, y0=1920,
                               filesuffix='_1920_def', reset=reset)


f = gdir.get_filepath('model_diagnostics', filesuffix='_2000_def')
ds1 = xr.open_dataset(f)
f = gdir.get_filepath('model_diagnostics', filesuffix='_1920_def')
ds2 = xr.open_dataset(f)
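The filesuffix mechanism is what lets the two scenario runs coexist in the same glacier directory; each diagnostics file can then be opened independently. A sketch using a context manager so the file handle is released (the variables inside the dataset, e.g. the volume and length time series, depend on the OGGM version):

import xarray as xr

f = gdir.get_filepath('model_diagnostics', filesuffix='_1920_def')
with xr.open_dataset(f) as ds:
    print(ds)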
OGGM/oggm: docs/run_examples/_code/run_from_calibrated_and_prepro.py (view on GitHub)
# Libs
import logging
import time

# Locals
import oggm.cfg as cfg
from oggm import tasks, utils, workflow
from oggm.workflow import execute_entity_task

# Module logger
log = logging.getLogger(__name__)

# For timing the run
start = time.time()

# Initialize OGGM and set up the run parameters
cfg.initialize()

# Local working directory (where OGGM will write its output)
WORKING_DIR = utils.gettempdir('OGGM_precalibrated_run')
cfg.PATHS['working_dir'] = WORKING_DIR

# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True

# Initialize from existing directories, no need for shapefiles
gdirs = workflow.init_glacier_regions()

log.info('Starting OGGM run')
log.info('Number of glaciers: {}'.format(len(gdirs)))

# We can step directly to a new experiment!
# Random climate representative for the recent climate (1985-2015)
# This is a kind of "commitment" run
execute_entity_task(tasks.run_random_climate, gdirs,
                    nyears=200, y0=2000, seed=1,
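The call above is truncated in the excerpt, but the pattern is complete enough to show: execute_entity_task maps one entity task over every glacier directory, in parallel whenever cfg.PARAMS['use_multiprocessing'] is True. The core of it, using only the keyword arguments visible above:

from oggm import tasks
from oggm.workflow import execute_entity_task

# One call fans the task out over all glacier directories
execute_entity_task(tasks.run_random_climate, gdirs,
                    nyears=200, y0=2000, seed=1)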
OGGM/oggm: oggm/core/vascaling.py (view on GitHub)
# Excerpt from a method of the volume/area-scaling model (`self` is the
# model instance, `year_end` an argument of the enclosing method).
# define different temporal indices
yearly_time = np.arange(np.floor(self.year), np.floor(year_end) + 1)

# TODO: include `store_monthly_step` in parameter list or remove IF:
store_monthly_step = False
if store_monthly_step:
    # get monthly time index
    monthly_time = utils.monthly_timeseries(self.year, year_end)
else:
    # fall back to the yearly index
    monthly_time = yearly_time.copy()
# get years and months for hydrological year and calendar year
yrs, months = utils.floatyear_to_date(monthly_time)
sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
                                                start_month=sm)

# get number of temporal indices
ny = len(yearly_time)
nm = len(monthly_time)
# deal with one-dimensional temporal indices
if ny == 1:
    yrs = [yrs]
    cyrs = [cyrs]
    months = [months]
    cmonths = [cmonths]

# initialize diagnostics output file
diag_ds = xr.Dataset()

# Global attributes
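utils.floatyear_to_date converts the float time axis into (year, month) pairs, and utils.hydrodate_to_calendardate shifts hydrological dates back onto the calendar. A sketch of the float-year convention, assuming month m maps to year + (m - 1) / 12:

from oggm import utils

utils.floatyear_to_date(2002.25)  # -> (2002, 4)
# It also accepts arrays, which is how the excerpt above uses it.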
OGGM/oggm: oggm/workflow.py (view on GitHub)
# Excerpt from the glacier-merging workflow; the first line closes an
# early-exit branch of the enclosing function.
    return gdir_main, gdirs

# separate those glaciers which are not already found to be a tributary
gdirs = [gd for gd in gdirs if gd not in tributaries]

gdirs_to_merge = []

for trib in tributaries:
    # for each tributary: check if we can merge additional glaciers into it
    merged, gdirs = _recursive_merging(gdirs, trib, glcdf=glcdf,
                                       filename=filename,
                                       input_filesuffix=input_filesuffix)
    gdirs_to_merge.append(merged)

# create merged glacier directory
gdir_merged = utils.initialize_merged_gdir(
    gdir_main, tribs=gdirs_to_merge, glcdf=glcdf, filename=filename,
    input_filesuffix=input_filesuffix)

flowline.merge_to_one_glacier(gdir_merged, gdirs_to_merge,
                              filename=filename,
                              input_filesuffix=input_filesuffix)

return gdir_merged, gdirs
OGGM/oggm: oggm/cli/prepro_levels.py (view on GitHub)
# Excerpt from the same prepro function as above; `output_folder`,
# `rgi_dir_name`, `border_dir_name`, `demo`, `test_rgidf` etc. come from
# the caller.
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)

# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
    vfile.write(utils.show_versions(logger=log))

if demo:
    rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
    # Get the RGI file
    rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
                                                    version=rgi_version))
    # We use intersects
    rgif = utils.get_rgi_intersects_region_file(rgi_reg,
                                                version=rgi_version)
    cfg.set_intersects_db(rgif)
else:
    rgidf = test_rgidf
    cfg.set_intersects_db(test_intersects_file)

if is_test:
    # Just for fun
    rgidf = rgidf.sample(test_nr)

# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)

log.workflow('Starting prepro run for RGI reg: {} '
             'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))