How to use the yt.utilities.logger.ytLogger.debug function in yt

To help you get started, we've selected a few yt examples that show popular ways ytLogger.debug is used in public projects.

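ytLogger is a standard logging.Logger instance, and yt code conventionally imports it under the alias mylog, as in all the snippets below. Here is a minimal sketch of calling debug directly; the dataset name and grid count are placeholder values, and yt's default log level is typically INFO, so DEBUG messages are hidden until you lower it:

import logging

from yt.utilities.logger import ytLogger as mylog

# ytLogger is a stdlib logging.Logger, so the usual level controls apply.
# yt typically defaults to INFO; lower the level to see debug output.
mylog.setLevel(logging.DEBUG)

# Arguments are interpolated lazily, printf-style, only when the message
# is actually emitted. "galaxy0030" and 42 are placeholder values.
mylog.debug("Loading dataset %s with %d grids", "galaxy0030", 42)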

github yt-project / yt / yt / frontends / enzo / data_structures.py
def _detect_output_fields(self):
        self.field_list = []
        # Do this only on the root processor to save disk work.
        if self.comm.rank in (0, None):
            mylog.info("Gathering a field list (this may take a moment.)")
            field_list = set()
            random_sample = self._generate_random_grids()
            for grid in random_sample:
                if not hasattr(grid, 'filename'): continue
                try:
                    gf = self.io._read_field_names(grid)
                except self.io._read_exception:
                    raise IOError("Grid %s is a bit funky?" % grid.id)
                mylog.debug("Grid %s has: %s", grid.id, gf)
                field_list = field_list.union(gf)
            if "AppendActiveParticleType" in self.dataset.parameters:
                ap_fields = self._detect_active_particle_fields()
                field_list = list(set(field_list).union(ap_fields))
                if not any(f[0] == 'io' for f in field_list):
                    if 'io' in self.dataset.particle_types_raw:
                        ptypes_raw = list(self.dataset.particle_types_raw)
                        ptypes_raw.remove('io')
                        self.dataset.particle_types_raw = tuple(ptypes_raw)

                    if 'io' in self.dataset.particle_types:
                        ptypes = list(self.dataset.particle_types)
                        ptypes.remove('io')
                        self.dataset.particle_types = tuple(ptypes)
            ptypes = self.dataset.particle_types
            ptypes_raw = self.dataset.particle_types_raw
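Note the pattern in the debug calls above: values such as grid.id are passed as separate arguments rather than formatted into the string by the caller. Because ytLogger is a standard logging.Logger, the printf-style interpolation is deferred until the record is actually emitted, so the formatting cost is skipped whenever the level is above DEBUG:

mylog.debug("Grid %s has: %s", grid.id, gf)     # lazy: formatted only if emitted
mylog.debug("Grid %s has: %s" % (grid.id, gf))  # eager: formatted unconditionally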
github yt-project / yt / yt / geometry / grid_geometry_handler.py
def _setup_geometry(self):
        mylog.debug("Counting grids.")
        self._count_grids()

        mylog.debug("Initializing grid arrays.")
        self._initialize_grid_arrays()

        mylog.debug("Parsing index.")
        self._parse_index()

        mylog.debug("Constructing grid objects.")
        self._populate_grid_objects()

        mylog.debug("Re-examining index")
        self._initialize_level_stats()
github yt-project / yt / yt / utilities / parallel_tools / io_runner.py
def terminate(self):
        msg = dict(op='end')
        if self.wg.comm.rank == 0:
            for rank in self.pool['io'].ranks:
                mylog.debug("Sending termination message to %s", rank)
                self.comm.comm.send(msg, dest=rank, tag=YT_TAG_MESSAGE)
github yt-project / yt / yt / frontends / stream / data_structures.py
def _parse_index(self):
        self.grid_dimensions = self.stream_handler.dimensions
        self.grid_left_edge[:] = self.stream_handler.left_edges
        self.grid_right_edge[:] = self.stream_handler.right_edges
        self.grid_levels[:] = self.stream_handler.levels
        self.grid_procs = self.stream_handler.processor_ids
        self.grid_particle_count[:] = self.stream_handler.particle_count
        mylog.debug("Copying reverse tree")
        self.grids = []
        # Grid ids are 0-indexed; a negative parent id (below) marks a root grid.
        for id in range(self.num_grids):
            self.grids.append(self.grid(id, self))
            self.grids[id].Level = self.grid_levels[id, 0]
        parent_ids = self.stream_handler.parent_ids
        if parent_ids is not None:
            reverse_tree = self.stream_handler.parent_ids.tolist()
            # Initial setup:
            for gid, pid in enumerate(reverse_tree):
                if pid >= 0:
                    self.grids[gid]._parent_id = pid
                    self.grids[pid]._children_ids.append(self.grids[gid].id)
        else:
            mylog.debug("Reconstructing parent-child relationships")
            self._reconstruct_parent_child()
github yt-project / yt / yt / frontends / art / io.py
def _read_fluid_selection(self, chunks, selector, fields, size):
        # Chunks in this case will have affiliated domain subset objects
        # Each domain subset will contain a hydro_offset array, which gives
        # pointers to level-by-level hydro information
        tr = defaultdict(list)
        cp = 0
        for chunk in chunks:
            for subset in chunk.objs:
                # Now we read the entire thing
                with open(subset.domain.ds._file_amr, "rb") as f:
                    # This contains the boundary information, so we skim
                    # through and pick off the right vectors
                    rv = subset.fill(f, fields, selector)
                # Use a name other than `f` for the field so it does not
                # shadow the file handle above
                for ft, fname in fields:
                    d = rv.pop(fname)
                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s:%s)",
                                fname, d.size, d.min(), d.max(),
                                cp, cp + d.size)
                    tr[(ft, fname)].append(d)
                cp += d.size
        d = {}
        for field in fields:
            d[field] = np.concatenate(tr.pop(field))
        return d
github yt-project / yt / yt / frontends / stream / data_structures.py
    # First we fix our field names, apply units to data
    # and check for consistency of field shapes
    field_units, data, number_of_particles = process_data(
        data, grid_dims=tuple(domain_dimensions))

    sfh = StreamDictFieldHandler()

    if number_of_particles > 0:
        particle_types = set_particle_types(data)
        # Used much further below.
        pdata = {"number_of_particles": number_of_particles}
        for key in list(data.keys()):
            if len(data[key].shape) == 1 or key[0] == 'io':
                if not isinstance(key, tuple):
                    field = ("io", key)
                    mylog.debug("Reassigning '%s' to '%s'", key, field)
                else:
                    field = key
                sfh._additional_fields += (field,)
                pdata[field] = data.pop(key)
    else:
        particle_types = {}

    if nprocs > 1:
        temp = {}
        new_data = {}
        for key in data.keys():
            psize = get_psize(np.array(data[key].shape), nprocs)
            grid_left_edges, grid_right_edges, shapes, slices = \
                             decompose_array(data[key].shape, psize, bbox)
            grid_dimensions = np.array([shape for shape in shapes],
                                       dtype="int32")
github yt-project / yt / yt / geometry / unstructured_mesh_handler.py
def _setup_geometry(self):
        mylog.debug("Initializing Unstructured Mesh Geometry Handler.")
        self._initialize_mesh()
github yt-project / yt / yt / frontends / enzo / data_structures.py
def _generate_random_grids(self):
        if self.num_grids > 40:
            starter = np.random.randint(0, 20)
            random_sample = np.mgrid[starter:len(self.grids)-1:20j].astype("int32")
            # We also add in a bit to make sure that some of the grids have
            # particles
            gwp = self.grid_particle_count > 0
            if np.any(gwp) and not np.any(gwp[(random_sample,)]):
                # We just add one grid.  This is not terribly efficient.
                first_grid = np.where(gwp)[0][0]
                random_sample.resize((21,))
                random_sample[-1] = first_grid
                mylog.debug("Added additional grid %s", first_grid)
            mylog.debug("Checking grids: %s", random_sample.tolist())
        else:
            random_sample = np.mgrid[0:max(len(self.grids),1)].astype("int32")
        return self.grids[(random_sample,)]
github yt-project / yt / yt / frontends / enzo / data_structures.py
        test_grid = os.path.join(self.directory, test_grid)
        if not os.path.exists(test_grid):
            test_grid = os.path.join(self.directory,
                                    os.path.basename(test_grid))
            mylog.debug("Your data uses the annoying hardcoded path.")
            self._strip_path = True
        if self.dataset_type is not None: return
        if rank == 3:
            mylog.debug("Detected packed HDF5")
            if self.parameters.get("WriteGhostZones", 0) == 1:
                self.dataset_type = "enzo_packed_3d_gz"
                self.grid = EnzoGridGZ
            else:
                self.dataset_type = 'enzo_packed_3d'
        elif rank == 2:
            mylog.debug("Detect packed 2D")
            self.dataset_type = 'enzo_packed_2d'
        elif rank == 1:
            mylog.debug("Detect packed 1D")
            self.dataset_type = 'enzo_packed_1d'
        else:
            raise NotImplementedError