# With strict kwargs checking enabled, unknown keyword arguments raise ValueError.
with self.assertRaises(ValueError):
    PathManager.ls(self._tmpfile, foo="foo")  # type: ignore
with self.assertRaises(ValueError):
    PathManager.mkdirs(self._tmpfile, foo="foo")  # type: ignore
with self.assertRaises(ValueError):
    PathManager.open(self._tmpfile, foo="foo")  # type: ignore
with self.assertRaises(ValueError):
    PathManager.rm(self._tmpfile, foo="foo")  # type: ignore

# Once strict checking is disabled, the same calls accept (and ignore) unknown kwargs.
PathManager.set_strict_kwargs_checking(False)

PathManager.copy(self._tmpfile, self._tmpfile, foo="foo")  # type: ignore
PathManager.exists(self._tmpfile, foo="foo")  # type: ignore
PathManager.get_local_path(self._tmpfile, foo="foo")  # type: ignore
PathManager.isdir(self._tmpfile, foo="foo")  # type: ignore
PathManager.isfile(self._tmpfile, foo="foo")  # type: ignore
PathManager.ls(self._tmpdir, foo="foo")  # type: ignore
PathManager.mkdirs(self._tmpdir, foo="foo")  # type: ignore
f = PathManager.open(self._tmpfile, foo="foo")  # type: ignore
f.close()

with open(os.path.join(self._tmpdir, "test_rm.txt"), "w") as f:
    rm_file = f.name
    f.write(self._tmpfile_contents)
    f.flush()
PathManager.rm(rm_file, foo="foo")  # type: ignore
def evaluate(self):
    comm.synchronize()
    self._predictions = comm.gather(self._predictions)
    self._predictions = list(itertools.chain(*self._predictions))
    if not comm.is_main_process():
        return

    # panopticapi works on local files, so fetch the ground-truth json locally.
    gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
    gt_folder = self._metadata.panoptic_root

    with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
        logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
        for p in self._predictions:
            with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
                f.write(p.pop("png_string"))

        with open(gt_json, "r") as f:
            json_data = json.load(f)
        json_data["annotations"] = self._predictions
        with PathManager.open(self._predictions_json, "w") as f:
            f.write(json.dumps(json_data))

        from panopticapi.evaluation import pq_compute

        with contextlib.redirect_stdout(io.StringIO()):
            pq_res = pq_compute(
                gt_json,
                PathManager.get_local_path(self._predictions_json),
                gt_folder=gt_folder,
                pred_folder=pred_dir,
            )

    res = {}
    res["PQ"] = 100 * pq_res["All"]["pq"]
    res["SQ"] = 100 * pq_res["All"]["sq"]
    res["RQ"] = 100 * pq_res["All"]["rq"]
    res["PQ_th"] = 100 * pq_res["Things"]["pq"]
    res["SQ_th"] = 100 * pq_res["Things"]["sq"]
    res["RQ_th"] = 100 * pq_res["Things"]["rq"]
    res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
    res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
    res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
    results = OrderedDict({"panoptic_seg": res})
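
The PQ/SQ/RQ values above decompose as PQ = SQ × RQ (Kirillov et al., "Panoptic Segmentation"): SQ is the mean IoU over matched (true-positive) segments and RQ is an F1-style recognition quality. A small illustrative helper, not part of panopticapi, showing the arithmetic:

def panoptic_quality(matched_ious, num_fp, num_fn):
    # matched_ious: IoUs of matched prediction/ground-truth segment pairs (IoU > 0.5).
    tp = len(matched_ious)
    if tp + num_fp + num_fn == 0:
        return 0.0, 0.0, 0.0
    sq = sum(matched_ious) / tp if tp else 0.0
    rq = tp / (tp + 0.5 * num_fp + 0.5 * num_fn)
    return sq * rq, sq, rq  # (pq, sq, rq)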
    image_root (str): the directory where the images in this json file exist.
    dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
        If provided, this function will put "thing_classes" into the metadata
        associated with this dataset.

Returns:
    list[dict]: a list of dicts in Detectron2 standard format. (See
    `Using Custom Datasets </tutorials/datasets.html>`_ )

Notes:
    1. This function does not read the image files.
       The results do not have the "image" field.
"""
from lvis import LVIS

json_file = PathManager.get_local_path(json_file)

timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
    logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

if dataset_name is not None:
    meta = get_lvis_instances_meta(dataset_name)
    MetadataCatalog.get(dataset_name).set(**meta)

# sort indices for reproducible results
img_ids = sorted(list(lvis_api.imgs.keys()))
# imgs is a list of dicts, each looks something like:
# {'license': 4,
#  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
#  'file_name': 'COCO_val2014_000000001268.jpg',
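
As a usage sketch, a loader like this is typically hooked into Detectron2's DatasetCatalog so it runs lazily; the dataset name and file paths below are hypothetical.

from detectron2.data import DatasetCatalog

DatasetCatalog.register(
    "my_lvis_train",  # hypothetical name
    lambda: load_lvis_json(
        "datasets/lvis/lvis_v0.5_train.json", "datasets/coco/", "lvis_v0.5_train"
    ),
)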
def _loadGEval(self):
    # Fetch the SMPL subdivision data used by the geodesic (GPS) evaluation.
    smpl_subdiv_fpath = PathManager.get_local_path("detectron2://densepose/SMPL_subdiv.mat")
    pdist_transform_fpath = PathManager.get_local_path(
        "detectron2://densepose/SMPL_SUBDIV_TRANSFORM.mat"
    )
    pdist_matrix_fpath = PathManager.get_local_path("detectron2://densepose/Pdist_matrix.pkl")
    SMPL_subdiv = loadmat(smpl_subdiv_fpath)
    self.PDIST_transform = loadmat(pdist_transform_fpath)
    self.PDIST_transform = self.PDIST_transform["index"].squeeze()
    UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze()
    ClosestVertInds = np.arange(UV.shape[1]) + 1  # 1-based vertex indices
    self.Part_UVs = []
    self.Part_ClosestVertInds = []
    # Group the subdivided vertices by the 24 DensePose body parts.
    for i in np.arange(24):
        self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)])
        self.Part_ClosestVertInds.append(
            ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]
        )
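
To show what the per-part tables built above are for, here is a sketch (not the evaluator's actual method) of a nearest-vertex lookup: given a predicted (u, v) inside body part i, find the closest subdivided SMPL vertex in UV space.

import numpy as np

def closest_vert(part_uvs, closest_vert_inds, u, v):
    # part_uvs: the (2, n) UV table for one part, i.e. self.Part_UVs[i - 1];
    # closest_vert_inds: the matching self.Part_ClosestVertInds[i - 1].
    d2 = (part_uvs[0] - u) ** 2 + (part_uvs[1] - v) ** 2
    return closest_vert_inds[int(np.argmin(d2))]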
def _get_local_path(self, path):
    logger = logging.getLogger(__name__)
    catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
    logger.info("Catalog entry {} points to {}".format(path, catalog_path))
    return PathManager.get_local_path(catalog_path)
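
With such a handler registered, catalog URIs resolve transparently; for example (the catalog entry name is illustrative):

local_weights = PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")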
    self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
else:
    self.keypoint_hflip_indices = None

if self.densepose_on:
    densepose_transform_srcs = [
        MetadataCatalog.get(ds).densepose_transform_src
        for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
    ]
    assert len(densepose_transform_srcs) > 0
    # TODO: check that DensePose transformation data is the same for
    # all the datasets. Otherwise one would have to pass DB ID with
    # each entry to select proper transformation data. For now, since
    # all DensePose annotated data uses the same data semantics, we
    # omit this check.
    densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
    self.densepose_transform_data = DensePoseTransformData.load(densepose_transform_data_fpath)

self.is_train = is_train
def _get_local_path(self, path):
    name = path[len(self.PREFIX) :]
    return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name)
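
A sketch of how handlers like the two above get wired in, assuming the handler class names from Detectron2's checkpoint catalog module:

PathManager.register_handler(ModelCatalogHandler())  # serves "catalog://" paths
PathManager.register_handler(Detectron2Handler())    # serves "detectron2://" paths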
    loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
    "category_id", "segmentation"). The values for these keys will be returned as-is.
    For example, the densepose annotations are loaded in this way.

Returns:
    list[dict]: a list of dicts in Detectron2 standard format. (See
    `Using Custom Datasets </tutorials/datasets.html>`_ )

Notes:
    1. This function does not read the image files.
       The results do not have the "image" field.
"""
from pycocotools.coco import COCO

timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
    coco_api = COCO(json_file)
if timer.seconds() > 1:
    logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))

id_map = None
if dataset_name is not None:
    meta = MetadataCatalog.get(dataset_name)
    cat_ids = sorted(coco_api.getCatIds())
    cats = coco_api.loadCats(cat_ids)
    # The categories in a custom json file may not be sorted.
    thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
    meta.thing_classes = thing_classes

    # In COCO, certain category ids are artificially removed,
    # and by convention they are always ignored.
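
In Detectron2 this comment is followed by remapping the (possibly non-contiguous) COCO category ids to a contiguous range [0, #categories); conceptually, along the lines of:

id_map = {v: i for i, v in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map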