# (extraction artifact, not part of the library) Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Optional dependency guard: BlackBoxAuditing supplies the repair algorithm
# used by DisparateImpactRemover. If the import fails, extend the error
# message with installation instructions before re-raising so the user knows
# how to obtain the extra.
# NOTE(review): the opening `try:` line was lost when this file was mangled;
# it is restored here — the bare `except` at the next line is otherwise a
# syntax error.
try:
    from BlackBoxAuditing.repairers.GeneralRepairer import Repairer
except ImportError as error:
    INSTALL_HINT = """
Try:
pip install -e .[disparate_impact_remover]
or
pip install -e .[all]
See additional instructions for Windows with Python 2:
https://github.com/IBM/AIF360#blackboxauditing"""
    # ImportError exposes the message via `.msg`; append the hint and re-raise
    # the same exception object so the original traceback is preserved.
    error.msg += INSTALL_HINT
    raise error
from aif360.algorithms import Transformer
# NOTE(review): truncated fragment — this class's `__init__` body is missing;
# the file is a corrupted concatenation of several modules and the next lines
# belong to a different module. Do not edit without the original source.
class DisparateImpactRemover(Transformer):
"""Disparate impact remover is a preprocessing technique that edits feature
values increase group fairness while preserving rank-ordering within groups
[1]_.
References:
.. [1] M. Feldman, S. A. Friedler, J. Moeller, C. Scheidegger, and
S. Venkatasubramanian, "Certifying and removing disparate impact."
ACM SIGKDD International Conference on Knowledge Discovery and Data
Mining, 2015.
"""
# NOTE(review): `sensitive_attribute` is undocumented below and the docstring
# is cut off — presumably by the truncation; confirm against the original.
def __init__(self, repair_level=1.0, sensitive_attribute=''):
"""
Args:
repair_level (float): Repair amount. 0.0 is no repair while 1.0 is
full repair."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import tempfile
import os
import subprocess
from aif360.algorithms import Transformer
# NOTE(review): truncated fragment — the `__init__` docstring below is cut
# off mid-sentence and never closed, and the method body is missing (file
# corruption). Restore from the original module before editing.
class PrejudiceRemover(Transformer):
"""Prejudice remover is an in-processing technique that adds a
discrimination-aware regularization term to the learning objective [6]_.
References:
.. [6] T. Kamishima, S. Akaho, H. Asoh, and J. Sakuma, "Fairness-Aware
Classifier with Prejudice Remover Regularizer," Joint European
Conference on Machine Learning and Knowledge Discovery in Databases,
2012.
"""
# No comments may be added past this point: the string opened on the next
# lines is unterminated, so anything after it would become string content.
def __init__(self, eta=1.0, sensitive_attr="", class_attr=""):
"""
Args:
eta (double, optional): fairness penalty parameter
sensitive_attr (str, optional): name of protected attribute#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
from aif360.algorithms import Transformer
from aif360.metrics import ClassificationMetric, utils
# NOTE(review): truncated fragment — the `__init__` signature below is fused
# with the import header of another module (`:import numpy as np`), and the
# method body is missing. File corruption; do not edit without the original.
class CalibratedEqOddsPostprocessing(Transformer):
"""Calibrated equalized odds postprocessing is a post-processing technique
that optimizes over calibrated classifier score outputs to find
probabilities with which to change output labels with an equalized odds
objective [7]_.
References:
.. [7] G. Pleiss, M. Raghavan, F. Wu, J. Kleinberg, and
K. Q. Weinberger, "On Fairness and Calibration," Conference on Neural
Information Processing Systems, 2017
Adapted from:
https://github.com/gpleiss/equalized_odds_and_calibration/blob/master/calib_eq_odds.py
"""
def __init__(self, unprivileged_groups, privileged_groups,
cost_constraint='weighted', seed=None):import numpy as np
from aif360.algorithms import Transformer
from aif360.metrics import utils
# NOTE(review): truncated fragment — the `__init__` docstring below is never
# closed and its final line is fused with another module's `from __future__`
# import. Method body missing; restore from the original before editing.
class Reweighing(Transformer):
"""Reweighing is a preprocessing technique that Weights the examples in each
(group, label) combination differently to ensure fairness before
classification [4]_.
References:
.. [4] F. Kamiran and T. Calders, "Data Preprocessing Techniques for
Classification without Discrimination," Knowledge and Information
Systems, 2012.
"""
# No comments past this point: the docstring opened below is unterminated.
def __init__(self, unprivileged_groups, privileged_groups):
"""
Args:
unprivileged_groups (list(dict)): Representation for unprivileged
group.
privileged_groups (list(dict)): Representation for privileged group.from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sklearn.preprocessing
import numpy as np
from aif360.algorithms import Transformer
# NOTE(review): truncated fragment — `def __init__(self):` below is fused
# with another module's `from __future__` import and the body is missing.
# File corruption; restore from the original module before editing.
class LimeEncoder(Transformer):
"""
Transformer for converting aif360 dataset into LIME dataset and vice versa.
(LIME - Local Interpretable Model-Agnostic Explanations)
See for details/usage:
https://github.com/marcotcr/lime
Reference:
M.T. Ribeiro, S. Singh, and C. Guestrin. "Why should I trust you?" Explaining the predictions of any classifier
https://arxiv.org/pdf/1602.04938v1.pdf
"""
def __init__(self):from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
# from copy import deepcopy
import pandas as pd
from logging import warn
from aif360.algorithms import Transformer
# from aif360.datasets import StructuredDataset
from aif360.datasets import BinaryLabelDataset
# NOTE(review): truncated fragment — the `__init__` signature below is fused
# with another module's `import numpy as np` and the body is missing. File
# corruption; restore from the original module before editing.
class OptimPreproc(Transformer):
"""Optimized preprocessing is a preprocessing technique that learns a
probabilistic transformation that edits the features and labels in the data
with group fairness, individual distortion, and data fidelity constraints
and objectives [3]_.
References:
.. [3] F. P. Calmon, D. Wei, B. Vinzamuri, K. Natesan Ramamurthy, and
K. R. Varshney. "Optimized Pre-Processing for Discrimination
Prevention." Conference on Neural Information Processing Systems,
2017.
Based on code available at: https://github.com/fair-preprocessing/nips2017
"""
def __init__(self, optimizer, optim_options, unprivileged_groups,
privileged_groups, verbose=False, seed=None):import numpy as np
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms import Transformer
class ARTClassifier(Transformer):
    """Wraps an instance of an :obj:`art.classifiers.Classifier` to extend
    :obj:`~aif360.algorithms.Transformer`.
    """

    def __init__(self, art_classifier):
        """Initialize ARTClassifier.

        Args:
            art_classifier (art.classifier.Classifier): A Classifier
                object from the `adversarial-robustness-toolbox`_.

        .. _adversarial-robustness-toolbox:
            https://github.com/IBM/adversarial-robustness-toolbox
        """
        # Register the classifier with the Transformer base (which receives
        # it as a constructor kwarg) and keep a direct private handle to it.
        super(ARTClassifier, self).__init__(art_classifier=art_classifier)
        self._art_classifier = art_classifier

# The code for Meta-Classification-Algorithm is based on, the paper
# https://arxiv.org/abs/1806.06055
# See: https://github.com/vijaykeswani/FairClassification
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from aif360.algorithms import Transformer
from aif360.algorithms.inprocessing.celisMeta.FalseDiscovery import FalseDiscovery
from aif360.algorithms.inprocessing.celisMeta.StatisticalRate import StatisticalRate
# NOTE(review): truncated fragment — the `__init__` docstring below is never
# closed; its last line is fused with another module's `import numpy as np`
# and the method body is missing. Restore from the original before editing.
# Also note the parameter named `type` shadows the builtin — presumably kept
# for API compatibility; verify against the original module.
class MetaFairClassifier(Transformer):
"""The meta algorithm here takes the fairness metric as part of the input
and returns a classifier optimized w.r.t. that fairness metric [11]_.
References:
.. [11] L. E. Celis, L. Huang, V. Keswani, and N. K. Vishnoi.
"Classification with Fairness Constraints: A Meta-Algorithm with
Provable Guarantees," 2018.
"""
# No comments past this point: the docstring opened below is unterminated.
def __init__(self, tau=0.8, sensitive_attr="", type="fdr"):
"""
Args:
tau (double, optional): Fairness penalty parameter.
sensitive_attr (str, optional): Name of protected attribute.
type (str, optional): The type of fairness metric to be used.import numpy as np
from warnings import warn
from aif360.algorithms import Transformer
from aif360.metrics import utils
from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric
# NOTE(review): truncated fragment — the `__init__` signature below is fused
# with another module's Apache license header and the body is missing. File
# corruption; restore from the original module before editing.
class RejectOptionClassification(Transformer):
"""Reject option classification is a postprocessing technique that gives
favorable outcomes to unprivileged groups and unfavorable outcomes to
privileged groups in a confidence band around the decision boundary with the
highest uncertainty [10]_.
References:
.. [10] F. Kamiran, A. Karim, and X. Zhang, "Decision Theory for
Discrimination-Aware Classification," IEEE International Conference
on Data Mining, 2012.
"""
def __init__(self, unprivileged_groups, privileged_groups,
low_class_thresh=0.01, high_class_thresh=0.99,
num_class_thresh=100, num_ROC_margin=50,
metric_name="Statistical parity difference",# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
from scipy.optimize import linprog
from aif360.algorithms import Transformer
from aif360.metrics import ClassificationMetric, utils
class EqOddsPostprocessing(Transformer):
"""Equalized odds postprocessing is a post-processing technique that solves
a linear program to find probabilities with which to change output labels to
optimize equalized odds [8]_ [9]_.
References:
.. [8] M. Hardt, E. Price, and N. Srebro, "Equality of Opportunity in
Supervised Learning," Conference on Neural Information Processing
Systems, 2016.
.. [9] G. Pleiss, M. Raghavan, F. Wu, J. Kleinberg, and
K. Q. Weinberger, "On Fairness and Calibration," Conference on Neural
Information Processing Systems, 2017.
"""
def __init__(self, unprivileged_groups, privileged_groups, seed=None):
"""
Args: