def __call__(self, examples, weight=0, fulldata=0):
    # works only with a binary discrete class (varType 1 == discrete)
    if not (examples.domain.classVar.varType == 1 and
            len(examples.domain.classVar.values) == 2):
        raise ValueError("BasicBayes learner only works with a binary discrete class.")
    for attr in examples.domain.attributes:
        if attr.varType != 1:
            raise ValueError("BasicBayes learner does not work with continuous attributes.")
    translate = orng2Array.DomainTranslation(self.translation_mode_d, self.translation_mode_c)
    # set up the domain translation on the full data set if one is given, otherwise on the examples
    if fulldata != 0:
        translate.analyse(fulldata, weight)
    else:
        translate.analyse(examples, weight)
    translate.prepareLR()
    (beta, coeffs) = self._process(orange.BayesLearner(examples), examples)
    return BasicBayesClassifier(beta, coeffs, translate)
# The function header and fold loop are reconstructed from the call to
# cross_validation(data, learners, k=10) below; the fold indices are assumed
# to come from orange.MakeRandomIndicesCV.
def cross_validation(data, learners, k):
    acc = [0.0] * len(learners)
    selection = orange.MakeRandomIndicesCV(data, folds=k)
    for test_fold in range(k):
        train_data = data.select(selection, test_fold, negate=1)
        test_data = data.select(selection, test_fold)
        classifiers = []
        for l in learners:
            classifiers.append(l(train_data))
        acc1 = accuracy(test_data, classifiers)
        print "%d: %s" % (test_fold + 1, ["%.6f" % a for a in acc1])
        for j in range(len(learners)):
            acc[j] += acc1[j]
    # average the per-fold accuracies
    for j in range(len(learners)):
        acc[j] = acc[j] / k
    return acc
orange.setrandseed(0)
# set up the learners
bayes = orange.BayesLearner()
tree = orngTree.TreeLearner(mForPruning=2)
bayes.name = "bayes"
tree.name = "tree"
learners = [bayes, tree]
# compute accuracies on data
data = orange.ExampleTable("voting")
acc = cross_validation(data, learners, k=10)
print "Classification accuracies:"
for i in range(len(learners)):
    print learners[i].name, acc[i]
# Description: Read data, build naive Bayesian classifier, and output class probabilities for the first few instances
# Category: modelling
# Uses: voting.tab
# Referenced: c_basics.htm
import orange
data = orange.ExampleTable("voting")
classifier = orange.BayesLearner(data)
print "Possible classes:", data.domain.classVar.values
print "Probabilities for democrats:"
for i in range(5):
    p = classifier(data[i], orange.GetProbabilities)
    print "%d: %5.3f (originally %s)" % (i + 1, p[1], data[i].getclass())
# Description: Demonstration of cross-validation as provided by the orngTest and orngStat modules
# Category: evaluation
# Uses: voting.tab
# Classes: orngTest.crossValidation
# Referenced: c_performance.htm
import orange
import orngTest, orngStat, orngTree
# set up the learners
bayes = orange.BayesLearner()
tree = orngTree.TreeLearner(mForPruning=2)
bayes.name = "bayes"
tree.name = "tree"
learners = [bayes, tree]
# cross-validate and collect evaluation results on the data
data = orange.ExampleTable("voting")
res = orngTest.crossValidation(learners, data, folds=10)
cm = orngStat.computeConfusionMatrices(res,
        classIndex=data.domain.classVar.values.index('democrat'))
stat = (('CA',   lambda res, cm: orngStat.CA(res)),
        ('Sens', lambda res, cm: orngStat.sens(cm)),
        ('Spec', lambda res, cm: orngStat.spec(cm)),
        ('AUC',  lambda res, cm: orngStat.AUC(res)),
        ('IS',   lambda res, cm: orngStat.IS(res)))
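# A hedged sketch (not part of the snippet above) of how the (name, scorer)
# pairs could be evaluated and printed as a per-learner score table; the
# column widths are illustrative only.
scores = [scorer(res, cm) for name, scorer in stat]
print "Learner  " + "".join(["%-7s" % name for name, scorer in stat])
for i in range(len(learners)):
    print "%-8s " % learners[i].name + "".join(["%5.3f  " % s[i] for s in scores])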
# Preprocessor name replacement rules
REPLACE = {
    preprocess.Discretize: "Discretize ({0.method})",
    preprocess.DiscretizeEntropy: "Discretize (entropy)",
    preprocess.RemoveContinuous: "Discretize (remove continuous)",
    preprocess.Continuize: "Continuize ({0.multinomialTreatment})",
    preprocess.RemoveDiscrete: "Continuize (remove discrete)",
    preprocess.Impute: "Impute ({0.model})",
    preprocess.ImputeByLearner: "Impute ({0.learner})",
    preprocess.DropMissing: "Remove missing",
    preprocess.FeatureSelection: "Feature selection ({0.measure}, {0.filter}, {0.limit})",
    preprocess.Sample: "Sample ({0.filter}, {0.limit})",
    orange.EntropyDiscretization: "entropy",
    orange.EquiNDiscretization: "freq, {0.numberOfIntervals}",
    orange.EquiDistDiscretization: "width, {0.numberOfIntervals}",
    orange.RandomLearner: "random",
    orange.BayesLearner: "bayes model",
    orange.MajorityLearner: "average",
    orange.MeasureAttribute_relief: "ReliefF",
    orange.MeasureAttribute_info: "Info gain",
    orange.MeasureAttribute_gainRatio: "Gain ratio",
    orange.MeasureAttribute_gini: "Gini",
    orange.MeasureAttribute_logOddsRatio: "Log Odds",
    orngSVM.MeasureAttribute_SVMWeights: "Linear SVM weights",
    type(lambda: None): _funcName}
import re
INSERT_RE = re.compile(r"{0\.(\w+)}")
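# Hypothetical helper (not part of the original snippet) showing one way the
# REPLACE templates and INSERT_RE could work together: look up the template
# for an object's class, then expand each "{0.attr}" placeholder from the
# object's attributes, recursing so that nested objects (e.g. a discretization
# method) are rendered via their own REPLACE entry.
def _display_name(obj):
    template = REPLACE.get(type(obj))
    if template is None:
        return str(obj)
    if callable(template):  # the type(lambda: None) entry maps to _funcName
        return template(obj)
    def expand(match):
        value = getattr(obj, match.group(1), None)
        return _display_name(value)
    return INSERT_RE.sub(expand, template)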
def __init__(self, parent=None):
    QStyledItemDelegate.__init__(self, parent)

def displayText(self, value, locale):
# maxp holds each classifier's highest class probability and pmatrix the
# corresponding class distributions; the most confident classifier wins
p = max(maxp)  # max class probability
classifier_index = maxp.index(p)
c = pmatrix[classifier_index].modus()
if resultType == orange.GetValue:
    return c
elif resultType == orange.GetProbabilities:
    return pmatrix[classifier_index]
else:
    return (c, pmatrix[classifier_index])
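# Hedged sketch (an assumption, in the spirit of the WinnerLearner used below)
# of the loop that would fill maxp and pmatrix before the selection logic
# above; the names self.classifiers and example are assumed from the
# surrounding classifier's __call__.
pmatrix = []
maxp = []
for c in self.classifiers:
    p = c(example, orange.GetProbabilities)
    pmatrix.append(p)
    maxp.append(max(p))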
tree = orngTree.TreeLearner(mForPruning=5.0)
tree.name = 'class. tree'
bayes = orange.BayesLearner()
bayes.name = 'naive bayes'
winner = WinnerLearner(learners=[tree, bayes])
winner.name = 'winner'
majority = orange.MajorityLearner()
majority.name = 'default'
learners = [majority, tree, bayes, winner]
data = orange.ExampleTable("promoters")
results = orngTest.crossValidation(learners, data)
print "Classification Accuracy:"
for i in range(len(learners)):
    print "%15s: %5.3f" % (learners[i].name, orngStat.CA(results)[i])
import orange, orngTree
def accuracy(test_data, classifiers):
    # fraction of examples in test_data that each classifier labels correctly
    correct = [0.0] * len(classifiers)
    for ex in test_data:
        for i in range(len(classifiers)):
            if classifiers[i](ex) == ex.getclass():
                correct[i] += 1
    for i in range(len(correct)):
        correct[i] = correct[i] / len(test_data)
    return correct
# set up the classifiers
data = orange.ExampleTable("voting")
bayes = orange.BayesLearner(data)
bayes.name = "bayes"
tree = orngTree.TreeLearner(data)
tree.name = "tree"
classifiers = [bayes, tree]
# compute accuracies on the training data itself
acc = accuracy(data, classifiers)
print "Classification accuracies:"
for i in range(len(classifiers)):
    print classifiers[i].name, acc[i]
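# For comparison, a sketch using the orngTest/orngStat pair seen in the other
# snippets; this assumes orngTest.testOnData, which evaluates already-built
# classifiers, and requires "import orngTest, orngStat" (not imported above).
# The scores should match the manual accuracy() computed on the same data.
res = orngTest.testOnData(classifiers, data)
print "orngStat.CA:", orngStat.CA(res)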