
Source Code for Module TEES.Evaluators.AveragingMultiClassEvaluator

"""
For multi-class classifications
"""
__version__ = "$Revision: 1.24 $"

from Evaluator import Evaluator
from Evaluator import EvaluationData
import sys, os, types
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/..")
from Core.IdSet import IdSet
import Core.ExampleUtils as ExampleUtils
import itertools

class AveragingMultiClassEvaluator(Evaluator):
    """
    An evaluator for multiclass classification results, where an example can belong to one
    of several classes. For calculating averages over multiple classes, one of the classes,
    "neg"/1, is considered to be negative while the others are considered to be different
    types of positive instances.
    """
    type = "multiclass"

    def __init__(self, examples, predictions=None, classSet=None):
        if type(classSet) == types.StringType: # class names are in file
            classSet = IdSet(filename=classSet)
        if type(predictions) == types.StringType: # predictions are in file
            predictions = ExampleUtils.loadPredictions(predictions)
        if type(examples) == types.StringType: # examples are in file
            examples = ExampleUtils.readExamples(examples, False)

        self.classSet = classSet
        # define class ids in alphabetical order
        self.classSet = classSet
        if classSet != None:
            classNames = sorted(classSet.Ids.keys())
        else:
            classNames = []
        # make an ordered list of class ids
        self.classes = []
        for className in classNames:
            self.classes.append(classSet.getId(className))
        # create data structures for per-class evaluation
        self.dataByClass = {}
        for cls in self.classes:
            self.dataByClass[cls] = EvaluationData()
        # hack for unnamed classes
        if len(self.dataByClass) == 0:
            self.dataByClass[1] = EvaluationData()
            self.dataByClass[2] = EvaluationData()

        #self.untypedUndirected = None
        self.untypedCurrentMajorId = None
        self.untypedPredictionQueue = []
        self.untypedUndirected = EvaluationData()
        #self.AUC = None
        if predictions != None:
            self._calculate(examples, predictions)

    @classmethod
    def evaluate(cls, examples, predictions, classSet=None, outputFile=None, verbose=True):
        """
        Enables using this class without having to manually instantiate it
        """
        evaluator = cls(examples, predictions, classSet)
        if verbose:
            print >> sys.stderr, evaluator.toStringConcise()
        if outputFile != None:
            evaluator.saveCSV(outputFile)
        return evaluator

    def compare(self, evaluation):
        if self.microF.fscore > evaluation.microF.fscore:
            return 1
        elif self.microF.fscore == evaluation.microF.fscore:
            return 0
        else:
            return -1

    def getData(self):
        return self.microF

    @classmethod
    def threshold(cls, examples, predictions):
        # Make negative confidence score / true class pairs
        if type(examples) in types.StringTypes:
            examples = ExampleUtils.readExamples(examples, False)
        if type(predictions) in types.StringTypes:
            predictions = ExampleUtils.loadPredictions(predictions)
        pairs = []
        realPositives = 0
        for example, prediction in itertools.izip(examples, predictions):
            trueClass = example[1]
            assert(trueClass > 0) # multiclass classification uses positive integer class ids
            if trueClass > 1:
                realPositives += 1
            negClassValue = prediction[1]
            pairs.append( (negClassValue, trueClass) )
        pairs.sort(reverse=True)
        realNegatives = len(pairs) - realPositives

        # When starting thresholding, all examples are considered positive
        binaryF = EvaluationData()
        binaryF._tp = realPositives
        binaryF._fp = realNegatives
        binaryF._fn = 0
        binaryF.calculateFScore()
        fscore = binaryF.fscore
        threshold = pairs[0][0]-1.

        # Turn one example negative at a time
        for pair in pairs:
            if pair[1] == 1: # the real class is negative
                binaryF._fp -= 1 # false positive -> true negative
            else: # the real class is a positive class
                binaryF._tp -= 1 # true positive -> ...
                binaryF._fn += 1 # ... false negative
            binaryF.calculateFScore()
            if binaryF.fscore > fscore:
                fscore = binaryF.fscore
                threshold = pair[0]+0.00000001
        return threshold, fscore
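
    # Added illustration, not part of the original source: suppose the sorted
    # (neg confidence, true class) pairs are [(0.9, 1), (0.4, 2), (-0.2, 2)].
    # The sweep starts with all three examples counted as positive (tp=2, fp=1,
    # f-score 0.8), then flips them to negative one at a time in descending
    # confidence order. Flipping the truly negative example (0.9, 1) first gives
    # tp=2, fp=0 and f-score 1.0, so the returned threshold is just above 0.9.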

#    def pool(evaluators):
#        predictions = []
#        for evaluator in evaluators:
#            assert(isinstance(evaluator,AveragingMultiClassEvaluator))
#            predictions.extend(evaluator.predictions)
#        return AveragingMultiClassEvaluator(predictions, evaluators[0].classSet)
#    pool = staticmethod(pool)
#
#    def average(evaluators):
#        averageEvaluator = AveragingMultiClassEvaluator(None, None)
#        averageEvaluator.microPrecision = 0
#        averageEvaluator.microRecall = 0
#        averageEvaluator.microFScore = 0
#        averageEvaluator.macroPrecision = 0
#        averageEvaluator.macroRecall = 0
#        averageEvaluator.macroFScore = 0
#        averageEvaluator.truePositives = "-"
#        averageEvaluator.falsePositives = "-"
#        averageEvaluator.trueNegatives = "-"
#        averageEvaluator.falseNegatives = "-"
#        sumWeight = 0.0
#        for evaluator in evaluators:
#            assert(isinstance(evaluator,AveragingMultiClassEvaluator))
#            weight = float(len(evaluator.predictions))
#            sumWeight += weight
#            averageEvaluator.macroPrecision += weight * evaluator.macroPrecision
#            averageEvaluator.macroRecall += weight * evaluator.macroRecall
#            averageEvaluator.macroFScore += weight * evaluator.macroFScore
#            averageEvaluator.microPrecision += weight * evaluator.microPrecision
#            averageEvaluator.microRecall += weight * evaluator.microRecall
#            averageEvaluator.microFScore += weight * evaluator.microFScore
#        averageEvaluator.macroPrecision /= sumWeight
#        averageEvaluator.macroRecall /= sumWeight
#        averageEvaluator.macroFScore /= sumWeight
#        averageEvaluator.microPrecision /= sumWeight
#        averageEvaluator.microRecall /= sumWeight
#        averageEvaluator.microFScore /= sumWeight
#        return averageEvaluator
#    average = staticmethod(average)

    def _queueUntypedUndirected(self, example, prediction):
        """
        All examples within the same majorId (same sentence) are
        put in a queue. Once the major id (sentence) changes, these
        examples are processed.
        """
        majorId, minorId = example[0].rsplit(".x", 1)
        if majorId != self.untypedCurrentMajorId: # new sentence
            self._processUntypedUndirectedQueue() # process queue
            self.untypedCurrentMajorId = majorId
        self.untypedPredictionQueue.append( (example, prediction) ) # queue example

    def _processUntypedUndirectedQueue(self):
        """
        Determines the untyped undirected performance by merging example
        pairs. This statistic is only meaningful for examples representing
        directed edges where two consecutive examples are the two directed
        edges between a pair of nodes.
        """
        prevExample = None
        prevPrediction = None
        for example, prediction in self.untypedPredictionQueue:
            majorId, minorId = example[0].rsplit(".x", 1)
            if prevExample != None and prevPrediction != None and int(minorId) % 2 != 0:
                # A positive example in either direction counts as a positive
                if example[1] != 1 or prevExample[1] != 1: # 1 is the multiclass "neg" class id
                    trueClass = 1 # binary positive class
                else:
                    trueClass = -1 # binary negative class
                # A positive prediction in either direction counts as a positive
                if prediction[0] != 1 or prevPrediction[0] != 1:
                    predictedClass = 1
                else:
                    predictedClass = -1
                self.untypedUndirected.addInstance(trueClass == 1, predictedClass == 1)
            prevExample = example
            prevPrediction = prediction
        self.untypedPredictionQueue = [] # clear the queue

#    def _calculateUntypedUndirected(self, examples, predictions):
#        untypedUndirectedPredictions = []
#        predictionsById = {}
#        for i in range(len(examples)):
#            id = examples[i][0]
#            if id != None and id != "":
#                majorId, minorId = id.rsplit(".x", 1)
#                if not predictionsById.has_key(majorId):
#                    predictionsById[majorId] = {}
#                predictionsById[majorId][int(minorId)] = (examples[i], predictions[i])
#        for majorId in sorted(predictionsById.keys()):
#            prevPrediction = None
#            for minorId in sorted(predictionsById[majorId]):
#                prediction = predictionsById[majorId][minorId]
#                if prevPrediction != None and minorId % 2 != 0:
#                    if prediction[0][1] != 1 or prevPrediction[0][1] != 1:
#                        trueClass = 1
#                    else:
#                        trueClass = -1
#                    if prediction[1][0] != 1 or prevPrediction[1][0] != 1:
#                        predictedClass = 1
#                    else:
#                        predictedClass = -1
#                    untypedUndirectedPredictions.append( ((None,trueClass),predictedClass) )
#                prevPrediction = prediction
#        if len(untypedUndirectedPredictions) > 0:
#            self.untypedUndirected = BinaryEvaluator(untypedUndirectedPredictions)

    def _calculate(self, examples, predictions):
        """
        The actual evaluation
        """
        #self._calculateUntypedUndirected(examples, predictions)
        # First count instances
        self.microF = EvaluationData()
        self.binaryF = EvaluationData()
        #self.classifications = []
        #assert(len(examples) == len(predictions))
        #for i in range(len(examples)):
        for example, prediction in itertools.izip(examples, predictions):
            # self._queueUntypedUndirected(example, prediction)
            #example = examples[i] # examples and predictions are in matching lists
            #prediction = predictions[i] # examples and predictions are in matching lists
            trueClass = example[1]
            assert(trueClass > 0) # multiclass classification uses positive integer class ids
            predictedClass = prediction[0]
            #print predictedClass
            assert(predictedClass > 0) # multiclass classification uses positive integer class ids
            if predictedClass == trueClass: # correct classification
                # correctly classified for its class -> true positive for that class
                self.dataByClass[trueClass].addTP()
                if trueClass != 1: # a non-negative example -> correct = true positive
                    #self.classifications.append("tp")
                    #self.classifications.append((prediction[0],"tp",self.type,prediction[1],prediction[3]))
                    self.microF.addTP()
                    self.binaryF.addTP()
                else: # a negative example -> correct = true negative
                    #self.classifications.append((prediction[0],"tn",self.type,prediction[1],prediction[3]))
                    #self.classifications.append("tn")
                    self.microF.addTN()
                    self.binaryF.addTN()
                for cls in self.classes:
                    # this example was correctly classified for its class,
                    # so it is also correctly classified for each other class,
                    # i.e. a true negative for them
                    if cls != trueClass:
                        self.dataByClass[cls].addTN()
            else: # predictedClass != trueClass
                # prediction was incorrect -> false positive for the predicted class
                self.dataByClass[predictedClass].addFP()
                if predictedClass == 1: # non-negative example, negative prediction -> incorrect = false negative
                    #self.classifications.append("fn")
                    #self.classifications.append((prediction[0],"fn",self.type,prediction[1],prediction[3]))
                    self.microF.addFN()
                    self.binaryF.addFN()
                else: # non-negative incorrect prediction -> false positive
                    #self.classifications.append("fp")
                    #self.classifications.append((prediction[0],"fp",self.type,prediction[1],prediction[3]))
                    self.microF.addFP()
                    if trueClass == 1:
                        self.binaryF.addFP()
                    else:
                        self.binaryF.addTP()
                for cls in self.classes:
                    if cls == trueClass: # example not found -> false negative
                        self.dataByClass[cls].addFN()
                    elif cls != predictedClass:
                        self.dataByClass[cls].addTN()

        # Process remaining untyped undirected examples and calculate untyped undirected f-score
        # self._processUntypedUndirectedQueue()
        # self.untypedUndirected.calculateFScore()

        # Then calculate statistics
        for cls in self.classes:
            self.dataByClass[cls].calculateFScore()
        self.microF.calculateFScore()
        self.binaryF.calculateFScore()

        # Finally calculate macro-f-score
        # macro-average is simply the unweighted average of per-class f-scores
        numClassesWithInstances = 0
        self.macroF = EvaluationData()
        self.macroF.precision = 0.0
        self.macroF.recall = 0.0
        self.macroF.fscore = 0.0
        for cls in self.classes:
            if (self.dataByClass[cls].getNumInstances() > 0 or self.dataByClass[cls].getFP() > 0) and cls != self.classSet.getId("neg", False):
                numClassesWithInstances += 1
                self.macroF.precision += self.dataByClass[cls].precision
                self.macroF.recall += self.dataByClass[cls].recall
                if self.dataByClass[cls].fscore != "N/A":
                    self.macroF.fscore += self.dataByClass[cls].fscore
        if numClassesWithInstances > 0:
            if self.macroF.precision != 0: self.macroF.precision /= float(numClassesWithInstances)
            if self.macroF.recall != 0: self.macroF.recall /= float(numClassesWithInstances)
            if self.macroF.fscore != 0: self.macroF.fscore /= float(numClassesWithInstances)

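    # Added illustration, not part of the original source: with two positive
    # classes A and B where A has tp=8, fp=2, fn=2 (precision=recall=f-score=0.8)
    # and B has tp=1, fp=1, fn=3 (precision=0.5, recall=0.25, f-score~0.33), the
    # micro average pools the counts (tp=9, fp=3, fn=5 -> f-score~0.69), while
    # the macro average is the unweighted mean of the per-class f-scores (~0.57).
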
    def toStringConcise(self, indent="", title=None):
        """
        Evaluation results in a human readable string format
        """
        if title != None:
            string = indent + title + "\n"
            indent += " "
            string += indent
        else:
            string = indent
        negativeClassId = None
        for cls in self.classes:
            if cls != self.classSet.getId("neg", False):
                string += self.classSet.getName(cls)
                string += " " + self.dataByClass[cls].toStringConcise() + "\n" + indent
            else:
                negativeClassId = cls
        if negativeClassId != None:
            cls = negativeClassId
            string += "(neg " + self.dataByClass[cls].toStringConcise() + ")\n" + indent

        string += "averages:\n" + indent
        # Micro results
        string += "micro " + self.microF.toStringConcise() + "\n" + indent
        # Macro results
        string += "macro " + self.macroF.prfToString() + "\n" + indent
        # Binary results
        string += "untyped " + self.binaryF.toStringConcise()
        # Untyped undirected results
        if self.untypedUndirected != None:
            string += "\n" + indent
            string += "untyped undirected " + self.untypedUndirected.toStringConcise()
        return string

#    def __addClassToCSV(self, csvWriter, cls):
#        values = []
#        values.append( self.classSet.getName(cls) )
#        values.append( self.truePositivesByClass[cls]+self.falseNegativesByClass[cls] )
#        values.append( self.trueNegativesByClass[cls]+self.falsePositivesByClass[cls] )
#        values.append(self.truePositivesByClass[cls])
#        values.append(self.falsePositivesByClass[cls])
#        values.append(self.trueNegativesByClass[cls])
#        values.append(self.falseNegativesByClass[cls])
#        if self.instancesByClass[cls] > 0 or self.falsePositivesByClass[cls] > 0:
#            values.append(self.precisionByClass[cls])
#            values.append(self.recallByClass[cls])
#            values.append(self.fScoreByClass[cls])
#        else:
#            values.extend(["N/A","N/A","N/A"])
#        csvWriter.writerow(values)
#

    def toDict(self):
        """
        Evaluation results in a dictionary format that is easy to process programmatically
        """
        dicts = []
        if len(self.classes) > 0:
            assert(not ("1" in self.classSet.getNames() and "neg" in self.classSet.getNames()))
        negativeClassId = None
        for cls in self.classes:
            if cls != self.classSet.getId("neg", False) and cls != self.classSet.getId("1", False):
                values = self.dataByClass[cls].toDict()
                values["class"] = self.classSet.getName(cls)
                dicts.append(values)
            else:
                assert(negativeClassId == None)
                negativeClassId = cls
        if negativeClassId != None:
            values = self.dataByClass[negativeClassId].toDict()
            values["class"] = "neg"
            dicts.append(values)
        dicts.append( self.microF.toDict() )
        dicts[-1]["class"] = "micro"
        dicts.append( self.macroF.toDict() )
        dicts[-1]["class"] = "macro"
        dicts.append( self.binaryF.toDict() )
        dicts[-1]["class"] = "untyped"
        if self.untypedUndirected != None:
            dicts.append(self.untypedUndirected.toDict())
            dicts[-1]["class"] = "untyped undirected"
        return dicts
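
A minimal usage sketch, not part of the module source. The in-memory formats are inferred from _calculate() and threshold() above: each example is an (id, trueClassId, ...) tuple and each prediction starts with (predictedClassId, negativeClassConfidence, ...). The class names, scores, import paths and the assumption that a freshly created IdSet assigns id 1 to the first name ("neg") are illustrative guesses, not verified API behaviour.

    from Core.IdSet import IdSet
    from AveragingMultiClassEvaluator import AveragingMultiClassEvaluator  # assumed import path

    classSet = IdSet()                        # assumed: getId() assigns consecutive ids starting at 1
    for name in ["neg", "Cause", "Theme"]:    # "neg" must end up with class id 1
        classSet.getId(name)

    examples = [("d0.s0.x0", 2), ("d0.s0.x1", 1), ("d0.s0.x2", 3)]   # (example id, true class id)
    predictions = [(2, -1.2), (3, 0.4), (3, -0.3)]                   # (predicted class id, "neg" confidence)

    # verbose=True would additionally print toStringConcise() to stderr
    evaluator = AveragingMultiClassEvaluator.evaluate(examples, predictions, classSet, verbose=False)
    print evaluator.microF.fscore, evaluator.macroF.fscore            # micro and macro averaged f-scores
    print AveragingMultiClassEvaluator.threshold(examples, predictions)  # (best "neg" threshold, binary f-score)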