
Source Code for Module TEES.Evaluators.Evaluator

  1  """ 
  2  Base class for Evaluators 
  3  """ 
  4  __version__ = "$Revision: 1.17 $" 
  5   
  6  g_evaluatorFieldnames = ["fold","class","positives","negatives","true positives","false positives","true negatives","false negatives","precision","recall","f-score","AUC"] 
  7   
8 -def compare(e1, e2):
9 return e1.compare(e2)
10
class Evaluator:
    """
    An abstract base class for classes used to evaluate the performance of different classifiers.
    """
    def compare(self, evaluator):
        """
        Compare overall performance between two sets of classified examples.
        """
        raise NotImplementedError

    def getData(self):
        """
        Return the EvaluationData corresponding to the main evaluation criterion for this Evaluator.
        """
        raise NotImplementedError

    def average(evaluators):
        """
        Return the average of the main evaluation criteria for this Evaluator type.
        """
        raise NotImplementedError
    average = staticmethod(average)

    def pool(evaluators):
        """
        Return the average of the main evaluation criteria for this Evaluator type calculated
        by pooling all individual classifications.
        """
        raise NotImplementedError
    pool = staticmethod(pool)

    def _calculate(self, predictions):
        raise NotImplementedError

    def toStringConcise(self, indent="", title=None):
        raise NotImplementedError

    def toDict(self):
        raise NotImplementedError

    def saveCSV(self, filename, fold=None):
        import sys
        sys.path.append("..")
        import Utils.TableUtils as TableUtils
        dicts = self.toDict()
        if fold != None:
            for d in dicts:
                d["fold"] = fold
        TableUtils.addToCSV(dicts, filename, g_evaluatorFieldnames)

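Concrete evaluators (for example the BinaryEvaluator used as the default in the command-line section at the end of this module) implement this interface. Purely as an illustration of the contract, a minimal subclass could look roughly like the sketch below; the (predictions, classSet) constructor signature is inferred from calculateFromCSV further down, the class name is made up, and the greater-than-zero "positive" convention is an assumption, not the real TEES implementation.

    # Hypothetical sketch only -- this is not the actual TEES BinaryEvaluator.
    class SketchBinaryEvaluator(Evaluator):
        type = "binary" # any value other than "multiclass"; see evaluateCSV below

        def __init__(self, predictions=None, classSet=None):
            self.classSet = classSet
            self.data = EvaluationData() # defined later in this module
            if predictions != None:
                self._calculate(predictions)

        def _calculate(self, predictions):
            # calculateFromCSV builds each prediction as ((id, trueClass), predictedValue, None, None);
            # here "positive" is assumed to mean a value greater than zero (+1/-1 style labels).
            for prediction in predictions:
                trueClass = prediction[0][1]
                predictedValue = prediction[1]
                self.data.addInstance(trueClass > 0, predictedValue > 0)
            self.data.calculateFScore()

        def getData(self):
            return self.data

        def compare(self, evaluator):
            # Python 2 cmp-style ordering by the main F-score
            return cmp(self.getData().fscore, evaluator.getData().fscore)

        def toStringConcise(self, indent="", title=None):
            return indent + self.data.toStringConcise()
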
class EvaluationData:
    """
    Calculates F-score for data that can be divided into true/false positives and
    true/false negatives.
    """
    def __init__(self):
        self._tp = 0
        self._fp = 0
        self._tn = 0
        self._fn = 0
        self.resetStats()

    def resetStats(self):
        self.fscore = None
        self.precision = None
        self.recall = None

    def addInstance(self, trueClassIsPositive, predictedClassIsPositive):
        if trueClassIsPositive and predictedClassIsPositive:
            self.addTP()
        elif trueClassIsPositive and not predictedClassIsPositive:
            self.addFN()
        elif (not trueClassIsPositive) and predictedClassIsPositive:
            self.addFP()
        else: # (not trueClassIsPositive) and (not predictedClassIsPositive)
            self.addTN()

    def removeInstance(self, trueClassIsPositive, predictedClassIsPositive):
        self.resetStats()
        if trueClassIsPositive and predictedClassIsPositive:
            self._tp -= 1
        elif trueClassIsPositive and not predictedClassIsPositive:
            self._fn -= 1
        elif (not trueClassIsPositive) and predictedClassIsPositive:
            self._fp -= 1
        else: # (not trueClassIsPositive) and (not predictedClassIsPositive)
            self._tn -= 1

    def addTP(self):
        self.resetStats()
        self._tp += 1

    def addFP(self):
        self.resetStats()
        self._fp += 1

    def addTN(self):
        self.resetStats()
        self._tn += 1

    def addFN(self):
        self.resetStats()
        self._fn += 1

    def getTP(self): return self._tp
    def getFP(self): return self._fp
    def getTN(self): return self._tn
    def getFN(self): return self._fn

    def getNumInstances(self):
        return self._tp + self._fp + self._tn + self._fn

    def calculateFScore(self):
        assert self._tp >= 0 and self._fp >= 0 and self._tn >= 0 and self._fn >= 0, (self._tp, self._fp, self._tn, self._fn)
        if self._tp + self._fp > 0:
            self.precision = float(self._tp) / float(self._tp + self._fp)
        else:
            self.precision = 0.0
        if self._tp + self._fn > 0:
            self.recall = float(self._tp) / float(self._tp + self._fn)
        else:
            self.recall = 0.0
        if self.precision + self.recall > 0.0:
            self.fscore = (2*self.precision*self.recall) / (self.precision + self.recall)
        else:
            self.fscore = 0

    def prfToString(self):
        if self.fscore != "N/A":
            return "p/r/f:" + str(self.precision)[0:6] + "/" + str(self.recall)[0:6] + "/" + str(self.fscore)[0:6]
        else:
            return "p/r/f:N/A"

    def pnToString(self):
        return "p/n:" + str(self._tp+self._fn) + "/" + str(self._tn+self._fp)

    def instanceCountsToString(self):
        return "tp/fp|tn/fn:" + str(self._tp) + "/" + str(self._fp) + "|" + str(self._tn) + "/" + str(self._fn)

    def toStringConcise(self):
        return self.pnToString() + " " + self.instanceCountsToString() + " " + self.prfToString()

    def toDict(self):
        values = {}
        values["positives"] = self._tp+self._fn
        values["negatives"] = self._tn+self._fp
        values["true positives"] = self._tp
        values["false positives"] = self._fp
        values["true negatives"] = self._tn
        values["false negatives"] = self._fn
        values["precision"] = self.precision
        values["recall"] = self.recall
        values["f-score"] = self.fscore
        values["AUC"] = "N/A"
        return values

    def saveCSV(self, filename, fold=None):
        global g_evaluatorFieldnames
        import sys
        sys.path.append("..")
        import Utils.TableUtils as TableUtils
        dicts = self.toDict()
        if fold != None:
            # toDict returns a single dictionary for this class, so the fold column is set
            # directly on it (iterating over the dictionary would only yield its keys).
            dicts["fold"] = fold
        #TableUtils.addToCSV(dicts, filename, g_evaluatorFieldnames)
        TableUtils.writeCSV(dicts, filename, g_evaluatorFieldnames, writeTitles=True)

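As a small, self-contained illustration (not part of the module), the following Python 2 snippet tallies some hand-made classifications with EvaluationData and prints the scores. With 8 true positives, 2 false positives and 4 false negatives, precision is 8/10 = 0.8, recall is 8/12 ≈ 0.667 and the F-score is about 0.727.

    # Illustrative usage of EvaluationData; the counts below are arbitrary.
    data = EvaluationData()
    for i in range(8):
        data.addInstance(True, True)   # 8 true positives
    for i in range(2):
        data.addInstance(False, True)  # 2 false positives
    for i in range(4):
        data.addInstance(True, False)  # 4 false negatives
    data.calculateFScore()
    print data.toStringConcise()
    # prints roughly: p/n:12/2 tp/fp|tn/fn:8/2|0/4 p/r/f:0.8/0.6666/0.7272
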
def calculateFromCSV(rows, EvaluatorClass, classSet=None):
    if EvaluatorClass().type == "multiclass" and classSet == None:
        classSet = getClassSet(rows)

    predictions = []
    for row in rows:
        if classSet != None:
            predictions.append( ((row["id"],classSet.getId(row["class"])),classSet.getId(row["prediction"]),None,None) )
        else:
            predictions.append( ((row["id"],int(row["class"])),float(row["prediction"]),None,None) )
    # Calculate statistics
    return EvaluatorClass(predictions, classSet)

def getClassSet(rows, classSet=None):
    from Core.IdSet import IdSet
    classNames = set()
    for row in rows:
        classNames.add(row["class"])
        classNames.add(row["prediction"])

    # In the multiclass case, give integer ids to the classes
    if classSet == None:
        classSet = IdSet()
        assert(not ("1" in classNames and "neg" in classNames))
        assert("1" in classNames or "neg" in classNames)
        if "1" in classNames:
            classSet.defineId("1",1)
        else:
            classSet.defineId("neg",1)
    for i in sorted(list(classNames)):
        if i != "1" and i != "neg":
            classSet.getId(i)
    return classSet

def evaluateCSV(rows, options, EvaluatorClass = None):
    import sys, os
    sys.path.append("..")
    from Core.IdSet import IdSet
    import Utils.TableUtils as TableUtils

    if EvaluatorClass == None:
        print >> sys.stderr, "Importing modules"
        exec "from Evaluators." + options.evaluator + " import " + options.evaluator + " as EvaluatorClass"

    foldDict = {}
    for row in rows:
        if row["fold"] != None and row["fold"] != "":
            if not foldDict.has_key(row["fold"]):
                foldDict[row["fold"]] = []
            foldDict[row["fold"]].append(row)

    classSet = None
    if EvaluatorClass().type == "multiclass":
        classSet = getClassSet(rows)

    # Calculate performance per fold and the averages
    if len(foldDict) == 0:
        evaluator = calculateFromCSV(rows, EvaluatorClass, classSet)
        print >> sys.stderr, evaluator.toStringConcise(" ")
        if options.output != None:
            evaluator.saveCSV(options.output)
    else:
        evaluators = []
        for key in sorted(foldDict.keys()):
            print >> sys.stderr, "Fold", key
            evaluator = calculateFromCSV(foldDict[key], EvaluatorClass, classSet)
            print >> sys.stderr, evaluator.toStringConcise(" ")
            if options.output != None:
                evaluator.saveCSV(options.output, key)
            evaluators.append(evaluator)

        print >> sys.stderr, "Averages:"
        print >> sys.stderr, "Avg"
        averageResult = EvaluatorClass.average(evaluators)
        print >> sys.stderr, averageResult.toStringConcise(" ")
        pooledResult = EvaluatorClass.pool(evaluators)
        print >> sys.stderr, "Pool"
        print >> sys.stderr, pooledResult.toStringConcise(" ")
        if options.output != None:
            averageResult.saveCSV(options.output, "Avg")
            pooledResult.saveCSV(options.output, "Pool")

if __name__=="__main__":
    import sys, os
    # Import Psyco if available
    try:
        import psyco
        psyco.full()
        print >> sys.stderr, "Found Psyco, using"
    except ImportError:
        print >> sys.stderr, "Psyco not installed"
    sys.path.append("..")
    from Utils.ProgressCounter import ProgressCounter
    from Utils.Parameters import splitParameters
    from optparse import OptionParser
    import Core.ExampleUtils as ExampleUtils
    from Core.IdSet import IdSet
    import Utils.TableUtils as TableUtils
    optparser = OptionParser(usage="%prog [options]\nCalculate f-score and other statistics.")
    optparser.add_option("-i", "--input", default=None, dest="input", help="Input file in csv-format", metavar="FILE")
    optparser.add_option("-o", "--output", default=None, dest="output", help="Output file for the statistics")
    optparser.add_option("-e", "--evaluator", default="BinaryEvaluator", dest="evaluator", help="Prediction evaluator class")
    (options, args) = optparser.parse_args()

    print >> sys.stderr, "Importing modules"
    exec "from Evaluators." + options.evaluator + " import " + options.evaluator + " as EvaluatorClass"

    # Remove a pre-existing output file only if it actually exists
    if options.output != None and os.path.exists(options.output):
        print >> sys.stderr, "Output file exists, removing", options.output
        os.remove(options.output)

    # Read input data
    fieldnames = ["class","prediction","id","fold"]
    rows = TableUtils.readCSV(options.input, fieldnames)
    evaluateCSV(rows, options, EvaluatorClass)
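
For reference, a typical invocation of the command-line interface above might look like the following; the file names are placeholders, and the script is assumed to be run from the Evaluators directory so that the sys.path.append("..") calls can find the sibling Utils and Core packages. The input CSV is read with the column names class, prediction, id and fold.

    python Evaluator.py -i predictions.csv -e BinaryEvaluator -o statistics.csv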