Package TEES :: Package Utils :: Package Convert :: Module convertBioNLP

Source code for module TEES.Utils.Convert.convertBioNLP

  1  import sys, os, time, shutil 
  2  import tempfile 
  3  thisPath = os.path.dirname(os.path.abspath(__file__)) 
  4  sys.path.append(os.path.abspath(os.path.join(thisPath,"../.."))) 
  5  import Utils.STFormat.STTools as ST 
  6  import Utils.STFormat.ConvertXML as STConvert 
  7  import Utils.STFormat.Equiv 
  8  import Utils.STFormat.Validate 
  9  #import Utils.InteractionXML.RemoveUnconnectedEntities 
 10  import Utils.InteractionXML.DivideSets 
 11  import Utils.InteractionXML.MixSets 
 12  import Utils.ProteinNameSplitter as ProteinNameSplitter 
 13  import Utils.Stream as Stream 
 14  import Utils.FindHeads as FindHeads 
 15  import Tools.SentenceSplitter 
 16  import Tools.BLLIPParser 
 17  import Tools.StanfordParser 
 18  import Utils.ElementTreeUtils as ETUtils 
 19  import Evaluators.BioNLP11GeniaTools as BioNLP11GeniaTools 
 20  import Utils.Download 
 21  import Utils.Settings as Settings 
 22   
 23  moveBI = ["PMID-10333516-S3", "PMID-10503549-S4", "PMID-10788508-S10", "PMID-1906867-S3", 
 24            "PMID-9555886-S6", "PMID-10075739-S13", "PMID-10400595-S1", "PMID-10220166-S12"] 
 25   
def installPreconverted(url="BIONLP_CORPORA", destPath=None, downloadPath=None, redownload=False, updateLocalSettings=False):
    """
    Download and unpack the preconverted BioNLP Shared Task corpora package.

    url - key into Settings.URL pointing at the corpora archive
    destPath - extraction target, defaults to DATAPATH/corpora
    downloadPath - where the archive is downloaded, defaults to DATAPATH/corpora/download
    redownload - force re-downloading of the archive
    updateLocalSettings - persist CORPUS_DIR in the local settings file
    """
    sys.stderr.write("--------------- Downloading preconverted corpora ---------------\n")
    # Fill in the default locations under the TEES data path
    if destPath is None:
        destPath = os.path.join(Settings.DATAPATH, "corpora")
    if downloadPath is None:
        downloadPath = os.path.join(Settings.DATAPATH, "corpora/download")
    Utils.Download.downloadAndExtract(Settings.URL[url], destPath, downloadPath, redownload=redownload)
    # Remember where the corpora live so later processing steps can find them
    Settings.setLocal("CORPUS_DIR", destPath, updateLocalSettings)
34
def installEvaluators(destPath=None, downloadPath=None, redownload=False, updateLocalSettings=False):
    """
    Download and unpack the official BioNLP Shared Task evaluator programs.

    destPath - extraction target, defaults to DATAPATH/tools/evaluators
    downloadPath - where the archive is downloaded, defaults to DATAPATH/tools/download
    redownload - force re-downloading of the archive
    updateLocalSettings - persist the evaluator paths in the local settings file
    """
    sys.stderr.write("--------------- Downloading BioNLP Shared Task evaluators ---------------\n")
    destPath = os.path.join(Settings.DATAPATH, "tools/evaluators") if destPath is None else destPath
    downloadPath = os.path.join(Settings.DATAPATH, "tools/download") if downloadPath is None else downloadPath
    Utils.Download.downloadAndExtract(Settings.URL["BIONLP11_EVALUATORS"], destPath, downloadPath, redownload=redownload)
    # Record both the evaluator programs and their gold-standard data directory
    Settings.setLocal("BIONLP_EVALUATOR_DIR", destPath, updateLocalSettings)
    Settings.setLocal("BIONLP_EVALUATOR_GOLD_DIR", os.path.join(destPath, "gold"), updateLocalSettings)
44
def downloadCorpus(corpus, destPath=None, downloadPath=None, clear=False):
    """
    Download the source files of one BioNLP Shared Task corpus.

    corpus - corpus name, e.g. "GE", "EPI", "REL"
    destPath - base directory for extracted data (used for the TEES parses)
    downloadPath - where the packages are downloaded
    clear - force re-downloading of the packages

    Returns a dictionary mapping item names (e.g. "GE_DEVEL", "TEES_PARSES")
    to the downloaded file paths or extracted directories.
    """
    sys.stderr.write("--------------- Downloading BioNLP Shared Task files ---------------\n")
    files = {}
    # NOTE(review): finalDestPath is computed but never used below - looks
    # vestigial; kept to preserve the original behavior exactly.
    if destPath is None:
        finalDestPath = os.path.join(Settings.DATAPATH, "corpora/BioNLP11-original")
    else:
        finalDestPath = destPath
    # The annotation packages for the three dataset splits
    for split in ("_DEVEL", "_TRAIN", "_TEST"):
        files[corpus + split] = Utils.Download.download(Settings.URL[corpus + split], downloadPath, clear=clear)
    if corpus in ("REL", "REN", "CO"):
        # These corpora have no official parses; fetch the TEES-generated ones
        if destPath is None:
            teesParsePath = os.path.join(Settings.DATAPATH, "TEES-parses")
        else:
            teesParsePath = os.path.join(destPath, "TEES-parses")
        if downloadPath is None:
            downloadPath = os.path.join(Settings.DATAPATH, "download")
        Utils.Download.downloadAndExtract(Settings.URL["TEES_PARSES"], teesParsePath, downloadPath, redownload=clear)
        files["TEES_PARSES"] = teesParsePath
    else:
        # Official supporting analyses: BioNLP'09 ships one combined package,
        # the 2011 corpora ship separate token and McCC parse packages.
        # NOTE(review): assumes downloadPath is not None in this branch
        # (None + "/support/" would raise) - confirm against callers.
        suffixes = ["_ANALYSES"] if corpus == "GE09" else ["_TOKENS", "_McCC"]
        for suffix in suffixes:
            for split in ("_DEVEL", "_TRAIN", "_TEST"):
                files[corpus + split + suffix] = Utils.Download.download(Settings.URL[corpus + split + suffix], downloadPath + "/support/", clear=clear)
    return files
72
def convert(corpora, outDir=None, downloadDir=None, redownload=False, makeIntermediateFiles=True, evaluate=False):
    """
    Download and convert a list of BioNLP Shared Task corpora to Interaction XML.

    corpora - list of corpus names, e.g. ["GE", "EPI", "ID"]
    outDir - output directory; defaults to DATAPATH/corpora
    downloadDir - directory the source packages are downloaded to
    redownload - force re-downloading of the source packages
    makeIntermediateFiles - also write the intermediate combined-corpus XML files
    evaluate - round-trip the devel sets back to ST format and evaluate them
    """
    if outDir == None:
        # BUG FIX: the normalized default path was computed but never assigned,
        # leaving outDir as None so os.path.exists(outDir) raised a TypeError.
        outDir = os.path.normpath(Settings.DATAPATH + "/corpora")
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    else:
        assert os.path.isdir(outDir)
    count = 1
    for corpus in corpora:
        sys.stderr.write("======================= Converting BioNLP Shared Task " + corpus + " corpus (" + str(count) + "/" + str(len(corpora)) + ") =======================\n")
        # Each corpus gets its own conversion log file
        logFileName = outDir + "/conversion/" + corpus + "-conversion-log.txt"
        Stream.openLog(logFileName)
        downloaded = downloadCorpus(corpus, outDir, downloadDir, redownload)
        convertDownloaded(outDir, corpus, downloaded, makeIntermediateFiles, evaluate)
        Stream.closeLog(logFileName)
        count += 1
89
def corpusRENtoASCII(xml):
    """
    Normalize the REN corpus document texts to ASCII, in place.

    Maps the mojibake two-character sequences found in the REN corpus
    (UTF-8 bytes decoded as single characters, e.g. u"\\xc3\\xb6") to plain
    ASCII letters, and repairs one known citation typo ("Wikstram").

    xml - corpus element tree; every "document" element's "text" attribute
          is rewritten.
    """
    sys.stderr.write("Converting REN corpus to ASCII\n")
    # BUG FIX: Element.getiterator() was deprecated and removed in Python 3.9;
    # Element.iter() is the equivalent and exists since Python 2.7.
    for document in xml.iter("document"):
        text = document.get("text")
        # Mojibake pairs for o-umlaut, a-umlaut and e-acute
        text = text.replace(u"\xc3\xb6", u"a")
        text = text.replace(u"\xc3\xa4", u"a")
        text = text.replace(u"\xc3\xa9", u"e")
        # Known typo in one citation string
        text = text.replace("and Wikstram, M. (1991) Eur. J. Biochem. 197", "and Wikstrom, M. (1991) Eur. J. Biochem. 197")
        document.set("text", text)
99
def convertDownloaded(outdir, corpus, files, intermediateFiles=True, evaluate=True):
    """
    Convert one downloaded BioNLP Shared Task corpus to Interaction XML.

    outdir - directory for the converted corpus files
    corpus - corpus name, e.g. "GE", "EPI", "BI", "REN"
    files - dictionary of downloaded packages as returned by downloadCorpus
    intermediateFiles - also write the intermediate combined-corpus XML files
    evaluate - validate the data and round-trip the devel set back to ST
               format, evaluating it with the official tools
    """
    global moveBI
    if evaluate:
        # Fresh working directory for validation/round-trip output
        workdir = outdir + "/conversion/" + corpus
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
        os.makedirs(workdir)

    print >> sys.stderr, "---------------", "Converting to XML", "---------------"
    # All datasets are processed as one XML, to ensure all the steps (parse modification etc.) are
    # applied equally
    datasets = ["devel", "train", "test"]
    bigfileName = os.path.join(outdir, corpus + "-" + "-and-".join(datasets))
    documents = []
    for setName in datasets:
        sourceFile = files[corpus + "_" + setName.upper()]
        print >> sys.stderr, "Reading", setName, "set from", sourceFile
        # EPI is the only corpus where site annotations are real arguments
        sitesAreArguments = False
        if corpus == "EPI":
            sitesAreArguments = True
        docs = ST.loadSet(sourceFile, setName, "a2", sitesAreArguments=sitesAreArguments)
        print >> sys.stderr, "Read", len(docs), "documents"
        documents.extend(docs)

        # Save the corpus license text alongside the converted files
        if len(docs) > 0 and docs[0].license != None:
            licenseFile = open(os.path.join(outdir, corpus + "-LICENSE"), "wt")
            licenseFile.write(docs[0].license)
            licenseFile.close()

    print >> sys.stderr, "Resolving equivalences"
    Utils.STFormat.Equiv.process(documents)

    if evaluate:
        print >> sys.stderr, "Checking data validity"
        for doc in documents:
            Utils.STFormat.Validate.validate(doc.events, simulation=True, verbose=True, docId=doc.id)
        print >> sys.stderr, "Writing all documents to geniaformat"
        ST.writeSet(documents, os.path.join(workdir, "all-geniaformat"), resultFileTag="a2", debug=False, task=2, validate=False)

    if intermediateFiles:
        print >> sys.stderr, "Converting to XML, writing combined corpus to", bigfileName+"-documents.xml"
        xml = STConvert.toInteractionXML(documents, corpus, bigfileName+"-documents.xml")
    else:
        print >> sys.stderr, "Converting to XML"
        xml = STConvert.toInteractionXML(documents, corpus, None)

    # Corpus-specific fixups: BI moves a hand-picked list of sentences from
    # train to devel; REN needs its text normalized to ASCII
    if corpus == "BI":
        Utils.InteractionXML.MixSets.mixSets(xml, None, set(moveBI), "train", "devel")
    if corpus == "REN":
        corpusRENtoASCII(xml)

    # Insert sentence splits and parses, then post-process them
    addAnalyses(xml, corpus, datasets, files, bigfileName)
    if intermediateFiles:
        print >> sys.stderr, "Writing combined corpus", bigfileName+"-sentences.xml"
        ETUtils.write(xml, bigfileName+"-sentences.xml")
    processParses(xml)

    print >> sys.stderr, "---------------", "Writing corpora", "---------------"
    # Write out converted data
    if intermediateFiles:
        print >> sys.stderr, "Writing combined corpus", bigfileName+".xml"
        ETUtils.write(xml, bigfileName+".xml")
    print >> sys.stderr, "Dividing into sets"
    Utils.InteractionXML.DivideSets.processCorpus(xml, outdir, corpus, ".xml")

    if evaluate and "devel" in datasets:
        # Round-trip the devel set back to ST format and score it with the
        # official evaluators to verify the conversion is lossless
        print >> sys.stderr, "---------------", "Evaluating conversion", "---------------"
        if corpus != "REL": # Task 1 (removal of Entity-entities) cannot work for REL
            print >> sys.stderr, "Evaluating task 1 back-conversion"
            STConvert.toSTFormat(os.path.join(outdir, corpus + "-devel.xml"), workdir + "/roundtrip/" + corpus + "-devel" + "-task1", outputTag="a2", task=1)
            BioNLP11GeniaTools.evaluate(workdir + "/roundtrip/" + corpus + "-devel" + "-task1", corpus + ".1")
        print >> sys.stderr, "Evaluating task 2 back-conversion"
        STConvert.toSTFormat(os.path.join(outdir, corpus + "-devel.xml"), workdir + "/roundtrip/" + corpus + "-devel" + "-task2", outputTag="a2", task=2)
        BioNLP11GeniaTools.evaluate(workdir + "/roundtrip/" + corpus + "-devel" + "-task2", corpus + ".2")
        print >> sys.stderr, "Note! Evaluation of Task 2 back-conversion can be less than 100% due to site-argument mapping"
175
def addAnalyses(xml, corpus, datasets, files, bigfileName):
    """
    Insert sentence splits and parses into the corpus XML, in place.

    Three sources are supported: TEES-generated parses (for corpora with no
    official ones), the BioNLP'09 analysis package, and the official
    BioNLP'11 token/McCC packages.

    xml - combined corpus Interaction XML
    corpus - corpus name
    datasets - list of dataset names, e.g. ["devel", "train", "test"]
    files - dictionary of downloaded packages as returned by downloadCorpus
    bigfileName - NOTE(review): unused in this function; kept for interface
                  compatibility with the caller
    """
    if "TEES_PARSES" in files: # corpus for which no official parse exists
        print >> sys.stderr, "---------------", "Inserting TEES-generated analyses", "---------------"
        extractedFilename = files["TEES_PARSES"] + "/" + corpus
        print >> sys.stderr, "Making sentences"
        Tools.SentenceSplitter.makeSentences(xml, extractedFilename, None)
        print >> sys.stderr, "Inserting McCC parses"
        Tools.BLLIPParser.insertParses(xml, extractedFilename, None, extraAttributes={"source":"TEES-preparsed"})
        print >> sys.stderr, "Inserting Stanford conversions"
        Tools.StanfordParser.insertParses(xml, extractedFilename, None, extraAttributes={"stanfordSource":"TEES-preparsed"})
    elif corpus == "GE09": # the BioNLP'09 corpus
        for i in range(len(datasets)):
            print >> sys.stderr, "---------------", "Inserting analyses " + str(i+1) + "/" + str(len(datasets)), "---------------"
            setName = datasets[i]
            print >> sys.stderr, "Inserting", setName, "analyses"
            # Extract the per-set analysis package into a temporary directory
            tempdir = tempfile.mkdtemp()
            analysesSetName = corpus + "_" + setName.upper() + "_ANALYSES"
            packagePath = Utils.Download.getTopDir(tempdir, Utils.Download.extractPackage(files[analysesSetName], tempdir))
            print >> sys.stderr, "Making sentences"
            # The '09 tokenization uses the BLLIP escape conventions
            Tools.SentenceSplitter.makeSentences(xml, packagePath + "/tokenized", None, escDict=Tools.BLLIPParser.escDict)
            print >> sys.stderr, "Inserting McCC parses"
            Tools.BLLIPParser.insertParses(xml, packagePath + "/McClosky-Charniak/pstree", None, extraAttributes={"source":"BioNLP'09"})
            print >> sys.stderr, "Inserting Stanford conversions"
            Tools.StanfordParser.insertParses(xml, packagePath + "/McClosky-Charniak/dep", None, skipExtra=1, extraAttributes={"stanfordSource":"BioNLP'09"})
            print >> sys.stderr, "Removing temporary directory", tempdir
            shutil.rmtree(tempdir)
    else: # use official BioNLP'11 parses
        for i in range(len(datasets)):
            print >> sys.stderr, "---------------", "Inserting analyses " + str(i+1) + "/" + str(len(datasets)), "---------------"
            setName = datasets[i]
            print >> sys.stderr, "Inserting", setName, "analyses"
            # Extract both the token and the McCC parse package for this set
            tempdir = tempfile.mkdtemp()
            Utils.Download.extractPackage(files[corpus + "_" + setName.upper() + "_TOKENS"], tempdir)
            Utils.Download.extractPackage(files[corpus + "_" + setName.upper() + "_McCC"], tempdir)
            print >> sys.stderr, "Making sentences"
            # The extracted directory name is derived from the archive file
            # name: strip ".tar.gz" and the leading corpus prefix component
            Tools.SentenceSplitter.makeSentences(xml, tempdir + "/" + os.path.basename(files[corpus + "_" + setName.upper() + "_TOKENS"])[:-len(".tar.gz")].split("-", 1)[-1] + "/tokenised", None)
            print >> sys.stderr, "Inserting McCC parses"
            Tools.BLLIPParser.insertParses(xml, tempdir + "/" + os.path.basename(files[corpus + "_" + setName.upper() + "_McCC"])[:-len(".tar.gz")].split("-", 2)[-1] + "/mccc/ptb", None, extraAttributes={"source":"BioNLP'11"})
            print >> sys.stderr, "Inserting Stanford conversions"
            Tools.StanfordParser.insertParses(xml, tempdir + "/" + os.path.basename(files[corpus + "_" + setName.upper() + "_McCC"])[:-len(".tar.gz")].split("-", 2)[-1] + "/mccc/sd_ccproc", None, extraAttributes={"stanfordSource":"BioNLP'11"})
            print >> sys.stderr, "Removing temporary directory", tempdir
            shutil.rmtree(tempdir)
218
def processParses(xml, splitTarget="McCC"):
    """
    Post-process the inserted parses, in place: split multi-token protein
    names and detect the syntactic heads of entities.

    xml - corpus Interaction XML with parses already inserted
    splitTarget - name of the tokenization/parse to process
    """
    sys.stderr.write("Protein Name Splitting\n")
    ProteinNameSplitter.mainFunc(xml, None, splitTarget, removeOld=True)
    sys.stderr.write("Head Detection\n")
    xml = FindHeads.findHeads(xml, splitTarget, tokenization=None, output=None, removeExisting=True)
if __name__=="__main__":
    # Optional Psyco JIT acceleration (legacy Python 2 extension); best-effort
    try:
        import psyco
        psyco.full()
        sys.stderr.write("Found Psyco, using\n")
    except ImportError:
        sys.stderr.write("Psyco not installed\n")

    # Command-line interface for corpus conversion and evaluator installation
    from optparse import OptionParser
    from Utils.Parameters import *
    optparser = OptionParser(usage="%prog [options]\nBioNLP'11 Shared Task corpus conversion")
    optparser.add_option("-c", "--corpora", default=None, dest="corpora", help="corpus names in a comma-separated list, e.g. \"GE,EPI,ID\"")
    optparser.add_option("-e", "--evaluators", default=False, action="store_true", dest="evaluators", help="Install evaluators")
    optparser.add_option("-o", "--outdir", default=None, dest="outdir", help="directory for output files")
    optparser.add_option("-d", "--downloaddir", default=None, dest="downloaddir", help="directory to download corpus files to")
    optparser.add_option("--intermediateFiles", default=False, action="store_true", dest="intermediateFiles", help="save intermediate corpus files")
    optparser.add_option("--forceDownload", default=False, action="store_true", dest="forceDownload", help="re-download all source files")
    optparser.add_option("--evaluate", default=False, action="store_true", dest="evaluate", help="Convert devel sets back to ST format and evaluate")
    (options, args) = optparser.parse_args()

    if options.evaluators:
        installEvaluators(options.outdir, options.downloaddir, options.forceDownload)
    if options.corpora is not None:
        convert(options.corpora.split(","), options.outdir, options.downloaddir, options.forceDownload, options.intermediateFiles, evaluate=options.evaluate)