forked from dbpedia/fact-extractor
get_meaningful_sentences.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Filter POS-tagged sentences, keeping only those that contain a
noun-phrase/verb/noun-phrase chunk whose verb appears in a given token
list, and write the surviving sentences out one per line."""
import codecs
import os
import sys
from collections import defaultdict

from nltk import RegexpParser

# Cascaded chunk grammar over TreeTagger-style Italian tags: SN ("sintagma
# nominale") is a noun phrase; CHUNK is a noun phrase, one or more verbs,
# then another noun phrase.
CHUNKER_GRAMMAR = r"""
SN: {<PRO.*|DET.*>?<ADJ>*<NUM>?<NOM|NPR>+<NUM>?<ADJ|VER:pper>*}
CHUNK: {<SN><VER.*>+<SN>}
"""


def load_tokens(tokens_file):
    """Load the verb whitelist, one token per line, as a set."""
    with codecs.open(tokens_file, 'rb', 'utf-8') as i:
        return set(l.strip() for l in i if l.strip())


def load_pos_data(directory):
    """Read one POS-tagged sentence per file under `directory`.

    Each line should hold a token and its POS tag separated by a tab;
    returns a mapping from file path to a list of (token, pos) pairs.
    """
    pos_data = defaultdict(list)
    for path, subdirs, files in os.walk(directory):
        for name in files:
            f = os.path.join(path, name)
            # Key by full path so same-named files in different
            # subdirectories are not merged into one sentence.
            with codecs.open(f, 'rb', 'utf-8') as data:
                for line in data:
                    parts = line.rstrip('\n').split('\t')
                    if len(parts) > 1:
                        token, pos = parts[0], parts[1]
                        pos_data[f].append((token, pos))
    return pos_data


def filter_sentences_by_chunk(pos_data, tokens):
    """Keep sentences containing a CHUNK whose verb is in `tokens`."""
    chunker = RegexpParser(CHUNKER_GRAMMAR)
    filtered = []
    for data in pos_data.values():
        result = chunker.parse(data)
        good_one = False
        for chunk in result.subtrees(lambda t: t.label() == 'CHUNK'):
            for token, pos in chunk.leaves():
                if 'VER' in pos and token in tokens:
                    good_one = True
        if good_one:
            filtered.append(' '.join(item[0] for item in data))
    return filtered


def write_sentences(sentences, outfile='gold'):
    """Write one sentence per line to `outfile` (default 'gold')."""
    with codecs.open(outfile, 'wb', 'utf-8') as o:
        o.writelines([s + '\n' for s in sentences])


if __name__ == "__main__":
    if len(sys.argv) not in (3, 4):
        print("Usage: python %s <POS_DATA_DIR> <TOKENS_FILE> [OUTPUT_FILE]" % __file__)
        sys.exit(1)
    pos_data = load_pos_data(sys.argv[1])
    tokens = load_tokens(sys.argv[2])
    sentences = filter_sentences_by_chunk(pos_data, tokens)
    if len(sys.argv) == 4:
        write_sentences(sentences, sys.argv[3])
    else:
        write_sentences(sentences)
    sys.exit(0)
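
For context, a minimal sketch of what the cascaded grammar and filter do on a single hand-tagged sentence. The sentence, its TreeTagger-style Italian tags, and the verb whitelists below are illustrative assumptions, not data from the original corpus; the snippet assumes it is saved next to this script so the module can be imported.

from nltk import RegexpParser

from get_meaningful_sentences import (CHUNKER_GRAMMAR,
                                      filter_sentences_by_chunk)

# Toy sentence "Totti segna il gol" ("Totti scores the goal"), hand-tagged
# with assumed TreeTagger-style Italian tags.
tagged = [('Totti', 'NPR'), ('segna', 'VER:pres'),
          ('il', 'DET:def'), ('gol', 'NOM')]

# The SN rule chunks "Totti" and "il gol" into noun phrases, then the
# CHUNK rule wraps the resulting SN-verb-SN sequence:
print(RegexpParser(CHUNKER_GRAMMAR).parse(tagged))
# (S (CHUNK (SN Totti/NPR) segna/VER:pres (SN il/DET:def gol/NOM)))

# A sentence survives only if the verb inside a CHUNK is whitelisted.
print(filter_sentences_by_chunk({'s1': tagged}, {'segna'}))  # ['Totti segna il gol']
print(filter_sentences_by_chunk({'s1': tagged}, {'vince'}))  # []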