-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathMatchingAnnotator.java
More file actions
242 lines (213 loc) · 9.01 KB
/
MatchingAnnotator.java
File metadata and controls
242 lines (213 loc) · 9.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
package de.datexis.ner;
import de.datexis.annotator.Annotator;
import de.datexis.common.Resource;
import de.datexis.common.WordHelpers;
import de.datexis.model.Annotation;
import de.datexis.model.Document;
import de.datexis.model.Token;
import net.amygdalum.stringsearchalgorithms.search.MatchOption;
import net.amygdalum.stringsearchalgorithms.search.StringFinder;
import net.amygdalum.stringsearchalgorithms.search.StringMatch;
import net.amygdalum.stringsearchalgorithms.search.chars.SetBackwardOracleMatching;
import net.amygdalum.stringsearchalgorithms.search.chars.StringSearchAlgorithm;
import net.amygdalum.util.io.CharProvider;
import net.amygdalum.util.io.StringCharProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* An Annotator that creates MentionAnnotations based on a term list for String matching.
* @author Sebastian Arnold <sarnold@beuth-hochschule.de>
*/
public class MatchingAnnotator extends Annotator {

  protected final static Logger log = LoggerFactory.getLogger(MatchingAnnotator.class);

  /** Term-normalization strategy applied both when loading the dictionary and when matching. */
  public enum MatchingStrategy { CASE_SENSITIVE, LOWERCASE, LEMMA, SKIP_STOPWORDS }

  protected int minimumWordLength = 3; // absolute minimum word length
  protected String type = MentionAnnotation.Type.GENERIC;
  // matches words of length > 3, so that "UPS" will never match "ups"
  protected Pattern wordLengthMatcher = Pattern.compile("\\b\\w{4,}\\b");
  // matches words consisting only of uppercase letters and digits (acronyms such as "NASA")
  protected Pattern uppercaseMatcher = Pattern.compile("^[A-Z0-9]+$");
  protected StringSearchAlgorithm stringSearch;
  protected WordHelpers wordHelpers = new WordHelpers(WordHelpers.Language.EN);
  Collection<String> terms = new ArrayList<>();
  protected MatchingStrategy matchingStrategy = MatchingStrategy.CASE_SENSITIVE;
  protected Annotation.Source source = Annotation.Source.SILVER;

  /** Creates a case-sensitive annotator producing SILVER annotations. */
  public MatchingAnnotator() {
    this(MatchingStrategy.CASE_SENSITIVE, Annotation.Source.SILVER);
  }

  public MatchingAnnotator(MatchingStrategy matchingStrategy) {
    this(matchingStrategy, Annotation.Source.SILVER);
  }

  public MatchingAnnotator(MatchingStrategy matchLowercase, Annotation.Source source) {
    super();
    this.matchingStrategy = matchLowercase;
    this.source = source;
  }

  public MatchingAnnotator(MatchingStrategy matchLowercase, Annotation.Source source, String type) {
    this(matchLowercase, source);
    this.type = type;
  }

  public MatchingAnnotator(MatchingStrategy matchLowercase, Annotation.Source source, String type, int minWordLength) {
    this(matchLowercase, source, type);
    this.minimumWordLength = minWordLength;
  }

  /**
   * Normalizes a stream of terms according to the configured {@link MatchingStrategy}.
   * @return distinct normalized terms; for all strategies except CASE_SENSITIVE they are
   *         sorted by descending length so longer terms take precedence
   */
  protected Collection<String> convertTerms(Stream<String> terms) {
    // comparingInt avoids the int-overflow pitfall of the (a.length() - b.length()) idiom
    Comparator<String> compByLength = Comparator.comparingInt(String::length);
    switch(matchingStrategy) {
      case LOWERCASE:
        return terms
            .filter(w -> w.length() >= minimumWordLength)
            .map(this::convertToLowercase)
            .distinct()
            .sorted(compByLength.reversed())
            .collect(Collectors.toList());
      case LEMMA:
        return terms
            .filter(w -> w.length() >= minimumWordLength)
            .map(w -> removePlurals(convertToLowercase(w)))
            .distinct()
            .sorted(compByLength.reversed())
            .collect(Collectors.toList());
      case SKIP_STOPWORDS:
        return terms
            .filter(w -> w.length() >= minimumWordLength && !wordHelpers.isStopWord(w))
            .distinct()
            .sorted(compByLength.reversed())
            .collect(Collectors.toList());
      default:
        return terms.distinct().collect(Collectors.toList());
    }
  }

  /** Rebuilds the string-search automaton from the current term list. */
  private void rebuildSearch() {
    // AhoCorasick - fast for small matches, correct but memory-intensive
    // WuManber - correct matches, but slow
    // SetBackwardOracleMatching - super fast, but invalid LONGEST_MATCH
    stringSearch = new SetBackwardOracleMatching(this.terms);
  }

  /** Removes all terms from the dictionary. */
  public void clearTermsToMatch() {
    this.terms.clear();
    rebuildSearch();
  }

  public void loadTermsToMatch(Collection<String> terms) {
    loadTermsToMatch(terms.stream());
  }

  /** Adds the given terms (after normalization) and rebuilds the search automaton. */
  public void loadTermsToMatch(Stream<String> terms) {
    this.terms.addAll(convertTerms(terms));
    log.info("Rebuilding dictionary with {} distinct terms", this.terms.size());
    rebuildSearch();
  }

  /**
   * Loads terms from a file (one term per line, UTF-8) or recursively from all
   * regular files in a directory.
   * @throws FileNotFoundException if the path is neither a file nor a directory
   */
  public void loadTermsToMatch(Resource path) throws IOException {
    if(path.isDirectory()) {
      // Files.walk returns a lazily-populated Stream that holds an open directory
      // handle; it must be closed via try-with-resources to avoid a resource leak
      try(Stream<Path> paths = Files.walk(path.getPath())) {
        paths.filter(p -> Files.isRegularFile(p, LinkOption.NOFOLLOW_LINKS))
            .forEach(p -> {
              try {
                loadTermsToMatch(Resource.fromFile(p.toString()));
              } catch(IOException ex) {
                // checked IOException is not allowed inside a Stream lambda
                log.error(ex.toString());
              }
            });
      }
    } else if(path.isFile()) {
      try(BufferedReader br = new BufferedReader(new InputStreamReader(path.getInputStream(), StandardCharsets.UTF_8))) {
        loadTermsToMatch(br.lines());
      }
    } else throw new FileNotFoundException("cannot open path: " + path.toString());
  }

  public void deleteTermsToMatch(Collection<String> terms) {
    deleteTermsToMatch(terms.stream());
  }

  /** Removes the given terms (after normalization) and rebuilds the search automaton. */
  public void deleteTermsToMatch(Stream<String> terms) {
    this.terms.removeAll(convertTerms(terms));
    log.info("Rebuilding dictionary with {} distinct terms", this.terms.size());
    rebuildSearch();
  }

  public void deleteTermsToMatch(Resource path) throws IOException {
    try(BufferedReader br = new BufferedReader(new InputStreamReader(path.getInputStream(), StandardCharsets.UTF_8))) {
      deleteTermsToMatch(br.lines());
    }
  }

  /** @return the number of terms currently in the dictionary */
  public int countTerms() {
    return terms.size();
  }

  /**
   * @return text with all words >3 chars converted to lowercase. All-uppercase words
   *         (acronyms) are only lowercased from length 8 on, so that e.g. "UPS" will
   *         never match "ups".
   */
  protected String convertToLowercase(String text) {
    Matcher m = wordLengthMatcher.matcher(text);
    StringBuffer sb = new StringBuffer();
    while(m.find()) {
      String match = m.group();
      // appendReplacement is safe here without quoting: \w matches contain no '$' or '\'
      Matcher u = uppercaseMatcher.matcher(match);
      if(u.matches()) { // all uppercase
        if(match.length() >= 8) m.appendReplacement(sb, match.toLowerCase());
      } else {
        m.appendReplacement(sb, match.toLowerCase());
      }
    }
    m.appendTail(sb);
    return sb.toString();
  }

  /** Reduces a term to a singular/lemma form. Not implemented yet. */
  protected String removePlurals(String text) {
    // TODO: use OpenNLP Lemmatizer
    throw new UnsupportedOperationException("Lemma matching is not yet implemented.");
  }

  @Override
  public void annotate(Collection<Document> docs) {
    annotate(docs, source);
  }

  /**
   * Annotates a Dataset using the pre-trained list.
   * @param docs - the Documents to annotate
   * @param source - the type of annotations to create, e.g. SILVER
   */
  public void annotate(Iterable<Document> docs, Annotation.Source source) {
    // guard once up front instead of per-document inside the loop
    if(stringSearch == null) {
      log.warn("MatchingAnnotator called without terms loaded");
      return;
    }
    for(Document doc : docs) {
      // see http://stringsearchalgorithms.amygdalum.net/
      String text = doc.getText();
      // NOTE(review): assumes lowercasing preserves string length so that match
      // offsets remain valid against the original text — TODO confirm for locale edge cases
      if(matchingStrategy.equals(MatchingStrategy.LOWERCASE)) text = convertToLowercase(text);
      CharProvider chars = new StringCharProvider(text, 0);
      StringFinder finder = stringSearch.createFinder(chars, MatchOption.LONGEST_MATCH, MatchOption.NON_OVERLAP);
      for(StringMatch match : finder.findAll()) {
        int begin = (int) match.start();
        int end = (int) match.end();
        final List<Token> list = doc.streamTokensInRange(begin, end, true).collect(Collectors.toList());
        // only annotate matches that align exactly with token boundaries
        if(spanIsAtTokenBoundaries(list, begin, end)) {
          MentionAnnotation ann = new MentionAnnotation(source, list);
          ann.setType(type);
          doc.addAnnotation(ann);
        }
      }
    }
  }

  /**
   * @return True, if the given span is exactly at a word boundary, i.e. it starts at the
   *         first Token's begin and ends at the last Token's end.
   */
  private boolean spanIsAtTokenBoundaries(List<Token> list, int begin, int end) {
    if(list.isEmpty()) return false;
    // the single-token case is covered by the general first/last comparison
    return list.get(0).getBegin() == begin && list.get(list.size() - 1).getEnd() == end;
  }

}