@@ -130,35 +130,35 @@ default Collection<String> exactRequirements() {
put(STANFORD_CDC_TOKENIZE, new LinkedHashSet<>(Arrays.asList()));
put(STANFORD_CLEAN_XML, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
put(STANFORD_SSPLIT, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
- put(STANFORD_MWT, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT)));
+ put(STANFORD_MWT, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
put(STANFORD_DOCDATE, new LinkedHashSet<>(Arrays.asList()));
- put(STANFORD_POS, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT)));
- put(STANFORD_LEMMA, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS)));
- put(STANFORD_NER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA)));
+ put(STANFORD_POS, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
+ put(STANFORD_LEMMA, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS)));
+ put(STANFORD_NER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA)));
put(STANFORD_TOKENSREGEX, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
- put(STANFORD_REGEXNER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT)));
- put(STANFORD_ENTITY_MENTIONS, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER)));
- put(STANFORD_GENDER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER)));
- put(STANFORD_TRUECASE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT)));
- put(STANFORD_PARSE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS)));
- put(STANFORD_DETERMINISTIC_COREF, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_PARSE)));
- put(STANFORD_COREF, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_DEPENDENCIES)));
- put(STANFORD_COREF_MENTION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_DEPENDENCIES)));
- put(STANFORD_RELATION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_PARSE, STANFORD_DEPENDENCIES)));
- put(STANFORD_SENTIMENT, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_PARSE)));
+ put(STANFORD_REGEXNER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
+ put(STANFORD_ENTITY_MENTIONS, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER)));
+ put(STANFORD_GENDER, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER)));
+ put(STANFORD_TRUECASE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE)));
+ put(STANFORD_PARSE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS)));
+ put(STANFORD_DETERMINISTIC_COREF, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_PARSE)));
+ put(STANFORD_COREF, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_DEPENDENCIES)));
+ put(STANFORD_COREF_MENTION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_DEPENDENCIES)));
+ put(STANFORD_RELATION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_PARSE, STANFORD_DEPENDENCIES)));
+ put(STANFORD_SENTIMENT, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_PARSE)));
put(STANFORD_COLUMN_DATA_CLASSIFIER, new LinkedHashSet<>(Arrays.asList()));
- put(STANFORD_DEPENDENCIES, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS)));
- put(STANFORD_NATLOG, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_DEPENDENCIES)));
- put(STANFORD_OPENIE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_DEPENDENCIES, STANFORD_NATLOG)));
- put(STANFORD_QUOTE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_COREF)));
- put(STANFORD_QUOTE_ATTRIBUTION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_COREF_MENTION, STANFORD_DEPENDENCIES, STANFORD_QUOTE)));
- put(STANFORD_UD_FEATURES, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_DEPENDENCIES)));
- put(STANFORD_LINK, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_DEPENDENCIES, STANFORD_LEMMA, STANFORD_NER, STANFORD_ENTITY_MENTIONS)));
+ put(STANFORD_DEPENDENCIES, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS)));
+ put(STANFORD_NATLOG, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_DEPENDENCIES)));
+ put(STANFORD_OPENIE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_DEPENDENCIES, STANFORD_NATLOG)));
+ put(STANFORD_QUOTE, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_COREF)));
+ put(STANFORD_QUOTE_ATTRIBUTION, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_LEMMA, STANFORD_NER, STANFORD_COREF_MENTION, STANFORD_DEPENDENCIES, STANFORD_QUOTE)));
+ put(STANFORD_UD_FEATURES, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_DEPENDENCIES)));
+ put(STANFORD_LINK, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_DEPENDENCIES, STANFORD_LEMMA, STANFORD_NER, STANFORD_ENTITY_MENTIONS)));
// TODO: there are language specific dependencies which we may
// want to encode somehow. For example, English KBP needs coref
// to function. Spanish KBP doesn't need coref, and in fact,
// Spanish coref doesn't even exist.
- put(STANFORD_KBP, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_SSPLIT, STANFORD_POS, STANFORD_DEPENDENCIES, STANFORD_LEMMA, STANFORD_NER)));
+ put(STANFORD_KBP, new LinkedHashSet<>(Arrays.asList(STANFORD_TOKENIZE, STANFORD_POS, STANFORD_DEPENDENCIES, STANFORD_LEMMA, STANFORD_NER)));
}};

}
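For context, each entry above maps an annotator to the exact set of upstream annotators it needs, and the change drops STANFORD_SSPLIT from every requirement set, which suggests sentence splitting is now expected to happen as part of tokenization rather than as a separately scheduled step. The snippet below is a minimal, self-contained sketch of how a requirements map shaped like this one can be expanded into a dependency-ordered annotator list; the class name RequirementsDemo, the toy map literal, and the expand/visit helpers are illustrative assumptions, not CoreNLP's actual pipeline code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative sketch only: expands a requested annotator into the ordered
// list of annotators it transitively requires, using a map shaped like the
// requirements map in the diff above. All names here are assumptions.
public class RequirementsDemo {

  static final Map<String, Set<String>> REQUIREMENTS = new LinkedHashMap<String, Set<String>>() {{
    put("tokenize", new LinkedHashSet<>());
    put("pos", new LinkedHashSet<>(Arrays.asList("tokenize")));
    put("lemma", new LinkedHashSet<>(Arrays.asList("tokenize", "pos")));
    put("ner", new LinkedHashSet<>(Arrays.asList("tokenize", "pos", "lemma")));
  }};

  // Depth-first expansion: prerequisites are emitted before the annotator itself.
  static List<String> expand(String annotator) {
    LinkedHashSet<String> ordered = new LinkedHashSet<>();
    visit(annotator, ordered);
    return new ArrayList<>(ordered);
  }

  private static void visit(String annotator, LinkedHashSet<String> ordered) {
    if (ordered.contains(annotator)) {
      return;  // already scheduled earlier in the expansion
    }
    for (String req : REQUIREMENTS.getOrDefault(annotator, Collections.emptySet())) {
      visit(req, ordered);
    }
    ordered.add(annotator);  // added only after all of its prerequisites
  }

  public static void main(String[] args) {
    // Prints: [tokenize, pos, lemma, ner]
    System.out.println(expand("ner"));
  }
}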