Skip to content

Commit caf376c

Browse files
authored
Start building analysis-common module (#23614)
Start moving built-in analysis components into the new analysis-common module. The goals of this project are: 1. Remove core's dependency on lucene-analyzers-common.jar, which should shrink the dependencies for the transport client and the high level rest client. 2. Prove that analysis plugins can do all the "built in" things by moving all "built in" behavior to a plugin. 3. Force tests not to depend on any oddball analyzer behavior. If tests need anything more than the standard analyzer, they can use the mock analyzer provided by Lucene's test infrastructure.
1 parent 151a65e commit caf376c

File tree

33 files changed

+956
-473
lines changed

33 files changed

+956
-473
lines changed

buildSrc/src/main/resources/checkstyle_suppressions.xml

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1096,7 +1096,6 @@
10961096
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergeSchedulerConfig.java" checks="LineLength" />
10971097
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
10981098
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]VersionType.java" checks="LineLength" />
1099-
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]ASCIIFoldingTokenFilterFactory.java" checks="LineLength" />
11001099
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AbstractCharFilterFactory.java" checks="LineLength" />
11011100
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AbstractIndexAnalyzerProvider.java" checks="LineLength" />
11021101
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AbstractTokenFilterFactory.java" checks="LineLength" />
@@ -1225,8 +1224,6 @@
12251224
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]UpperCaseTokenFilterFactory.java" checks="LineLength" />
12261225
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WhitespaceAnalyzerProvider.java" checks="LineLength" />
12271226
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WhitespaceTokenizerFactory.java" checks="LineLength" />
1228-
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WordDelimiterGraphTokenFilterFactory.java" checks="LineLength" />
1229-
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WordDelimiterTokenFilterFactory.java" checks="LineLength" />
12301227
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]compound[/\\]AbstractCompoundWordTokenFilterFactory.java" checks="LineLength" />
12311228
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]compound[/\\]DictionaryCompoundWordTokenFilterFactory.java" checks="LineLength" />
12321229
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]compound[/\\]HyphenationCompoundWordTokenFilterFactory.java" checks="LineLength" />
@@ -2686,11 +2683,8 @@
26862683
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SettingsListenerIT.java" checks="LineLength" />
26872684
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]VersionTypeTests.java" checks="LineLength" />
26882685
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]WaitUntilRefreshIT.java" checks="LineLength" />
2689-
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]ASCIIFoldingTokenFilterFactoryTests.java" checks="LineLength" />
26902686
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistryTests.java" checks="LineLength" />
26912687
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisTests.java" checks="LineLength" />
2692-
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisTestsHelper.java" checks="LineLength" />
2693-
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]BaseWordDelimiterTokenFilterFactoryTestCase.java" checks="LineLength" />
26942688
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CJKFilterFactoryTests.java" checks="LineLength" />
26952689
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CharFilterTests.java" checks="LineLength" />
26962690
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CompoundAnalysisTests.java" checks="LineLength" />
@@ -2709,8 +2703,6 @@
27092703
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]StemmerTokenFilterFactoryTests.java" checks="LineLength" />
27102704
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]StopAnalyzerTests.java" checks="LineLength" />
27112705
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]StopTokenFilterTests.java" checks="LineLength" />
2712-
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WordDelimiterGraphTokenFilterFactoryTests.java" checks="LineLength" />
2713-
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]WordDelimiterTokenFilterFactoryTests.java" checks="LineLength" />
27142706
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]commongrams[/\\]CommonGramsTokenFilterFactoryTests.java" checks="LineLength" />
27152707
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]filter1[/\\]MyFilterTokenFilterFactory.java" checks="LineLength" />
27162708
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]synonyms[/\\]SynonymsAnalysisTests.java" checks="LineLength" />

core/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenFilterFactory.java

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,4 +71,9 @@ public TokenStream create(TokenStream tokenStream) {
7171

7272
return result;
7373
}
74+
75+
@Override
76+
public boolean breaksFastVectorHighlighter() {
77+
return true;
78+
}
7479
}

core/src/main/java/org/elasticsearch/index/analysis/TokenFilterFactory.java

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,10 +20,20 @@
2020
package org.elasticsearch.index.analysis;
2121

2222
import org.apache.lucene.analysis.TokenStream;
23+
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
24+
import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
2325

2426
public interface TokenFilterFactory {
25-
2627
String name();
2728

2829
TokenStream create(TokenStream tokenStream);
30+
31+
/**
32+
* Does this analyzer mess up the {@link OffsetAttribute}s in such as way as to break the
33+
* {@link FastVectorHighlighter}? If this is {@code true} then the
34+
* {@linkplain FastVectorHighlighter} will attempt to work around the broken offsets.
35+
*/
36+
default boolean breaksFastVectorHighlighter() {
37+
return false;
38+
}
2939
}

core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@
2525
import org.elasticsearch.common.settings.Settings;
2626
import org.elasticsearch.env.Environment;
2727
import org.elasticsearch.index.IndexSettings;
28-
import org.elasticsearch.index.analysis.ASCIIFoldingTokenFilterFactory;
2928
import org.elasticsearch.index.analysis.AnalysisRegistry;
3029
import org.elasticsearch.index.analysis.AnalyzerProvider;
3130
import org.elasticsearch.index.analysis.ApostropheFilterFactory;
@@ -140,8 +139,6 @@
140139
import org.elasticsearch.index.analysis.UpperCaseTokenFilterFactory;
141140
import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
142141
import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
143-
import org.elasticsearch.index.analysis.WordDelimiterGraphTokenFilterFactory;
144-
import org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory;
145142
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
146143
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
147144
import org.elasticsearch.plugins.AnalysisPlugin;
@@ -205,7 +202,6 @@ private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(Li
205202
NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = new NamedRegistry<>("token_filter");
206203
tokenFilters.register("stop", StopTokenFilterFactory::new);
207204
tokenFilters.register("reverse", ReverseTokenFilterFactory::new);
208-
tokenFilters.register("asciifolding", ASCIIFoldingTokenFilterFactory::new);
209205
tokenFilters.register("length", LengthTokenFilterFactory::new);
210206
tokenFilters.register("lowercase", LowerCaseTokenFilterFactory::new);
211207
tokenFilters.register("uppercase", UpperCaseTokenFilterFactory::new);
@@ -225,8 +221,6 @@ private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(Li
225221
tokenFilters.register("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
226222
tokenFilters.register("snowball", SnowballTokenFilterFactory::new);
227223
tokenFilters.register("stemmer", StemmerTokenFilterFactory::new);
228-
tokenFilters.register("word_delimiter", WordDelimiterTokenFilterFactory::new);
229-
tokenFilters.register("word_delimiter_graph", WordDelimiterGraphTokenFilterFactory::new);
230224
tokenFilters.register("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
231225
tokenFilters.register("elision", ElisionTokenFilterFactory::new);
232226
tokenFilters.register("flatten_graph", FlattenGraphTokenFilterFactory::new);

core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FragmentBuilderHelper.java

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,9 @@
2626
import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo;
2727
import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
2828
import org.apache.lucene.util.CollectionUtil;
29-
import org.apache.lucene.util.Version;
3029
import org.elasticsearch.index.analysis.CustomAnalyzer;
31-
import org.elasticsearch.index.analysis.EdgeNGramTokenFilterFactory;
32-
import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
33-
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
34-
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
3530
import org.elasticsearch.index.analysis.NamedAnalyzer;
3631
import org.elasticsearch.index.analysis.TokenFilterFactory;
37-
import org.elasticsearch.index.analysis.WordDelimiterTokenFilterFactory;
3832
import org.elasticsearch.index.mapper.FieldMapper;
3933

4034
import java.util.Comparator;
@@ -56,7 +50,7 @@ private FragmentBuilderHelper() {
5650
public static WeightedFragInfo fixWeightedFragInfo(FieldMapper mapper, Field[] values, WeightedFragInfo fragInfo) {
5751
assert fragInfo != null : "FragInfo must not be null";
5852
assert mapper.fieldType().name().equals(values[0].name()) : "Expected FieldMapper for field " + values[0].name();
59-
if (!fragInfo.getSubInfos().isEmpty() && (containsBrokenAnalysis(mapper.fieldType().indexAnalyzer()))) {
53+
if (!fragInfo.getSubInfos().isEmpty() && containsBrokenAnalysis(mapper.fieldType().indexAnalyzer())) {
6054
/* This is a special case where broken analysis like WDF is used for term-vector creation at index-time
6155
* which can potentially mess up the offsets. To prevent a SAIIOBException we need to resort
6256
* the fragments based on their offsets rather than using soley the positions as it is done in
@@ -91,8 +85,7 @@ private static boolean containsBrokenAnalysis(Analyzer analyzer) {
9185
final CustomAnalyzer a = (CustomAnalyzer) analyzer;
9286
TokenFilterFactory[] tokenFilters = a.tokenFilters();
9387
for (TokenFilterFactory tokenFilterFactory : tokenFilters) {
94-
if (tokenFilterFactory instanceof WordDelimiterTokenFilterFactory
95-
|| tokenFilterFactory instanceof EdgeNGramTokenFilterFactory) {
88+
if (tokenFilterFactory.breaksFastVectorHighlighter()) {
9689
return true;
9790
}
9891
}

core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java

Lines changed: 52 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
*/
1919
package org.elasticsearch.action.admin.indices;
2020

21+
import org.apache.lucene.analysis.MockTokenFilter;
22+
import org.apache.lucene.analysis.TokenStream;
2123
import org.elasticsearch.Version;
2224
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
2325
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
@@ -27,18 +29,28 @@
2729
import org.elasticsearch.common.settings.Settings;
2830
import org.elasticsearch.env.Environment;
2931
import org.elasticsearch.index.IndexSettings;
32+
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
3033
import org.elasticsearch.index.analysis.AnalysisRegistry;
3134
import org.elasticsearch.index.analysis.IndexAnalyzers;
35+
import org.elasticsearch.index.analysis.TokenFilterFactory;
3236
import org.elasticsearch.index.mapper.AllFieldMapper;
3337
import org.elasticsearch.indices.analysis.AnalysisModule;
38+
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
39+
import org.elasticsearch.plugins.AnalysisPlugin;
3440
import org.elasticsearch.test.ESTestCase;
3541
import org.elasticsearch.test.IndexSettingsModule;
3642

3743
import java.io.IOException;
3844
import java.util.List;
45+
import java.util.Map;
3946

40-
import static java.util.Collections.emptyList;
47+
import static java.util.Collections.singletonList;
48+
import static java.util.Collections.singletonMap;
4149

50+
/**
51+
* Tests for {@link TransportAnalyzeAction}. See the more "intense" version of this test in the
52+
* {@code common-analysis} module.
53+
*/
4254
public class TransportAnalyzeActionTests extends ESTestCase {
4355

4456
private IndexAnalyzers indexAnalyzers;
@@ -53,23 +65,28 @@ public void setUp() throws Exception {
5365
Settings indexSettings = Settings.builder()
5466
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
5567
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
56-
.put("index.analysis.filter.wordDelimiter.type", "word_delimiter")
57-
.put("index.analysis.filter.wordDelimiter.split_on_numerics", false)
58-
.put("index.analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
59-
.putArray("index.analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter")
60-
.put("index.analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
61-
.putArray("index.analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter")
62-
.put("index.analysis.tokenizer.trigram.type", "ngram")
63-
.put("index.analysis.tokenizer.trigram.min_gram", 3)
64-
.put("index.analysis.tokenizer.trigram.max_gram", 3)
65-
.put("index.analysis.filter.synonym.type", "synonym")
66-
.putArray("index.analysis.filter.synonym.synonyms", "kimchy => shay")
67-
.put("index.analysis.filter.synonym.tokenizer", "trigram")
68-
.put("index.analysis.filter.synonym.min_gram", 3)
69-
.put("index.analysis.filter.synonym.max_gram", 3).build();
68+
.put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard")
69+
.put("index.analysis.analyzer.custom_analyzer.filter", "mock").build();
7070
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
7171
environment = new Environment(settings);
72-
registry = new AnalysisModule(environment, emptyList()).getAnalysisRegistry();
72+
AnalysisPlugin plugin = new AnalysisPlugin() {
73+
class MockFactory extends AbstractTokenFilterFactory {
74+
MockFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
75+
super(indexSettings, name, settings);
76+
}
77+
78+
@Override
79+
public TokenStream create(TokenStream tokenStream) {
80+
return new MockTokenFilter(tokenStream, MockTokenFilter.ENGLISH_STOPSET);
81+
}
82+
}
83+
84+
@Override
85+
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
86+
return singletonMap("mock", MockFactory::new);
87+
}
88+
};
89+
registry = new AnalysisModule(environment, singletonList(plugin)).getAnalysisRegistry();
7390
indexAnalyzers = registry.build(idxSettings);
7491
}
7592

@@ -143,51 +160,44 @@ public void testFillsAttributes() throws IOException {
143160
}
144161

145162
public void testWithIndexAnalyzers() throws IOException {
146-
147163
AnalyzeRequest request = new AnalyzeRequest();
148-
request.analyzer("standard");
149164
request.text("the quick brown fox");
150165
request.analyzer("custom_analyzer");
151-
request.text("the qu1ck brown fox");
152166
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
153167
List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
154-
assertEquals(4, tokens.size());
168+
assertEquals(3, tokens.size());
169+
assertEquals("quick", tokens.get(0).getTerm());
170+
assertEquals("brown", tokens.get(1).getTerm());
171+
assertEquals("fox", tokens.get(2).getTerm());
155172

156-
request.analyzer("whitespace");
157-
request.text("the qu1ck brown fox-dog");
173+
request.analyzer("standard");
158174
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
159175
tokens = analyze.getTokens();
160176
assertEquals(4, tokens.size());
177+
assertEquals("the", tokens.get(0).getTerm());
178+
assertEquals("quick", tokens.get(1).getTerm());
179+
assertEquals("brown", tokens.get(2).getTerm());
180+
assertEquals("fox", tokens.get(3).getTerm());
161181

162-
request.analyzer("custom_analyzer");
163-
request.text("the qu1ck brown fox-dog");
164-
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
165-
tokens = analyze.getTokens();
166-
assertEquals(5, tokens.size());
167-
182+
// Switch the analyzer out for just a tokenizer
168183
request.analyzer(null);
169-
request.tokenizer("whitespace");
170-
request.addTokenFilter("lowercase");
171-
request.addTokenFilter("wordDelimiter");
172-
request.text("the qu1ck brown fox-dog");
184+
request.tokenizer("standard");
173185
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
174186
tokens = analyze.getTokens();
175-
assertEquals(5, tokens.size());
187+
assertEquals(4, tokens.size());
176188
assertEquals("the", tokens.get(0).getTerm());
177-
assertEquals("qu1ck", tokens.get(1).getTerm());
189+
assertEquals("quick", tokens.get(1).getTerm());
178190
assertEquals("brown", tokens.get(2).getTerm());
179191
assertEquals("fox", tokens.get(3).getTerm());
180-
assertEquals("dog", tokens.get(4).getTerm());
181192

182-
request.analyzer(null);
183-
request.tokenizer("trigram");
184-
request.addTokenFilter("synonym");
185-
request.text("kimchy");
193+
// Now try applying our token filter
194+
request.addTokenFilter("mock");
186195
analyze = TransportAnalyzeAction.analyze(request, AllFieldMapper.NAME, null, indexAnalyzers, registry, environment);
187196
tokens = analyze.getTokens();
188-
assertEquals(2, tokens.size());
189-
assertEquals("sha", tokens.get(0).getTerm());
190-
assertEquals("hay", tokens.get(1).getTerm());
197+
assertEquals(3, tokens.size());
198+
assertEquals("quick", tokens.get(0).getTerm());
199+
assertEquals("brown", tokens.get(1).getTerm());
200+
assertEquals("fox", tokens.get(2).getTerm());
191201
}
192202

193203
public void testGetIndexAnalyserWithoutIndexAnalyzers() throws IOException {

core/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,5 +22,5 @@
2222
import org.elasticsearch.AnalysisFactoryTestCase;
2323

2424
public class AnalysisFactoryTests extends AnalysisFactoryTestCase {
25-
// tests are inherited
25+
// tests are inherited and nothing needs to be defined here
2626
}

0 commit comments

Comments
 (0)