Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LUCENE-10301: make test framework a module (remove split packages) #551

Closed
wants to merge 35 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
985d52f
move util -> tests.util
dweiss Dec 19, 2021
c1d88f5
Move package docs to java files.
dweiss Dec 19, 2021
fe5a0a4
Fix security manager.
dweiss Dec 19, 2021
8e5344a
Moved store -> tests.store
dweiss Dec 19, 2021
f80aaf2
Moved geo -> tests.geo
dweiss Dec 19, 2021
d77735b
Moved mockfile -> tests.mockfile
dweiss Dec 19, 2021
9505852
Moved search -> tests.search
dweiss Dec 19, 2021
4c01ac4
Moved analysis -> tests.analysis
dweiss Dec 19, 2021
3aa3379
Moved asserting codec.
dweiss Dec 19, 2021
624c54c
More codecs.
dweiss Dec 19, 2021
58b37a4
Moved more codecs.
dweiss Dec 19, 2021
6dede7a
Moved remaining test codecs.
dweiss Dec 19, 2021
f91a864
Partial conversion of .index package. This one has lots of package-pr…
dweiss Dec 19, 2021
4a28dda
Use public api instead of pkg-private method
dweiss Dec 19, 2021
d304660
Make filter merge policy unwrappable.
dweiss Dec 19, 2021
1cc0442
Exposed package-private internals via test secrets sub-package.
dweiss Dec 20, 2021
838dace
Add license.
dweiss Dec 20, 2021
8643180
Only export internal packages to the test framework.
dweiss Dec 20, 2021
7dffc9f
No more split packages.
dweiss Dec 20, 2021
66c108b
Add module descriptor to the test framework.
dweiss Dec 20, 2021
69ac965
Add service providers to the test framework and include it in modules…
dweiss Dec 20, 2021
745c26a
Use static secret accessors. This doesn't expose any public API and i…
dweiss Dec 20, 2021
40c515e
Rephrase comment.
dweiss Dec 20, 2021
7366b81
Added changes and migration entry.
dweiss Dec 20, 2021
36b0ef4
Merge branch 'main' of https://gitbox.apache.org/repos/asf/lucene int…
uschindler Dec 21, 2021
d8689c0
Fix compile error after merge
uschindler Dec 21, 2021
a8b562c
Spotless apply
uschindler Dec 21, 2021
b1c2fa9
Add a check that we can only call TestSecrets from the test framework
uschindler Dec 21, 2021
66bf9db
Add a test
uschindler Dec 21, 2021
8ad2834
Use a separate method to better emulate the bad behaviour
uschindler Dec 21, 2021
6d3a43a
Add javadocs
uschindler Dec 21, 2021
f954864
Improve test and fix bug in the class initialization check
uschindler Dec 21, 2021
4559c73
Remove one more permission in test policy
uschindler Dec 21, 2021
d9fadf5
Merge pull request #14 from uschindler/tframework-checkcaller
dweiss Dec 21, 2021
e4399b1
Merge remote-tracking branch 'origin/main' into tframework
dweiss Dec 21, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
2 changes: 1 addition & 1 deletion dev-tools/scripts/smokeTestRelease.py
Expand Up @@ -588,7 +588,7 @@ def verifyUnpacked(java, artifact, unpackPath, gitRevision, version, testArgs):
if len(in_lucene_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s lucene/ folder: %s' % (artifact, in_lucene_folder))
else:
is_in_list(in_root_folder, ['bin', 'docs', 'licenses', 'modules', 'modules-test-framework', 'modules-thirdparty'])
is_in_list(in_root_folder, ['bin', 'docs', 'licenses', 'modules', 'modules-thirdparty'])

if len(in_root_folder) > 0:
raise RuntimeError('lucene: unexpected files/dirs in artifact %s: %s' % (artifact, in_root_folder))
Expand Down
19 changes: 0 additions & 19 deletions gradle/documentation/render-javadoc.gradle
Expand Up @@ -226,22 +226,6 @@ configure(project(":lucene:core")) {
}
}

// Fix for Java 11 Javadoc tool that cannot handle split packages between modules correctly.
// (by removing all the packages which are part of lucene-core)
// See: https://issues.apache.org/jira/browse/LUCENE-8738?focusedCommentId=16818106&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-16818106
// LUCENE-9499: This workaround should be applied only to test-framework (we have no split package in other modules).
configure(project(":lucene:test-framework")) {
project.tasks.withType(RenderJavadocTask) {
doLast {
Set luceneCorePackages = file("${project(':lucene:core').tasks[name].outputDir}/element-list").readLines('UTF-8').toSet();
File elementFile = file("${outputDir}/element-list");
List elements = elementFile.readLines('UTF-8');
elements.removeAll(luceneCorePackages)
elementFile.write(elements.join('\n').concat('\n'), 'UTF-8');
}
}
}

configure(project(':lucene:demo')) {
project.tasks.withType(RenderJavadocTask) {
// For the demo, we link the example source in the javadocs, as it's ref'ed elsewhere
Expand Down Expand Up @@ -415,9 +399,6 @@ class RenderJavadocTask extends DefaultTask {
// - find all (enabled) tasks this tasks depends on (with same name), calling findRenderTasksInDependencies()
// - sort the tasks preferring those whose project name equals 'core', then lexicographical by path
// - for each task get output dir to create relative or absolute link
// NOTE: explicitly exclude solr/test-framework, or attempting to link to lucene-test-framework because if we did javadoc would
// attempt to link class refs in org.apache.lucene, causing broken links. (either broken links to things like "Directory" if
// lucene-test-framework was first, or broken links to things like LuceneTestCase if lucene-core was first)
findRenderTasksInDependencies()
.sort(false, Comparator.comparing { (it.project.name != 'core') as Boolean }.thenComparing(Comparator.comparing { it.path }))
.each { otherTask ->
Expand Down
3 changes: 3 additions & 0 deletions gradle/java/modules.gradle
Expand Up @@ -17,6 +17,9 @@

// Configure miscellaneous aspects required for supporting the java module system layer.

// Debugging utilities.
apply from: buildscript.sourceFile.toPath().resolveSibling("modules-debugging.gradle")

allprojects {
plugins.withType(JavaPlugin) {
// We won't be using gradle's built-in automatic module finder.
Expand Down
4 changes: 2 additions & 2 deletions gradle/testing/randomization.gradle
Expand Up @@ -167,10 +167,10 @@ allprojects {
if (project.path.endsWith(".tests")) {
// LUCENE-10301: for now, do not use the security manager for modular tests (test framework is not available).
} else if (project.path == ":lucene:replicator") {
systemProperty 'java.security.manager', "org.apache.lucene.util.TestSecurityManager"
systemProperty 'java.security.manager', "org.apache.lucene.tests.util.TestSecurityManager"
systemProperty 'java.security.policy', file("${resources}/policies/replicator-tests.policy")
} else if (project.path.startsWith(":lucene")) {
systemProperty 'java.security.manager', "org.apache.lucene.util.TestSecurityManager"
systemProperty 'java.security.manager', "org.apache.lucene.tests.util.TestSecurityManager"
systemProperty 'java.security.policy', file("${resources}/policies/tests.policy")
}

Expand Down
3 changes: 0 additions & 3 deletions gradle/testing/randomization/policies/tests.policy
Expand Up @@ -57,10 +57,7 @@ grant {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed by cyberneko usage by benchmarks on J9
permission java.lang.RuntimePermission "accessClassInPackage.org.apache.xerces.util";
// needed by org.apache.logging.log4j
permission java.lang.RuntimePermission "getenv.*";
permission java.lang.RuntimePermission "getClassLoader";
permission java.lang.RuntimePermission "setContextClassLoader";

// Needed for loading native library (lucene:misc:native) in lucene:misc
permission java.lang.RuntimePermission "getFileStoreAttributes";
Expand Down
4 changes: 4 additions & 0 deletions lucene/CHANGES.txt
Expand Up @@ -46,6 +46,10 @@ API Changes
* LUCENE-10197: UnifiedHighlighter now has a Builder to construct it. The UH's setters are now
deprecated. (Animesh Pandey, David Smiley)

* LUCENE-10301: the test framework is now a module. All the classes have been moved from
org.apache.lucene.* to org.apache.lucene.tests.* to avoid package name conflicts with the
core module. (Dawid Weiss)

New Features
---------------------

Expand Down
7 changes: 7 additions & 0 deletions lucene/MIGRATE.md
Expand Up @@ -27,6 +27,13 @@ behind the scenes. It is the responsibility of the caller to call

## Migration from Lucene 9.0 to Lucene 9.1

### Test framework package migration and module (LUCENE-10301)

The test framework is now a module. All the classes have been moved from
`org.apache.lucene.*` to `org.apache.lucene.tests.*` to avoid package name conflicts
with the core module. If you were using the Lucene test framework, the migration should be
fairly automatic (package prefix).

### Minor syntactical changes in StandardQueryParser (LUCENE-10223)

Added interval functions and min-should-match support to `StandardQueryParser`. This
Expand Down
Expand Up @@ -17,8 +17,8 @@
package org.apache.lucene.analysis.ar;

import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

/** Test the Arabic Analyzer */
public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,9 +18,9 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Simple tests to ensure the Arabic filter Factories are working. */
public class TestArabicFilters extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -19,10 +19,10 @@
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

/** Test the Arabic Normalization Filter */
public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,12 +18,12 @@

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

/** Test the Arabic Normalization Filter */
public class TestArabicStemFilter extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,8 +18,8 @@

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

/** Test the Bulgarian analyzer */
public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,9 +18,9 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Simple tests to ensure the Bulgarian stem filter factory is working. */
public class TestBulgarianStemFilterFactory extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -19,12 +19,12 @@
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

/** Test the Bulgarian Stemmer */
public class TestBulgarianStemmer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -17,7 +17,7 @@
package org.apache.lucene.analysis.bn;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

/** Tests the BengaliAnalyzer */
public class TestBengaliAnalyzer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,8 +18,8 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Test Bengali Filter Factory */
public class TestBengaliFilters extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -18,11 +18,11 @@

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.util.TestUtil;

/** Test BengaliNormalizer */
public class TestBengaliNormalizer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,10 +18,10 @@

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

/** Test Codes for BengaliStemmer */
public class TestBengaliStemmer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -16,10 +16,10 @@
*/
package org.apache.lucene.analysis.boost;

import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.BoostAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

public class TestDelimitedBoostTokenFilter extends BaseTokenStreamTestCase {

Expand Down
Expand Up @@ -19,13 +19,13 @@
import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.core.LetterTokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

/**
* Test the Brazilian Stem Filter, which only modifies the term text.
Expand Down
Expand Up @@ -18,10 +18,10 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

/** Simple tests to ensure the Brazilian stem filter factory is working. */
public class TestBrazilianStemFilterFactory extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -18,8 +18,8 @@

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;

public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the stopwords file is missing in classpath */
Expand Down
Expand Up @@ -26,10 +26,10 @@
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.TestUtil;

public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {

Expand Down
Expand Up @@ -18,8 +18,8 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Simple tests to ensure this factory is working */
public class TestHTMLStripCharFilterFactory extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -26,12 +26,12 @@
import java.util.Random;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;
import org.apache.lucene.tests.util.TestUtil;
import org.apache.lucene.util.UnicodeUtil;

public class TestMappingCharFilter extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -16,7 +16,7 @@
*/
package org.apache.lucene.analysis.charfilter;

import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

public class TestMappingCharFilterFactory extends BaseTokenStreamFactoryTestCase {
public void testParseString() throws Exception {
Expand Down
Expand Up @@ -19,9 +19,7 @@
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
Expand All @@ -31,6 +29,8 @@
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

/** Most tests adopted from TestCJKTokenizer */
public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,9 +18,9 @@

import java.util.Random;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.util.IOUtils;

public class TestCJKBigramFilter extends BaseTokenStreamTestCase {
Expand Down
Expand Up @@ -18,9 +18,9 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Simple tests to ensure the CJK bigram factory is working. */
public class TestCJKBigramFilterFactory extends BaseTokenStreamFactoryTestCase {
Expand Down
Expand Up @@ -21,11 +21,11 @@
import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.tests.analysis.MockTokenizer;

public class TestCJKWidthCharFilter extends BaseTokenStreamTestCase {
/** Full-width ASCII forms normalized to half-width (basic latin) */
Expand Down
Expand Up @@ -19,8 +19,8 @@

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.tests.analysis.BaseTokenStreamFactoryTestCase;

/** Simple tests to ensure {@link CJKWidthCharFilter} is working */
public class TestCJKWidthCharFilterFactory extends BaseTokenStreamFactoryTestCase {
Expand Down