Merged
2 changes: 2 additions & 0 deletions solr/CHANGES.txt
@@ -197,6 +197,8 @@ Bug Fixes

* SOLR-16648: NullPointerException when excluding facets in More Like This Handler (Mikhail Khludnev)

* SOLR-16628: Ensure that InputStreams are closed after Xml parsing (Michael Gibney, David Smiley, Kevin Risden)

Build
---------------------
* Upgrade forbiddenapis to 3.4 (Uwe Schindler)
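For context, a minimal sketch of the pattern behind this fix (generic JDK code, not the Solr classes themselves): open the InputStream yourself and close it with try-with-resources rather than relying on the XML parser to close the stream it was handed.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import javax.xml.parsers.DocumentBuilder;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;

    public class CloseAfterParse {
      // Open the stream ourselves and close it via try-with-resources instead of
      // relying on the XML parser to close the stream it was handed.
      static Document parse(Path xmlFile) throws Exception {
        DocumentBuilder db = DocumentBuilderFactory.newInstance().newDocumentBuilder();
        try (InputStream in = Files.newInputStream(xmlFile)) {
          return db.parse(in); // stream is closed even if parse() throws
        }
      }
    }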
8 changes: 4 additions & 4 deletions solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -173,8 +173,8 @@ private class ResourceProvider implements Function<String, InputStream> {
InputStream in;
String fileName;

ResourceProvider(SolrResourceLoader loader, String res) throws IOException {
this.in = loader.openResource(res);
ResourceProvider(InputStream in) throws IOException {
this.in = in;
if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
ZkSolrResourceLoader.ZkByteArrayInputStream zkin =
(ZkSolrResourceLoader.ZkByteArrayInputStream) in;
@@ -401,8 +401,8 @@ private SolrConfig(
}

private IndexSchemaFactory.VersionedConfig readXml(SolrResourceLoader loader, String name) {
try {
ResourceProvider rp = new ResourceProvider(loader, name);
try (InputStream in = loader.openResource(name)) {
ResourceProvider rp = new ResourceProvider(in);
XmlConfigFile xml = new XmlConfigFile(loader, rp, name, null, "/config/", null);
return new IndexSchemaFactory.VersionedConfig(
rp.zkVersion,
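A hedged sketch of the ownership change above (class and method names here are hypothetical): the provider no longer opens the resource; the caller opens the stream, owns it via try-with-resources, and passes it in.

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    // The provider only wraps a stream it is given; it no longer opens the resource itself.
    class StreamProvider {
      final InputStream in;

      StreamProvider(InputStream in) {
        this.in = in;
      }
    }

    class ConfigReader {
      static void readXml(Path resource) throws IOException {
        // The caller owns the stream and closes it, whatever the parse does.
        try (InputStream in = Files.newInputStream(resource)) {
          StreamProvider rp = new StreamProvider(in);
          // ... hand rp to the XML parser here ...
        }
      }
    }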
7 changes: 6 additions & 1 deletion solr/core/src/java/org/apache/solr/core/XmlConfigFile.java
@@ -38,6 +38,7 @@
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.DOMUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.util.SafeXMLParsing;
import org.apache.solr.util.SystemIdResolver;
import org.slf4j.Logger;
@@ -119,9 +120,10 @@ public XmlConfigFile(
this.name = name;
this.prefix = (prefix != null && !prefix.endsWith("/")) ? prefix + '/' : prefix;

InputStream in = null;
try {
if (is == null && fileSupplier != null) {
InputStream in = fileSupplier.apply(name);
in = fileSupplier.apply(name);
if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) {
zkVersion = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion();
log.debug("loaded config {} with version {} ", name, zkVersion);
@@ -138,6 +140,9 @@
} catch (SAXException e) {
SolrException.log(log, "Exception during parsing file: " + name, e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
// XML Parser should close but in exceptional cases might not; so let's be safe
IOUtils.closeQuietly(in);
}
if (substituteProps != null) {
DOMUtil.substituteProperties(doc, getSubstituteProperties());
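A simplified sketch of the pattern applied above (hypothetical names, same Solr IOUtils helper): when the stream is only opened on one branch inside the try, a null-safe closeQuietly in the finally block covers the exceptional paths where the parser might not close it.

    import java.io.InputStream;
    import java.util.function.Function;
    import org.apache.solr.common.util.IOUtils;

    class ConditionalClose {
      static void parse(InputStream is, Function<String, InputStream> fileSupplier, String name) {
        InputStream in = null;
        try {
          if (is == null && fileSupplier != null) {
            in = fileSupplier.apply(name); // opened only on this branch
          }
          // ... parse whichever of 'is' / 'in' is non-null ...
        } finally {
          // The parser should close the stream, but might not on exceptional paths.
          IOUtils.closeQuietly(in); // quietly ignores a stream that was never opened
        }
      }
    }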
solr/core/src/java/org/apache/solr/schema/IndexSchemaFactory.java
@@ -136,8 +136,8 @@ public static ConfigSetService.ConfigResource getConfigResource(

private static VersionedConfig loadConfig(
InputStream schemaInputStream, SolrResourceLoader loader, String name) {
try {
InputStream is = (schemaInputStream == null ? loader.openResource(name) : schemaInputStream);
try (InputStream is =
(schemaInputStream == null ? loader.openResource(name) : schemaInputStream)) {
ConfigNode node = getParsedSchema(is, loader, name);
int version =
is instanceof ZkSolrResourceLoader.ZkByteArrayInputStream
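A small sketch of the conditional-stream form used in loadConfig above (hypothetical names): choosing the stream inside the try-with-resources header means it is closed on every path, which also closes a stream the caller passed in.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class LoadConfigSketch {
      static void load(InputStream provided, Path fallback) throws Exception {
        // Pick whichever stream is available inside the resource header, so it is
        // closed on every path -- including a stream the caller handed in.
        try (InputStream is = (provided == null ? Files.newInputStream(fallback) : provided)) {
          // ... parse 'is' here ...
        }
      }
    }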
solr/core/src/java/org/apache/solr/schema/ManagedIndexSchemaFactory.java
@@ -24,6 +24,9 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map.Entry;
import net.jcip.annotations.NotThreadSafe;
import org.apache.commons.io.IOUtils;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkSolrResourceLoader;
@@ -48,6 +51,7 @@
import org.xml.sax.InputSource;

/** Factory for ManagedIndexSchema */
@NotThreadSafe
public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements SolrCoreAware {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String UPGRADED_SCHEMA_EXTENSION = ".bak";
@@ -76,7 +80,6 @@ public SolrResourceLoader getResourceLoader() {
private SolrCore core;
private ZkIndexSchemaReader zkIndexSchemaReader;

private String loadedResource;
private boolean shouldUpgrade = false;

@Override
@@ -197,89 +200,99 @@ public ManagedIndexSchema create(
this.config = config;
this.loader = config.getResourceLoader();
InputStream schemaInputStream = null;
String loadedResource = null;

if (null == resourceName) {
resourceName = IndexSchema.DEFAULT_SCHEMA_FILE;
}

int schemaZkVersion = -1;
if (!(loader instanceof ZkSolrResourceLoader)) {
schemaInputStream = readSchemaLocally();
} else { // ZooKeeper
final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
final SolrZkClient zkClient = zkLoader.getZkController().getZkClient();
final String managedSchemaPath = lookupZKManagedSchemaPath();
managedSchemaResourceName =
managedSchemaPath.substring(managedSchemaPath.lastIndexOf("/") + 1); // not loving this
Stat stat = new Stat();
try {
// Attempt to load the managed schema
byte[] data = zkClient.getData(managedSchemaPath, null, stat, true);
schemaZkVersion = stat.getVersion();
schemaInputStream =
new ZkSolrResourceLoader.ZkByteArrayInputStream(data, managedSchemaPath, stat);
loadedResource = managedSchemaResourceName;
warnIfNonManagedSchemaExists();
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("", e);
} catch (KeeperException.NoNodeException e) {
log.info(
"The schema is configured as managed, but managed schema resource {} not found - loading non-managed schema {} instead",
managedSchemaResourceName,
resourceName);
} catch (KeeperException e) {
String msg = "Error attempting to access " + managedSchemaPath;
log.error(msg, e);
throw new SolrException(ErrorCode.SERVER_ERROR, msg, e);
try {
if (null == resourceName) {
resourceName = IndexSchema.DEFAULT_SCHEMA_FILE;
}
if (null == schemaInputStream) {
// The managed schema file could not be found - load the non-managed schema

int schemaZkVersion = -1;
if (!(loader instanceof ZkSolrResourceLoader)) {
Entry<String, InputStream> localSchemaInput = readSchemaLocally();
sonatype-lift (bot) commented:

THREAD_SAFETY_VIOLATION: Unprotected write. Non-private method ManagedIndexSchemaFactory.create(...) indirectly writes to field this.managedSchemaResourceName outside of synchronization.
Reporting because this access may occur on a background thread.


Contributor commented:


It sucks to still see this because we have a NotThreadSafe annotation, so I wasn't anticipating these checks. From reading the Infer/RacerD docs in more detail, it appears this is because the class, despite not being thread-safe and advertising itself as such, nonetheless contains the "synchronized" keyword in one spot at the end of create(), and the presence of that keyword in the code takes precedence over the NotThreadSafe annotation. It seems to me we don't need that synchronized there, because the schema is not published yet. Anyway, such changes/explorations don't belong here; I'll create a separate PR to toy with this.

Contributor Author commented:

Ah, ok, that makes sense! Thanks for following up on this. I expect to merge/backport tomorrow.

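A hedged illustration of the situation described in this thread (not the Solr code): a class annotated @NotThreadSafe that still contains one synchronized block, plus an unsynchronized field write of the kind the finding points at.

    import net.jcip.annotations.NotThreadSafe;

    @NotThreadSafe
    class RacerDExample {
      private String managedSchemaResourceName;

      void create(String name) {
        // Unprotected write: the kind of access the finding above reports.
        this.managedSchemaResourceName = name;
        synchronized (this) {
          // The single synchronized region is enough for RacerD to analyse the
          // class as if it were meant to be thread-safe, despite the annotation.
        }
      }
    }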
loadedResource = localSchemaInput.getKey();
schemaInputStream = localSchemaInput.getValue();
} else { // ZooKeeper
final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) loader;
final SolrZkClient zkClient = zkLoader.getZkController().getZkClient();
final String managedSchemaPath = lookupZKManagedSchemaPath();
managedSchemaResourceName =
managedSchemaPath.substring(managedSchemaPath.lastIndexOf("/") + 1); // not loving this
Stat stat = new Stat();
try {
schemaInputStream = loader.openResource(resourceName);
loadedResource = resourceName;
shouldUpgrade = true;
} catch (IOException e) {
// Attempt to load the managed schema
byte[] data = zkClient.getData(managedSchemaPath, null, stat, true);
schemaZkVersion = stat.getVersion();
schemaInputStream =
new ZkSolrResourceLoader.ZkByteArrayInputStream(data, managedSchemaPath, stat);
loadedResource = managedSchemaResourceName;
warnIfNonManagedSchemaExists();
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("", e);
} catch (KeeperException.NoNodeException e) {
log.info(
"The schema is configured as managed, but managed schema resource {} not found - loading non-managed schema {} instead",
managedSchemaResourceName,
resourceName);
} catch (KeeperException e) {
String msg = "Error attempting to access " + managedSchemaPath;
log.error(msg, e);
throw new SolrException(ErrorCode.SERVER_ERROR, msg, e);
}
if (null == schemaInputStream) {
// The managed schema file could not be found - load the non-managed schema
try {
// Retry to load the managed schema, in case it was created since the first attempt
byte[] data = zkClient.getData(managedSchemaPath, null, stat, true);
schemaZkVersion = stat.getVersion();
schemaInputStream = new ByteArrayInputStream(data);
loadedResource = managedSchemaPath;
warnIfNonManagedSchemaExists();
} catch (Exception e1) {
if (e1 instanceof InterruptedException) {
Thread.currentThread().interrupt(); // Restore the interrupted status
schemaInputStream = loader.openResource(resourceName);
loadedResource = resourceName;
shouldUpgrade = true;
} catch (IOException e) {
try {
// Retry to load the managed schema, in case it was created since the first attempt
byte[] data = zkClient.getData(managedSchemaPath, null, stat, true);
schemaZkVersion = stat.getVersion();
schemaInputStream = new ByteArrayInputStream(data);
loadedResource = managedSchemaPath;
warnIfNonManagedSchemaExists();
} catch (Exception e1) {
if (e1 instanceof InterruptedException) {
Thread.currentThread().interrupt(); // Restore the interrupted status
}
final String msg =
"Error loading both non-managed schema '"
+ resourceName
+ "' and managed schema '"
+ managedSchemaResourceName
+ "'";
log.error(msg, e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg, e);
}
final String msg =
"Error loading both non-managed schema '"
+ resourceName
+ "' and managed schema '"
+ managedSchemaResourceName
+ "'";
log.error(msg, e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg, e);
}
}
}
assert loadedResource != null;
InputSource inputSource = new InputSource(schemaInputStream);
inputSource.setSystemId(SystemIdResolver.createSystemIdFromResourceName(loadedResource));
try {
schema =
new ManagedIndexSchema(
config,
loadedResource,
IndexSchemaFactory.getConfigResource(
configSetService, schemaInputStream, loader, managedSchemaResourceName),
isMutable,
managedSchemaResourceName,
schemaZkVersion,
getSchemaUpdateLock());
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading parsing schema", e);
}
} finally {
// XML Parser should close but in exceptional cases might not; so let's be safe
IOUtils.closeQuietly(schemaInputStream);
}
InputSource inputSource = new InputSource(schemaInputStream);
inputSource.setSystemId(SystemIdResolver.createSystemIdFromResourceName(loadedResource));
try {
schema =
new ManagedIndexSchema(
config,
loadedResource,
IndexSchemaFactory.getConfigResource(
configSetService, schemaInputStream, loader, managedSchemaResourceName),
isMutable,
managedSchemaResourceName,
schemaZkVersion,
getSchemaUpdateLock());
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading parsing schema", e);
}

if (shouldUpgrade) {
// Persist the managed schema if it doesn't already exist
synchronized (schema.getSchemaUpdateLock()) {
@@ -290,8 +303,9 @@ public ManagedIndexSchema create(
return schema;
}

private InputStream readSchemaLocally() {
private Entry<String, InputStream> readSchemaLocally() {
InputStream schemaInputStream = null;
String loadedResource = null;
try {
// Attempt to load the managed schema
final Path managedSchemaPath = lookupLocalManagedSchemaPath();
@@ -323,7 +337,8 @@ private InputStream readSchemaLocally() {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg, e);
}
}
return schemaInputStream;
assert loadedResource != null;
return new SimpleImmutableEntry<>(loadedResource, schemaInputStream);
}

/** Return whether a non-managed schema exists, either in local storage or on ZooKeeper. */
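A condensed sketch of the readSchemaLocally refactor above (paths and names are hypothetical): the method now returns the resolved resource name together with the opened stream as a single Entry, so the caller can set the SAX system id and close the stream in one place.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.AbstractMap.SimpleImmutableEntry;
    import java.util.Map.Entry;

    class SchemaReadSketch {
      // Pair the resolved resource name with the opened stream so the caller can
      // both build the system id and close the stream in one place.
      static Entry<String, InputStream> open(Path managedSchema, Path fallbackSchema) throws Exception {
        Path chosen = Files.exists(managedSchema) ? managedSchema : fallbackSchema;
        return new SimpleImmutableEntry<>(chosen.getFileName().toString(), Files.newInputStream(chosen));
      }
    }

On the caller side, getKey() would feed the system id and getValue() is the stream that eventually gets closed in the caller's finally block.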