Additional redact processor license check tests (#96541)
joegallo committed Jun 5, 2023
1 parent 8a5bd8a commit f8edfcc
Showing 2 changed files with 171 additions and 1 deletion.
server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java (128 changes: 127 additions & 1 deletion)
@@ -91,6 +91,7 @@

import static org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils.executeAndAssertSuccessful;
import static org.elasticsearch.core.Tuple.tuple;
import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException;
import static org.elasticsearch.ingest.IngestService.NOOP_PIPELINE_NAME;
import static org.elasticsearch.ingest.IngestService.hasPipeline;
import static org.hamcrest.Matchers.containsInAnyOrder;
@@ -309,6 +310,68 @@ public void testInnerUpdatePipelines() {
assertThat(ingestService.pipelines(), sameInstance(pipelines));
}

public void testInnerUpdatePipelinesValidation() {
Map<String, Processor.Factory> processors = new HashMap<>();
processors.put("fail_validation", (factories, tag, description, config) -> {
// ordinary validation issues happen at processor construction time
throw newConfigurationException("fail_validation", tag, "no_property_name", "validation failure reason");
});
processors.put("fail_extra_validation", (factories, tag, description, config) -> {
// 'extra validation' issues happen after processor construction time
return new FakeProcessor("fail_extra_validation", tag, description, ingestDocument -> {}) {
@Override
public void extraValidation() throws Exception {
throw newConfigurationException("fail_extra_validation", tag, "no_property_name", "extra validation failure reason");
}
};
});

{
// a processor that fails ordinary validation (i.e. the processor factory throws an exception while constructing it)
// will result in a placeholder pipeline being substituted

IngestService ingestService = createWithProcessors(processors);
PipelineConfiguration config = new PipelineConfiguration("_id", new BytesArray("""
{"processors": [{"fail_validation" : {}}]}"""), XContentType.JSON);
IngestMetadata ingestMetadata = new IngestMetadata(Map.of("_id", config));
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
ClusterState previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState)
.metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata))
.build();
ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));

Pipeline pipeline = ingestService.getPipeline("_id");
assertThat(
pipeline.getDescription(),
equalTo("this is a place holder pipeline, because pipeline with id [_id] could not be loaded")
);
}

{
// a processor that fails extra validation (i.e. an exception is thrown from `extraValidation`)
// will be processed just fine -- extraValidation is for rest/transport validation, not for when
// a processor is being created from a processor factory

IngestService ingestService = createWithProcessors(processors);
PipelineConfiguration config = new PipelineConfiguration("_id", new BytesArray("""
{"processors": [{"fail_extra_validation" : {}}]}"""), XContentType.JSON);
IngestMetadata ingestMetadata = new IngestMetadata(Map.of("_id", config));
ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
ClusterState previousClusterState = clusterState;
clusterState = ClusterState.builder(clusterState)
.metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata))
.build();
ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));

Pipeline pipeline = ingestService.getPipeline("_id");
assertThat(pipeline.getDescription(), nullValue());
assertThat(pipeline.getProcessors().size(), equalTo(1));
Processor processor = pipeline.getProcessors().get(0);
assertThat(processor.getType(), equalTo("fail_extra_validation"));
}
}

public void testDelete() {
IngestService ingestService = createWithProcessors();
PipelineConfiguration config = new PipelineConfiguration("_id", new BytesArray("""
@@ -887,7 +950,7 @@ public void testGetPipelines() {
assertThat(pipelines.get(1).getId(), equalTo("_id2"));
}

public void testValidate() throws Exception {
public void testValidateProcessorTypeOnAllNodes() throws Exception {
IngestService ingestService = createWithProcessors();
PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("""
{
@@ -928,6 +991,69 @@ public void testValidate() throws Exception {
ingestService.validatePipeline(ingestInfos, putRequest.getId(), pipelineConfig2);
}

public void testValidateConfigurationExceptions() {
IngestService ingestService = createWithProcessors(Map.of("fail_validation", (factories, tag, description, config) -> {
// ordinary validation issues happen at processor construction time
throw newConfigurationException("fail_validation", tag, "no_property_name", "validation failure reason");
}));
PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("""
{
"processors": [
{
"fail_validation": {
}
}
]
}"""), XContentType.JSON);
var pipelineConfig = XContentHelper.convertToMap(putRequest.getSource(), false, putRequest.getXContentType()).v2();

// other validation actually consults this map, but this validation does not. however, it must not be empty.
DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), Map.of(), Set.of(), Version.CURRENT);
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
ingestInfos.put(node1, new IngestInfo(List.of()));

ElasticsearchParseException e = expectThrows(
ElasticsearchParseException.class,
() -> ingestService.validatePipeline(ingestInfos, putRequest.getId(), pipelineConfig)
);
assertEquals("[no_property_name] validation failure reason", e.getMessage());
assertEquals("fail_validation", e.getMetadata("es.processor_type").get(0));
}

public void testValidateExtraValidationConfigurationExceptions() {
IngestService ingestService = createWithProcessors(Map.of("fail_extra_validation", (factories, tag, description, config) -> {
// 'extra validation' issues happen after processor construction time
return new FakeProcessor("fail_extra_validation", tag, description, ingestDocument -> {}) {
@Override
public void extraValidation() throws Exception {
throw newConfigurationException("fail_extra_validation", tag, "no_property_name", "extra validation failure reason");
}
};
}));
PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("""
{
"processors": [
{
"fail_extra_validation": {
}
}
]
}"""), XContentType.JSON);
var pipelineConfig = XContentHelper.convertToMap(putRequest.getSource(), false, putRequest.getXContentType()).v2();

// other validation actually consults this map, but this validation does not. however, it must not be empty.
DiscoveryNode node1 = new DiscoveryNode("_node_id1", buildNewFakeTransportAddress(), Map.of(), Set.of(), Version.CURRENT);
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
ingestInfos.put(node1, new IngestInfo(List.of()));

ElasticsearchParseException e = expectThrows(
ElasticsearchParseException.class,
() -> ingestService.validatePipeline(ingestInfos, putRequest.getId(), pipelineConfig)
);
assertEquals("[no_property_name] extra validation failure reason", e.getMessage());
assertEquals("fail_extra_validation", e.getMetadata("es.processor_type").get(0));
}

public void testExecuteIndexPipelineExistsButFailedParsing() {
IngestService ingestService = createWithProcessors(
Map.of("mock", (factories, tag, description, config) -> new AbstractProcessor("mock", "description") {
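The behavior asserted by the new IngestServiceTests cases above comes down to where each of the two validation hooks runs. The following is a simplified, assumed sketch of that ordering, not the actual IngestService implementation; all type and method names here (SketchProcessor, SketchProcessorFactory, ValidationOrderSketch) are illustrative only.

import java.util.Map;
import java.util.function.Consumer;

interface SketchProcessor {
    // "extra" validation: only invoked on the rest/transport path (validatePipeline),
    // not while a cluster state update is being applied
    default void extraValidation() throws Exception {}
}

interface SketchProcessorFactory {
    // "ordinary" validation: the factory throws while constructing the processor
    SketchProcessor create(Map<String, Object> config) throws Exception;
}

final class ValidationOrderSketch {
    // rest/transport path: either kind of failure rejects the PUT pipeline request
    static void validatePipeline(SketchProcessorFactory factory, Map<String, Object> config) throws Exception {
        SketchProcessor processor = factory.create(config); // ordinary validation
        processor.extraValidation();                        // extra validation
    }

    // cluster-state path: only construction runs; if it fails, a placeholder pipeline is substituted
    static void applyClusterState(SketchProcessorFactory factory, Map<String, Object> config, Consumer<String> substitutePlaceholder) {
        try {
            factory.create(config); // ordinary validation only; extraValidation() is never called here
        } catch (Exception e) {
            substitutePlaceholder.accept("placeholder pipeline for this pipeline id");
        }
    }
}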
@@ -270,6 +270,50 @@ public void testLicenseChecks() throws Exception {
}
}

public void testLicenseChanges() throws Exception {
// initially the license is allowed
final boolean[] allowed = new boolean[] { true };
MockLicenseState licenseState = TestUtils.newMockLicenceState();
when(licenseState.isAllowed(RedactProcessor.REDACT_PROCESSOR_FEATURE)).thenAnswer(invocation -> allowed[0]);

Map<String, Object> config = new HashMap<>();
config.put("field", "to_redact");
config.put("patterns", List.of("%{MY_PATTERN:after}"));
config.put("pattern_definitions", Map.of("MY_PATTERN", "before"));
if (randomBoolean()) {
config.put("skip_if_unlicensed", false); // sometimes set to false explicitly, sometimes rely on the default (also false)
}

// constructing the processor is allowed, including extraValidation
RedactProcessor.Factory factory = new RedactProcessor.Factory(licenseState, MatcherWatchdog.noop());
RedactProcessor processor = factory.create(null, null, null, config);
processor.extraValidation();

// it works great as long as the feature is allowed for the license
final int times = randomIntBetween(1, 5);
for (int i = 0; i < times; i++) {
var ingestDoc = createIngestDoc(Map.of("to_redact", "before"));
var redacted = processor.execute(ingestDoc);
assertEquals("<after>", redacted.getFieldValue("to_redact", String.class));
}

// but stops working when the feature is not allowed for the license
allowed[0] = false;
for (int i = 0; i < times; i++) {
var ingestDoc = createIngestDoc(Map.of("to_redact", "before"));
ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> processor.execute(ingestDoc));
assertThat(e.getMessage(), containsString("current license is non-compliant for [redact_processor]"));
}

// and starts working again when the license allows
allowed[0] = true;
for (int i = 0; i < times; i++) {
var ingestDoc = createIngestDoc(Map.of("to_redact", "before"));
var redacted = processor.execute(ingestDoc);
assertEquals("<after>", redacted.getFieldValue("to_redact", String.class));
}
}

public void testMergeLongestRegion() {
var r = List.of(
new RedactProcessor.RegionTrackingMatchExtractor.Replacement(10, 20, "first"),
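The license gating exercised by testLicenseChanges above can be summarized with a minimal, self-contained sketch. This is an assumed illustration, not the actual RedactProcessor code: the class and field names are invented, and the real processor throws ElasticsearchSecurityException (as the test asserts) rather than a plain runtime exception.

import java.util.Map;
import java.util.function.BooleanSupplier;

final class LicenseGatedRedactSketch {
    private final BooleanSupplier featureAllowed; // e.g. () -> licenseState.isAllowed(FEATURE)
    private final boolean skipIfUnlicensed;       // mirrors the skip_if_unlicensed option (default false)

    LicenseGatedRedactSketch(BooleanSupplier featureAllowed, boolean skipIfUnlicensed) {
        this.featureAllowed = featureAllowed;
        this.skipIfUnlicensed = skipIfUnlicensed;
    }

    Map<String, Object> execute(Map<String, Object> doc) {
        // the license is checked on every execution, so behavior tracks license changes at runtime
        if (featureAllowed.getAsBoolean() == false) {
            if (skipIfUnlicensed) {
                return doc; // assumed: skip redaction rather than fail
            }
            throw new IllegalStateException("current license is non-compliant for [redact_processor]");
        }
        // ... redaction of the configured field would happen here ...
        return doc;
    }
}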
