diff --git a/build.gradle b/build.gradle index c74ba3077..238ae3f94 100644 --- a/build.gradle +++ b/build.gradle @@ -6,7 +6,7 @@ import org.opensearch.gradle.test.RestIntegTestTask buildscript { ext { - opensearch_version = System.getProperty("opensearch.version", "2.7.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "2.7.1-SNAPSHOT") isSnapshot = "true" == System.getProperty("build.snapshot", "true") buildVersionQualifier = System.getProperty("build.version_qualifier", "") version_tokens = opensearch_version.tokenize('-') diff --git a/src/main/java/org/opensearch/securityanalytics/mapper/MapperService.java b/src/main/java/org/opensearch/securityanalytics/mapper/MapperService.java index 52a0af849..4c2430815 100644 --- a/src/main/java/org/opensearch/securityanalytics/mapper/MapperService.java +++ b/src/main/java/org/opensearch/securityanalytics/mapper/MapperService.java @@ -72,9 +72,11 @@ public void createMappingAction(String indexName, String ruleTopic, String alias // since you can't update documents in non-write indices String index = indexName; boolean shouldUpsertIndexTemplate = IndexUtils.isConcreteIndex(indexName, this.clusterService.state()) == false; - if (IndexUtils.isDataStream(indexName, this.clusterService.state())) { + if (IndexUtils.isDataStream(indexName, this.clusterService.state()) || IndexUtils.isAlias(indexName, this.clusterService.state())) { + log.debug("{} is an alias or datastream. 
Fetching write index for create mapping action.", indexName); String writeIndex = IndexUtils.getWriteIndex(indexName, this.clusterService.state()); if (writeIndex != null) { + log.debug("Write index for {} is {}", indexName, writeIndex); index = writeIndex; } } @@ -86,6 +88,7 @@ public void onResponse(GetMappingsResponse getMappingsResponse) { applyAliasMappings(getMappingsResponse.getMappings(), ruleTopic, aliasMappings, partial, new ActionListener<>() { @Override public void onResponse(Collection createMappingResponse) { + log.debug("Completed create mappings for {}", indexName); // We will return ack==false if one of the requests returned that // else return ack==true Optional notAckd = createMappingResponse.stream() @@ -104,6 +107,7 @@ public void onResponse(Collection createMappingResponse) { @Override public void onFailure(Exception e) { + log.debug("Failed to create mappings for {}", indexName ); actionListener.onFailure(e); } }); diff --git a/src/main/java/org/opensearch/securityanalytics/mapper/MapperUtils.java b/src/main/java/org/opensearch/securityanalytics/mapper/MapperUtils.java index fb130b7e8..31858b6a5 100644 --- a/src/main/java/org/opensearch/securityanalytics/mapper/MapperUtils.java +++ b/src/main/java/org/opensearch/securityanalytics/mapper/MapperUtils.java @@ -5,19 +5,16 @@ package org.opensearch.securityanalytics.mapper; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; import org.apache.commons.lang3.tuple.Pair; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.collect.ImmutableOpenMap; - +import org.opensearch.securityanalytics.util.SecurityAnalyticsException; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; import java.util.List; import java.util.Set; -import java.util.stream.Collectors; -import org.opensearch.securityanalytics.util.SecurityAnalyticsException; public class MapperUtils { @@ -249,7 
+246,6 @@ public void onError(String error) { } }); mappingsTraverser.traverse(); - return presentPathsMappings; } } diff --git a/src/main/java/org/opensearch/securityanalytics/model/Detector.java b/src/main/java/org/opensearch/securityanalytics/model/Detector.java index adbbe6776..608eb43ef 100644 --- a/src/main/java/org/opensearch/securityanalytics/model/Detector.java +++ b/src/main/java/org/opensearch/securityanalytics/model/Detector.java @@ -158,12 +158,12 @@ public Detector(StreamInput sin) throws IOException { sin.readList(DetectorInput::readFrom), sin.readList(DetectorTrigger::readFrom), sin.readStringList(), - sin.readString(), - sin.readString(), - sin.readString(), - sin.readString(), - sin.readString(), - sin.readString(), + sin.readOptionalString(), + sin.readOptionalString(), + sin.readOptionalString(), + sin.readOptionalString(), + sin.readOptionalString(), + sin.readOptionalString(), sin.readMap(StreamInput::readString, StreamInput::readString) ); } @@ -196,8 +196,12 @@ public void writeTo(StreamOutput out) throws IOException { it.writeTo(out); } out.writeStringCollection(monitorIds); - out.writeString(ruleIndex); - + out.writeOptionalString(ruleIndex); + out.writeOptionalString(alertsIndex); + out.writeOptionalString(alertsHistoryIndex); + out.writeOptionalString(alertsHistoryIndexPattern); + out.writeOptionalString(findingsIndex); + out.writeOptionalString(findingsIndexPattern); out.writeMap(ruleIdMonitorIdMap, StreamOutput::writeString, StreamOutput::writeString); } diff --git a/src/main/java/org/opensearch/securityanalytics/rules/backend/OSQueryBackend.java b/src/main/java/org/opensearch/securityanalytics/rules/backend/OSQueryBackend.java index f7c09cfc7..07455945c 100644 --- a/src/main/java/org/opensearch/securityanalytics/rules/backend/OSQueryBackend.java +++ b/src/main/java/org/opensearch/securityanalytics/rules/backend/OSQueryBackend.java @@ -131,7 +131,7 @@ public OSQueryBackend(String ruleCategory, boolean collectErrors, boolean enable 
this.reEscapeChar = "\\"; this.reExpression = "%s: /%s/"; this.cidrExpression = "%s: \"%s\""; - this.fieldNullExpression = "%s: null"; + this.fieldNullExpression = "%s: (NOT [* TO *])"; this.unboundValueStrExpression = "\"%s\""; this.unboundValueNumExpression = "\"%s\""; this.unboundWildcardExpression = "%s"; @@ -331,9 +331,12 @@ public Object convertConditionFieldEqValQueryExpr(ConditionFieldEqualsValueExpre @Override public Object convertConditionValStr(ConditionValueExpression condition) throws SigmaValueError { + String field = getFinalValueField(); + ruleQueryFields.put(field, Map.of("type", "text", "analyzer", "rule_analyzer")); SigmaString value = (SigmaString) condition.getValue(); boolean containsWildcard = value.containsWildcard(); - return String.format(Locale.getDefault(), (containsWildcard? this.unboundWildcardExpression: this.unboundValueStrExpression), this.convertValueStr((SigmaString) condition.getValue())); + return String.format(Locale.getDefault(), (containsWildcard? 
this.unboundWildcardExpression: this.unboundValueStrExpression), + this.convertValueStr((SigmaString) condition.getValue())); } @Override diff --git a/src/main/java/org/opensearch/securityanalytics/transport/TransportIndexDetectorAction.java b/src/main/java/org/opensearch/securityanalytics/transport/TransportIndexDetectorAction.java index 0f1f620c8..6711c853d 100644 --- a/src/main/java/org/opensearch/securityanalytics/transport/TransportIndexDetectorAction.java +++ b/src/main/java/org/opensearch/securityanalytics/transport/TransportIndexDetectorAction.java @@ -107,9 +107,11 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -192,25 +194,29 @@ protected void doExecute(Task task, IndexDetectorRequest request, ActionListener } private void checkIndicesAndExecute( - Task task, - IndexDetectorRequest request, - ActionListener listener, - User user + Task task, + IndexDetectorRequest request, + ActionListener listener, + User user ) { + log.debug("check indices and execute began"); String [] detectorIndices = request.getDetector().getInputs().stream().flatMap(detectorInput -> detectorInput.getIndices().stream()).toArray(String[]::new); - SearchRequest searchRequest = new SearchRequest(detectorIndices).source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery()));; + SearchRequest searchRequest = new SearchRequest(detectorIndices).source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery())); + searchRequest.setCancelAfterTimeInterval(TimeValue.timeValueSeconds(30)); client.search(searchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse searchResponse) { + log.debug("check indices and execute 
completed. Took {} millis", searchResponse.getTook().millis()); AsyncIndexDetectorsAction asyncAction = new AsyncIndexDetectorsAction(user, task, request, listener); asyncAction.start(); } @Override public void onFailure(Exception e) { + log.debug("check indices and execute failed", e); if (e instanceof OpenSearchStatusException) { listener.onFailure(SecurityAnalyticsException.wrap( - new OpenSearchStatusException(String.format(Locale.getDefault(), "User doesn't have read permissions for one or more configured index %s", detectorIndices), RestStatus.FORBIDDEN) + new OpenSearchStatusException(String.format(Locale.getDefault(), "User doesn't have read permissions for one or more configured index %s", detectorIndices), RestStatus.FORBIDDEN) )); } else if (e instanceof IndexNotFoundException) { listener.onFailure(SecurityAnalyticsException.wrap( @@ -224,70 +230,110 @@ public void onFailure(Exception e) { }); } - private void createMonitorFromQueries(String index, List> rulesById, Detector detector, ActionListener> listener, WriteRequest.RefreshPolicy refreshPolicy) throws SigmaError, IOException { + private void createMonitorFromQueries(String index, List> rulesById, Detector detector, ActionListener> listener, WriteRequest.RefreshPolicy refreshPolicy, + List queryFieldNames) { List> docLevelRules = rulesById.stream().filter(it -> !it.getRight().isAggregationRule()).collect( - Collectors.toList()); + Collectors.toList()); List> bucketLevelRules = rulesById.stream().filter(it -> it.getRight().isAggregationRule()).collect( - Collectors.toList()); + Collectors.toList()); - List monitorRequests = new ArrayList<>(); + try { + List monitorRequests = new ArrayList<>(); - if (!docLevelRules.isEmpty()) { - monitorRequests.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, Monitor.NO_ID, Method.POST)); - } - if (!bucketLevelRules.isEmpty()) { - monitorRequests.addAll(buildBucketLevelMonitorRequests(bucketLevelRules, detector, refreshPolicy, Monitor.NO_ID, 
Method.POST)); - } - // Do nothing if detector doesn't have any monitor - if (monitorRequests.isEmpty()){ - listener.onResponse(Collections.emptyList()); - return; - } + if (!docLevelRules.isEmpty()) { + monitorRequests.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, Monitor.NO_ID, Method.POST, queryFieldNames)); + } + if (!bucketLevelRules.isEmpty()) { + StepListener> bucketLevelMonitorRequests = new StepListener<>(); + buildBucketLevelMonitorRequests(bucketLevelRules, detector, refreshPolicy, Monitor.NO_ID, Method.POST, bucketLevelMonitorRequests); + bucketLevelMonitorRequests.whenComplete(indexMonitorRequests -> { + log.debug("bucket level monitor request built"); + monitorRequests.addAll(indexMonitorRequests); + + // Do nothing if detector doesn't have any monitor + if (monitorRequests.isEmpty()) { + listener.onResponse(Collections.emptyList()); + return; + } - List monitorResponses = new ArrayList<>(); - StepListener addFirstMonitorStep = new StepListener(); - - // Indexing monitors in two steps in order to prevent all shards failed error from alerting - // https://github.com/opensearch-project/alerting/issues/646 - AlertingPluginInterface.INSTANCE.indexMonitor((NodeClient) client, monitorRequests.get(0), namedWriteableRegistry, addFirstMonitorStep); - addFirstMonitorStep.whenComplete(addedFirstMonitorResponse -> { - monitorResponses.add(addedFirstMonitorResponse); - int numberOfUnprocessedResponses = monitorRequests.size() - 1; - if (numberOfUnprocessedResponses == 0){ - listener.onResponse(monitorResponses); - } else { - GroupedActionListener monitorResponseListener = new GroupedActionListener( - new ActionListener>() { - @Override - public void onResponse(Collection indexMonitorResponse) { - monitorResponses.addAll(indexMonitorResponse.stream().collect(Collectors.toList())); - listener.onResponse(monitorResponses); - } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }, numberOfUnprocessedResponses); 
+ List monitorResponses = new ArrayList<>(); + StepListener addFirstMonitorStep = new StepListener(); + + // Indexing monitors in two steps in order to prevent all shards failed error from alerting + // https://github.com/opensearch-project/alerting/issues/646 + AlertingPluginInterface.INSTANCE.indexMonitor((NodeClient) client, monitorRequests.get(0), namedWriteableRegistry, addFirstMonitorStep); + addFirstMonitorStep.whenComplete(addedFirstMonitorResponse -> { + log.debug("first monitor created id {} of type {}", addedFirstMonitorResponse.getId(), addedFirstMonitorResponse.getMonitor().getMonitorType()); + monitorResponses.add(addedFirstMonitorResponse); + int numberOfUnprocessedResponses = monitorRequests.size() - 1; + if (numberOfUnprocessedResponses == 0) { + listener.onResponse(monitorResponses); + } else { + saveMonitors(monitorRequests, monitorResponses, numberOfUnprocessedResponses, listener); + } + }, listener::onFailure); + }, listener::onFailure); + } else { + // Do nothing if detector doesn't have any monitor + if (monitorRequests.isEmpty()) { + listener.onResponse(Collections.emptyList()); + return; + } + List monitorResponses = new ArrayList<>(); + StepListener indexDocLevelMonitorStep = new StepListener(); + // Indexing monitors in two steps in order to prevent all shards failed error from alerting + // https://github.com/opensearch-project/alerting/issues/646 + AlertingPluginInterface.INSTANCE.indexMonitor((NodeClient) client, monitorRequests.get(0), namedWriteableRegistry, indexDocLevelMonitorStep); + indexDocLevelMonitorStep.whenComplete(addedFirstMonitorResponse -> { + log.debug("first monitor created id {} of type {}", addedFirstMonitorResponse.getId(), addedFirstMonitorResponse.getMonitor().getMonitorType()); + monitorResponses.add(addedFirstMonitorResponse); + int numberOfUnprocessedResponses = monitorRequests.size() - 1; + if (numberOfUnprocessedResponses == 0) { + listener.onResponse(monitorResponses); + } else { + 
saveMonitors(monitorRequests, monitorResponses, numberOfUnprocessedResponses, listener); + } + }, listener::onFailure); + } + } catch (Exception ex) { + listener.onFailure(ex); + } + } - for(int i = 1; i < monitorRequests.size(); i++){ - AlertingPluginInterface.INSTANCE.indexMonitor((NodeClient) client, monitorRequests.get(i), namedWriteableRegistry, monitorResponseListener); + private void saveMonitors( + List monitorRequests, + List monitorResponses, + int numberOfUnprocessedResponses, + ActionListener> listener + ) { + GroupedActionListener monitorResponseListener = new GroupedActionListener( + new ActionListener>() { + @Override + public void onResponse(Collection indexMonitorResponses) { + monitorResponses.addAll(indexMonitorResponses.stream().collect(Collectors.toList())); + listener.onResponse(monitorResponses); } - } - }, - listener::onFailure - ); + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, numberOfUnprocessedResponses); + for (int i = 1; i < monitorRequests.size(); i++) { + AlertingPluginInterface.INSTANCE.indexMonitor((NodeClient) client, monitorRequests.get(i), namedWriteableRegistry, monitorResponseListener); + } } - private void updateMonitorFromQueries(String index, List> rulesById, Detector detector, ActionListener> listener, WriteRequest.RefreshPolicy refreshPolicy) throws SigmaError, IOException { + private void updateMonitorFromQueries(String index, List> rulesById, Detector detector, ActionListener> listener, WriteRequest.RefreshPolicy refreshPolicy, + List queryFieldNames) throws IOException { List monitorsToBeUpdated = new ArrayList<>(); List> bucketLevelRules = rulesById.stream().filter(it -> it.getRight().isAggregationRule()).collect( - Collectors.toList()); + Collectors.toList()); List monitorsToBeAdded = new ArrayList<>(); // Process bucket level monitors if (!bucketLevelRules.isEmpty()) { List ruleCategories = bucketLevelRules.stream().map(Pair::getRight).map(Rule::getCategory).distinct().collect( 
- Collectors.toList()); + Collectors.toList()); Map queryBackendMap = new HashMap<>(); for(String category: ruleCategories){ queryBackendMap.put(category, new OSQueryBackend(category, true, true)); @@ -295,6 +341,27 @@ private void updateMonitorFromQueries(String index, List> rul // Pair of RuleId - MonitorId for existing monitors of the detector Map monitorPerRule = detector.getRuleIdMonitorIdMap(); + GroupedActionListener groupedActionListener = new GroupedActionListener<>( + new ActionListener<>() { + @Override + public void onResponse(Collection indexMonitorRequests) { + onIndexMonitorRequestCreation( + monitorsToBeUpdated, + monitorsToBeAdded, + rulesById, + detector, + refreshPolicy, + queryFieldNames, + listener + ); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, bucketLevelRules.size() + ); for (Pair query: bucketLevelRules) { Rule rule = query.getRight(); @@ -302,39 +369,84 @@ private void updateMonitorFromQueries(String index, List> rul // Detect if the monitor should be added or updated if (monitorPerRule.containsKey(rule.getId())) { String monitorId = monitorPerRule.get(rule.getId()); - monitorsToBeUpdated.add(createBucketLevelMonitorRequest(query.getRight(), - detector, - refreshPolicy, - monitorId, - Method.PUT, - queryBackendMap.get(rule.getCategory()))); + createBucketLevelMonitorRequest(query.getRight(), + detector, + refreshPolicy, + monitorId, + Method.PUT, + queryBackendMap.get(rule.getCategory()), + new ActionListener<>() { + @Override + public void onResponse(IndexMonitorRequest indexMonitorRequest) { + monitorsToBeUpdated.add(indexMonitorRequest); + groupedActionListener.onResponse(indexMonitorRequest); + } + + @Override + public void onFailure(Exception e) { + log.error("Failed to create bucket level monitor request", e); + listener.onFailure(e); + } + + }); } else { - monitorsToBeAdded.add(createBucketLevelMonitorRequest(query.getRight(), - detector, - refreshPolicy, - Monitor.NO_ID, - 
Method.POST, - queryBackendMap.get(rule.getCategory()))); + createBucketLevelMonitorRequest(query.getRight(), + detector, + refreshPolicy, + Monitor.NO_ID, + Method.POST, + queryBackendMap.get(rule.getCategory()), + new ActionListener<>() { + @Override + public void onResponse(IndexMonitorRequest indexMonitorRequest) { + monitorsToBeAdded.add(indexMonitorRequest); + groupedActionListener.onResponse(indexMonitorRequest); + } + + @Override + public void onFailure(Exception e) { + log.error("Failed to create bucket level monitor request", e); + listener.onFailure(e); + } + }); } } } + } else { + onIndexMonitorRequestCreation( + monitorsToBeUpdated, + monitorsToBeAdded, + rulesById, + detector, + refreshPolicy, + queryFieldNames, + listener + ); } + } + private void onIndexMonitorRequestCreation(List monitorsToBeUpdated, + List monitorsToBeAdded, + List> rulesById, + Detector detector, + RefreshPolicy refreshPolicy, + List queryFieldNames, + ActionListener> listener) { List> docLevelRules = rulesById.stream().filter(it -> !it.getRight().isAggregationRule()).collect( - Collectors.toList()); + Collectors.toList()); // Process doc level monitors if (!docLevelRules.isEmpty()) { if (detector.getDocLevelMonitorId() == null) { - monitorsToBeAdded.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, Monitor.NO_ID, Method.POST)); + monitorsToBeAdded.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, Monitor.NO_ID, Method.POST, queryFieldNames)); } else { - monitorsToBeUpdated.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, detector.getDocLevelMonitorId(), Method.PUT)); + monitorsToBeUpdated.add(createDocLevelMonitorRequest(docLevelRules, detector, refreshPolicy, detector.getDocLevelMonitorId(), Method.PUT, queryFieldNames)); } } List monitorIdsToBeDeleted = detector.getRuleIdMonitorIdMap().values().stream().collect(Collectors.toList()); 
monitorIdsToBeDeleted.removeAll(monitorsToBeUpdated.stream().map(IndexMonitorRequest::getMonitorId).collect( - Collectors.toList())); + Collectors.toList())); updateAlertingMonitors(monitorsToBeAdded, monitorsToBeUpdated, monitorIdsToBeDeleted, refreshPolicy, listener); } @@ -353,11 +465,11 @@ private void updateMonitorFromQueries(String index, List> rul * @param listener Listener that accepts the list of updated monitors if the action was successful */ private void updateAlertingMonitors( - List monitorsToBeAdded, - List monitorsToBeUpdated, - List monitorsToBeDeleted, - RefreshPolicy refreshPolicy, - ActionListener> listener + List monitorsToBeAdded, + List monitorsToBeUpdated, + List monitorsToBeDeleted, + RefreshPolicy refreshPolicy, + ActionListener> listener ) { List updatedMonitors = new ArrayList<>(); @@ -374,25 +486,26 @@ private void updateAlertingMonitors( executeMonitorActionRequest(monitorsToBeUpdated, updateMonitorsStep); // 2. Update existing alerting monitors (based on the common rules) updateMonitorsStep.whenComplete(updateMonitorResponse -> { - if (updateMonitorResponse!=null && !updateMonitorResponse.isEmpty()) { - updatedMonitors.addAll(updateMonitorResponse); - } + if (updateMonitorResponse!=null && !updateMonitorResponse.isEmpty()) { + updatedMonitors.addAll(updateMonitorResponse); + } - StepListener> deleteMonitorStep = new StepListener<>(); - deleteAlertingMonitors(monitorsToBeDeleted, refreshPolicy, deleteMonitorStep); - // 3. Delete alerting monitors (rules that are not provided by the user) - deleteMonitorStep.whenComplete(deleteMonitorResponses -> - // Return list of all updated + newly added monitors - listener.onResponse(updatedMonitors), - // Handle delete monitors (step 3) - listener::onFailure); - }, // Handle update monitor failed (step 2) - listener::onFailure); + StepListener> deleteMonitorStep = new StepListener<>(); + deleteAlertingMonitors(monitorsToBeDeleted, refreshPolicy, deleteMonitorStep); + // 3. 
Delete alerting monitors (rules that are not provided by the user) + deleteMonitorStep.whenComplete(deleteMonitorResponses -> + // Return list of all updated + newly added monitors + listener.onResponse(updatedMonitors), + // Handle delete monitors (step 3) + listener::onFailure); + }, // Handle update monitor failed (step 2) + listener::onFailure); // Handle add failed (step 1) }, listener::onFailure); } - private IndexMonitorRequest createDocLevelMonitorRequest(List> queries, Detector detector, WriteRequest.RefreshPolicy refreshPolicy, String monitorId, RestRequest.Method restMethod) { + private IndexMonitorRequest createDocLevelMonitorRequest(List> queries, Detector detector, + WriteRequest.RefreshPolicy refreshPolicy, String monitorId, RestRequest.Method restMethod, List queryFieldNames) { List docLevelMonitorInputs = new ArrayList<>(); List docLevelQueries = new ArrayList<>(); @@ -410,7 +523,7 @@ private IndexMonitorRequest createDocLevelMonitorRequest(List tags.add(rule.getCategory()); tags.addAll(rule.getTags().stream().map(Value::getValue).collect(Collectors.toList())); - DocLevelQuery docLevelQuery = new DocLevelQuery(id, name, actualQuery, tags); + DocLevelQuery docLevelQuery = new DocLevelQuery(id, name, actualQuery, tags, queryFieldNames); docLevelQueries.add(docLevelQuery); } DocLevelMonitorInput docLevelMonitorInput = new DocLevelMonitorInput(detector.getName(), detector.getInputs().get(0).getIndices(), docLevelQueries); @@ -430,134 +543,189 @@ private IndexMonitorRequest createDocLevelMonitorRequest(List } Monitor monitor = new Monitor(monitorId, Monitor.NO_VERSION, detector.getName(), detector.getEnabled(), detector.getSchedule(), detector.getLastUpdateTime(), detector.getEnabledTime(), - Monitor.MonitorType.DOC_LEVEL_MONITOR, detector.getUser(), 1, docLevelMonitorInputs, triggers, Map.of(), - new DataSources(detector.getRuleIndex(), - detector.getFindingsIndex(), - detector.getFindingsIndexPattern(), - detector.getAlertsIndex(), - 
detector.getAlertsHistoryIndex(), - detector.getAlertsHistoryIndexPattern(), - DetectorMonitorConfig.getRuleIndexMappingsByType(detector.getDetectorType()), - true), PLUGIN_OWNER_FIELD); + Monitor.MonitorType.DOC_LEVEL_MONITOR, detector.getUser(), 1, docLevelMonitorInputs, triggers, Map.of(), + new DataSources(detector.getRuleIndex(), + detector.getFindingsIndex(), + detector.getFindingsIndexPattern(), + detector.getAlertsIndex(), + detector.getAlertsHistoryIndex(), + detector.getAlertsHistoryIndexPattern(), + DetectorMonitorConfig.getRuleIndexMappingsByType(detector.getDetectorType()), + true), PLUGIN_OWNER_FIELD); return new IndexMonitorRequest(monitorId, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, refreshPolicy, restMethod, monitor, null); } - private List buildBucketLevelMonitorRequests(List> queries, Detector detector, WriteRequest.RefreshPolicy refreshPolicy, String monitorId, RestRequest.Method restMethod) throws IOException, SigmaError { + private void buildBucketLevelMonitorRequests(List> queries, Detector detector, + WriteRequest.RefreshPolicy refreshPolicy, String monitorId, RestRequest.Method restMethod, + ActionListener> listener) { + log.debug("bucket level monitor request starting"); + log.debug("get rule field mappings request being made"); List ruleCategories = queries.stream().map(Pair::getRight).map(Rule::getCategory).distinct().collect( - Collectors.toList()); + Collectors.toList()); Map queryBackendMap = new HashMap<>(); for(String category: ruleCategories){ - queryBackendMap.put(category, new OSQueryBackend(category, true, true)); + try { + queryBackendMap.put(category, new OSQueryBackend(category, true, true)); + } catch (IOException e) { + logger.error("Failed to create OSQueryBackend from category", e); + listener.onFailure(e); + } } List monitorRequests = new ArrayList<>(); + GroupedActionListener bucketLevelMonitorRequestsListener = new GroupedActionListener<>( + new ActionListener<>() { + @Override + 
public void onResponse(Collection indexMonitorRequests) { + listener.onResponse(monitorRequests); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, queries.size()); for (Pair query: queries) { Rule rule = query.getRight(); // Creating bucket level monitor per each aggregation rule - if (rule.getAggregationQueries() != null){ - monitorRequests.add(createBucketLevelMonitorRequest( - query.getRight(), - detector, - refreshPolicy, - Monitor.NO_ID, - Method.POST, - queryBackendMap.get(rule.getCategory()))); + if (rule.getAggregationQueries() != null) { + createBucketLevelMonitorRequest( + query.getRight(), + detector, + refreshPolicy, + Monitor.NO_ID, + Method.POST, + queryBackendMap.get(rule.getCategory()), + new ActionListener<>() { + @Override + public void onResponse(IndexMonitorRequest indexMonitorRequest) { + monitorRequests.add(indexMonitorRequest); + bucketLevelMonitorRequestsListener.onResponse(indexMonitorRequest); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failed to build bucket level monitor requests", e); + bucketLevelMonitorRequestsListener.onFailure(e); + } + }); + } else { + log.debug("Aggregation query is null in rule {}", rule.getId()); + bucketLevelMonitorRequestsListener.onResponse(null); } } - return monitorRequests; } - private IndexMonitorRequest createBucketLevelMonitorRequest( - Rule rule, - Detector detector, - WriteRequest.RefreshPolicy refreshPolicy, - String monitorId, - RestRequest.Method restMethod, - QueryBackend queryBackend - ) throws SigmaError { - - List indices = detector.getInputs().get(0).getIndices(); - - AggregationQueries aggregationQueries = queryBackend.convertAggregation(rule.getAggregationItemsFromRule().get(0)); - - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder() - .seqNoAndPrimaryTerm(true) - .version(true) - // Build query string filter - .query(QueryBuilders.queryStringQuery(rule.getQueries().get(0).getValue())) - 
.aggregation(aggregationQueries.getAggBuilder()); - // input index can also be an index pattern or alias so we have to resolve it to concrete index - String concreteIndex = IndexUtils.getNewIndexByCreationDate( - clusterService.state(), - indexNameExpressionResolver, - indices.get(0) // taking first one is fine because we expect that all indices in list share same mappings - ); + private void createBucketLevelMonitorRequest( + Rule rule, + Detector detector, + WriteRequest.RefreshPolicy refreshPolicy, + String monitorId, + RestRequest.Method restMethod, + QueryBackend queryBackend, + ActionListener listener + ) { + log.debug(":create bucket level monitor response starting"); try { - GetIndexMappingsResponse getIndexMappingsResponse = client.execute( - GetIndexMappingsAction.INSTANCE, - new GetIndexMappingsRequest(concreteIndex)) - .actionGet(); - MappingMetadata mappingMetadata = getIndexMappingsResponse.mappings().get(concreteIndex); - List> pairs = MapperUtils.getAllAliasPathPairs(mappingMetadata); - boolean timeStampAliasPresent = pairs. - stream() - .anyMatch(p -> - TIMESTAMP_FIELD_ALIAS.equals(p.getLeft()) || TIMESTAMP_FIELD_ALIAS.equals(p.getRight())); - if(timeStampAliasPresent) { - BoolQueryBuilder boolQueryBuilder = searchSourceBuilder.query() == null - ? new BoolQueryBuilder() - : QueryBuilders.boolQuery().must(searchSourceBuilder.query()); - RangeQueryBuilder timeRangeFilter = QueryBuilders.rangeQuery(TIMESTAMP_FIELD_ALIAS) - .gt("{{period_end}}||-1h") - .lte("{{period_end}}") - .format("epoch_millis"); - boolQueryBuilder.must(timeRangeFilter); - searchSourceBuilder.query(boolQueryBuilder); - } - } catch (Exception e) { - log.error( - String.format(Locale.getDefault(), - "Unable to verify presence of timestamp alias for index [%s] in detector [%s]. 
Not setting time range filter for bucket level monitor.", - concreteIndex, detector.getName()), e); - } - - List bucketLevelMonitorInputs = new ArrayList<>(); - bucketLevelMonitorInputs.add(new SearchInput(indices, searchSourceBuilder)); - - List triggers = new ArrayList<>(); - BucketLevelTrigger bucketLevelTrigger = new BucketLevelTrigger(rule.getId(), rule.getTitle(), rule.getLevel(), aggregationQueries.getCondition(), - Collections.emptyList()); - triggers.add(bucketLevelTrigger); - - /** TODO - Think how to use detector trigger - List detectorTriggers = detector.getTriggers(); - for (DetectorTrigger detectorTrigger: detectorTriggers) { - String id = detectorTrigger.getId(); - String name = detectorTrigger.getName(); - String severity = detectorTrigger.getSeverity(); - List actions = detectorTrigger.getActions(); - Script condition = detectorTrigger.convertToCondition(); + List indices = detector.getInputs().get(0).getIndices(); + + AggregationQueries aggregationQueries = queryBackend.convertAggregation(rule.getAggregationItemsFromRule().get(0)); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder() + .seqNoAndPrimaryTerm(true) + .version(true) + // Build query string filter + .query(QueryBuilders.queryStringQuery(rule.getQueries().get(0).getValue())) + .aggregation(aggregationQueries.getAggBuilder()); + // input index can also be an index pattern or alias so we have to resolve it to concrete index + String concreteIndex = IndexUtils.getNewIndexByCreationDate( // index variable in method signature can also be an index pattern + clusterService.state(), + indexNameExpressionResolver, + indices.get(0) // taking first one is fine because we expect that all indices in list share same mappings + ); - BucketLevelTrigger bucketLevelTrigger1 = new BucketLevelTrigger(id, name, severity, condition, actions); - triggers.add(bucketLevelTrigger1); - } **/ + client.execute( + GetIndexMappingsAction.INSTANCE, + new GetIndexMappingsRequest(concreteIndex), + new 
ActionListener<>() { + @Override + public void onResponse(GetIndexMappingsResponse getIndexMappingsResponse) { + MappingMetadata mappingMetadata = getIndexMappingsResponse.mappings().get(concreteIndex); + List> pairs = null; + try { + pairs = MapperUtils.getAllAliasPathPairs(mappingMetadata); + } catch (IOException e) { + logger.debug("Failed to get alias path pairs from mapping metadata", e); + onFailure(e); + return; // pairs is still null and the listener has already been notified; falling through would NPE on pairs.stream() + } + boolean timeStampAliasPresent = pairs. + stream() + .anyMatch(p -> + TIMESTAMP_FIELD_ALIAS.equals(p.getLeft()) || TIMESTAMP_FIELD_ALIAS.equals(p.getRight())); + if(timeStampAliasPresent) { + BoolQueryBuilder boolQueryBuilder = searchSourceBuilder.query() == null + ? new BoolQueryBuilder() + : QueryBuilders.boolQuery().must(searchSourceBuilder.query()); + RangeQueryBuilder timeRangeFilter = QueryBuilders.rangeQuery(TIMESTAMP_FIELD_ALIAS) + .gt("{{period_end}}||-1h") + .lte("{{period_end}}") + .format("epoch_millis"); + boolQueryBuilder.must(timeRangeFilter); + searchSourceBuilder.query(boolQueryBuilder); + } - Monitor monitor = new Monitor(monitorId, Monitor.NO_VERSION, detector.getName(), detector.getEnabled(), detector.getSchedule(), detector.getLastUpdateTime(), detector.getEnabledTime(), - MonitorType.BUCKET_LEVEL_MONITOR, detector.getUser(), 1, bucketLevelMonitorInputs, triggers, Map.of(), - new DataSources(detector.getRuleIndex(), - detector.getFindingsIndex(), - detector.getFindingsIndexPattern(), - detector.getAlertsIndex(), - detector.getAlertsHistoryIndex(), - detector.getAlertsHistoryIndexPattern(), - DetectorMonitorConfig.getRuleIndexMappingsByType(detector.getDetectorType()), - true), PLUGIN_OWNER_FIELD); + List bucketLevelMonitorInputs = new ArrayList<>(); + bucketLevelMonitorInputs.add(new SearchInput(indices, searchSourceBuilder)); + + List triggers = new ArrayList<>(); + BucketLevelTrigger bucketLevelTrigger = new BucketLevelTrigger(rule.getId(), rule.getTitle(), rule.getLevel(), aggregationQueries.getCondition(), + 
Collections.emptyList()); + triggers.add(bucketLevelTrigger); + + /** TODO - Think how to use detector trigger + List detectorTriggers = detector.getTriggers(); + for (DetectorTrigger detectorTrigger: detectorTriggers) { + String id = detectorTrigger.getId(); + String name = detectorTrigger.getName(); + String severity = detectorTrigger.getSeverity(); + List actions = detectorTrigger.getActions(); + Script condition = detectorTrigger.convertToCondition(); + + BucketLevelTrigger bucketLevelTrigger1 = new BucketLevelTrigger(id, name, severity, condition, actions); + triggers.add(bucketLevelTrigger1); + } **/ + + Monitor monitor = new Monitor(monitorId, Monitor.NO_VERSION, detector.getName(), detector.getEnabled(), detector.getSchedule(), detector.getLastUpdateTime(), detector.getEnabledTime(), + MonitorType.BUCKET_LEVEL_MONITOR, detector.getUser(), 1, bucketLevelMonitorInputs, triggers, Map.of(), + new DataSources(detector.getRuleIndex(), + detector.getFindingsIndex(), + detector.getFindingsIndexPattern(), + detector.getAlertsIndex(), + detector.getAlertsHistoryIndex(), + detector.getAlertsHistoryIndexPattern(), + DetectorMonitorConfig.getRuleIndexMappingsByType(detector.getDetectorType()), + true), PLUGIN_OWNER_FIELD); + + listener.onResponse(new IndexMonitorRequest(monitorId, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, refreshPolicy, restMethod, monitor, null)); + } - return new IndexMonitorRequest(monitorId, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM, refreshPolicy, restMethod, monitor, null); + @Override + public void onFailure(Exception e) { + log.error( + String.format(Locale.getDefault(), + "Unable to verify presence of timestamp alias for index [%s] in detector [%s]. 
Not setting time range filter for bucket level monitor.", + concreteIndex, detector.getName()), e); + listener.onFailure(e); + } + }); + } catch (SigmaError e) { + log.error("Failed to create bucket level monitor request", e); + listener.onFailure(e); + } } /** @@ -566,8 +734,8 @@ private IndexMonitorRequest createBucketLevelMonitorRequest( * @param listener actionListener for handling updating/creating monitors */ public void executeMonitorActionRequest( - List indexMonitors, - ActionListener> listener) { + List indexMonitors, + ActionListener> listener) { // In the case of not provided monitors, just return empty list if (indexMonitors == null || indexMonitors.isEmpty()) { @@ -576,16 +744,16 @@ public void executeMonitorActionRequest( } GroupedActionListener monitorResponseListener = new GroupedActionListener( - new ActionListener>() { - @Override - public void onResponse(Collection indexMonitorResponse) { - listener.onResponse(indexMonitorResponse.stream().collect(Collectors.toList())); - } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }, indexMonitors.size()); + new ActionListener>() { + @Override + public void onResponse(Collection indexMonitorResponse) { + listener.onResponse(indexMonitorResponse.stream().collect(Collectors.toList())); + } + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, indexMonitors.size()); // Persist monitors sequentially for (IndexMonitorRequest req: indexMonitors) { @@ -674,17 +842,22 @@ class AsyncIndexDetectorsAction { } void start() { + log.debug("stash context"); try { TransportIndexDetectorAction.this.threadPool.getThreadContext().stashContext(); + log.debug("log type check : {}", request.getDetector().getDetectorType()); if (!detectorIndices.detectorIndexExists()) { + log.debug("detector index creation"); detectorIndices.initDetectorIndex(new ActionListener<>() { @Override public void onResponse(CreateIndexResponse response) { + log.debug("detector index 
created in {}"); try { onCreateMappingsResponse(response); prepareDetectorIndexing(); } catch (IOException e) { + log.debug("detector index creation failed", e); onFailures(e); } } @@ -695,16 +868,19 @@ public void onFailure(Exception e) { } }); } else if (!IndexUtils.detectorIndexUpdated) { + log.debug("detector index update mapping"); IndexUtils.updateIndexMapping( Detector.DETECTORS_INDEX, DetectorIndices.detectorMappings(), clusterService.state(), client.admin().indices(), new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse response) { + log.debug("detector index mapping updated"); onUpdateMappingsResponse(response); try { prepareDetectorIndexing(); } catch (IOException e) { + log.debug("detector index mapping FAILED updation", e); onFailures(e); } } @@ -749,24 +925,29 @@ void createDetector() { if (!detector.getInputs().isEmpty()) { try { + log.debug("init rule index template"); ruleTopicIndices.initRuleTopicIndexTemplate(new ActionListener<>() { @Override public void onResponse(AcknowledgedResponse acknowledgedResponse) { + log.debug("init rule index template ack"); initRuleIndexAndImportRules(request, new ActionListener<>() { @Override public void onResponse(List monitorResponses) { + log.debug("monitors indexed"); request.getDetector().setMonitorIds(getMonitorIds(monitorResponses)); request.getDetector().setRuleIdMonitorIdMap(mapMonitorIds(monitorResponses)); try { indexDetector(); - } catch (IOException e) { + } catch (Exception e) { + logger.debug("create detector failed", e); onFailures(e); } } @Override public void onFailure(Exception e) { + logger.debug("import rules failed", e); onFailures(e); } }); @@ -774,10 +955,12 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception e) { + logger.debug("init rules index failed", e); onFailures(e); } }); - } catch (IOException e) { + } catch (Exception e) { + logger.debug("init rules index failed", e); onFailures(e); } } @@ -893,11 +1076,13 @@ public void 
initRuleIndexAndImportRules(IndexDetectorRequest request, ActionList new ActionListener<>() { @Override public void onResponse(CreateIndexResponse response) { + log.debug("prepackaged rule index created"); ruleIndices.onCreateMappingsResponse(response, true); ruleIndices.importRules(RefreshPolicy.IMMEDIATE, indexTimeout, new ActionListener<>() { @Override public void onResponse(BulkResponse response) { + log.debug("rules imported"); if (!response.hasFailures()) { importRules(request, listener); } else { @@ -907,6 +1092,7 @@ public void onResponse(BulkResponse response) { @Override public void onFailure(Exception e) { + log.debug("failed to import rules", e); onFailures(e); } }); @@ -1018,12 +1204,14 @@ public void importRules(IndexDetectorRequest request, ActionListener() { @Override public void onResponse(SearchResponse response) { if (response.isTimedOut()) { onFailures(new OpenSearchStatusException(response.toString(), RestStatus.REQUEST_TIMEOUT)); } + logger.debug("prepackaged rules fetch success"); SearchHits hits = response.getHits(); List> queries = new ArrayList<>(); @@ -1046,13 +1234,10 @@ public void onResponse(SearchResponse response) { } else if (detectorInput.getCustomRules().size() > 0) { onFailures(new OpenSearchStatusException("Custom Rule Index not found", RestStatus.NOT_FOUND)); } else { - if (request.getMethod() == RestRequest.Method.POST) { - createMonitorFromQueries(logIndex, queries, detector, listener, request.getRefreshPolicy()); - } else if (request.getMethod() == RestRequest.Method.PUT) { - updateMonitorFromQueries(logIndex, queries, detector, listener, request.getRefreshPolicy()); - } + resolveRuleFieldNamesAndUpsertMonitorFromQueries(queries, detector, logIndex, listener); } - } catch (IOException | SigmaError e) { + } catch (Exception e) { + logger.debug("failed to fetch prepackaged rules", e); onFailures(e); } } @@ -1064,6 +1249,60 @@ public void onFailure(Exception e) { }); } + private void 
resolveRuleFieldNamesAndUpsertMonitorFromQueries(List> queries, Detector detector, String logIndex, ActionListener> listener) { + logger.debug("PERF_DEBUG_SAP: Fetching alias path pairs to construct rule_field_names"); + long start = System.currentTimeMillis(); + Set ruleFieldNames = new HashSet<>(); + + for (Pair query : queries) { + List queryFieldNames = query.getValue().getQueryFieldNames().stream().map(Value::getValue).collect(Collectors.toList()); + ruleFieldNames.addAll(queryFieldNames); + } + client.execute(GetIndexMappingsAction.INSTANCE, new GetIndexMappingsRequest(logIndex), new ActionListener<>() { + @Override + public void onResponse(GetIndexMappingsResponse getMappingsViewResponse) { + try { + List> aliasPathPairs; + + aliasPathPairs = MapperUtils.getAllAliasPathPairs(getMappingsViewResponse.getMappings().get(logIndex)); + for (Pair aliasPathPair : aliasPathPairs) { + if (ruleFieldNames.contains(aliasPathPair.getLeft())) { + ruleFieldNames.remove(aliasPathPair.getLeft()); + ruleFieldNames.add(aliasPathPair.getRight()); + } + } + long took = System.currentTimeMillis() - start; + log.debug("completed collecting rule_field_names in {} millis", took); + } catch (Exception e) { + logger.error("Failure in parsing rule field names/aliases while " + + (detector.getId() == null ? "creating" : "updating") + + " detector. 
Not optimizing detector queries with relevant fields", e); + ruleFieldNames.clear(); + } + try { + upsertMonitorQueries(queries, detector, listener, ruleFieldNames, logIndex); + } catch (Exception e) { + log.error("Caught exception upserting monitor queries", e); + listener.onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + log.error("Failed to fetch mappings view response for log index " + logIndex, e); + listener.onFailure(e); + } + }); + } + + private void upsertMonitorQueries(List> queries, Detector detector, ActionListener> listener, Set ruleFieldNames, String logIndex) throws SigmaError, IOException { + if (request.getMethod() == Method.POST) { + createMonitorFromQueries(logIndex, queries, detector, listener, request.getRefreshPolicy(), new ArrayList<>(ruleFieldNames)); + } else if (request.getMethod() == Method.PUT) { + updateMonitorFromQueries(logIndex, queries, detector, listener, request.getRefreshPolicy(), new ArrayList<>(ruleFieldNames)); + } + } + @SuppressWarnings("unchecked") public void importCustomRules(Detector detector, DetectorInput detectorInput, List> queries, ActionListener> listener) { final String logIndex = detectorInput.getIndices().get(0); @@ -1077,6 +1316,7 @@ public void importCustomRules(Detector detector, DetectorInput detectorInput, Li .query(queryBuilder) .size(10000)); + logger.debug("importing custom rules"); client.search(searchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse response) { @@ -1084,6 +1324,7 @@ public void onResponse(SearchResponse response) { onFailures(new OpenSearchStatusException(response.toString(), RestStatus.REQUEST_TIMEOUT)); } + logger.debug("custom rules fetch successful"); SearchHits hits = response.getHits(); try { @@ -1099,12 +1340,8 @@ public void onResponse(SearchResponse response) { queries.add(Pair.of(id, rule)); } - if (request.getMethod() == RestRequest.Method.POST) { - createMonitorFromQueries(logIndex, queries, detector, listener, 
request.getRefreshPolicy()); - } else if (request.getMethod() == RestRequest.Method.PUT) { - updateMonitorFromQueries(logIndex, queries, detector, listener, request.getRefreshPolicy()); - } - } catch (IOException | SigmaError ex) { + resolveRuleFieldNamesAndUpsertMonitorFromQueries(queries, detector, logIndex, listener); + } catch (Exception ex) { onFailures(ex); } } @@ -1131,9 +1368,11 @@ public void indexDetector() throws IOException { .timeout(indexTimeout); } + log.debug("indexing detector"); client.index(indexRequest, new ActionListener<>() { @Override public void onResponse(IndexResponse response) { + log.debug("detector indexed success."); Detector responseDetector = request.getDetector(); responseDetector.setId(response.getId()); onOperation(response, responseDetector); @@ -1174,7 +1413,7 @@ private void finishHim(Detector detector, Exception t) { private List getMonitorIds(List monitorResponses) { return monitorResponses.stream().map(IndexMonitorResponse::getId).collect( - Collectors.toList()); + Collectors.toList()); } /** @@ -1186,17 +1425,17 @@ private List getMonitorIds(List monitorResponses) private Map mapMonitorIds(List monitorResponses) { return monitorResponses.stream().collect( Collectors.toMap( - // In the case of bucket level monitors rule id is trigger id - it -> { - if (MonitorType.BUCKET_LEVEL_MONITOR == it.getMonitor().getMonitorType()) { - return it.getMonitor().getTriggers().get(0).getId(); - } else { - return Detector.DOC_LEVEL_MONITOR; - } - }, - IndexMonitorResponse::getId + // In the case of bucket level monitors rule id is trigger id + it -> { + if (MonitorType.BUCKET_LEVEL_MONITOR == it.getMonitor().getMonitorType()) { + return it.getMonitor().getTriggers().get(0).getId(); + } else { + return Detector.DOC_LEVEL_MONITOR; + } + }, + IndexMonitorResponse::getId ) - ); + ); } } diff --git a/src/test/java/org/opensearch/securityanalytics/TestHelpers.java b/src/test/java/org/opensearch/securityanalytics/TestHelpers.java index 
1937a4df5..00e87e8d6 100644 --- a/src/test/java/org/opensearch/securityanalytics/TestHelpers.java +++ b/src/test/java/org/opensearch/securityanalytics/TestHelpers.java @@ -186,6 +186,38 @@ public static String randomRule() { "level: high"; } + + + public static String randomNullRule() { + return "title: null field\n" + + "id: 5f92fff9-82e2-48eb-8fc1-8b133556a551\n" + + "description: Detects remote RPC calls to possibly abuse remote encryption service via MS-EFSR\n" + + "references:\n" + + " - https://attack.mitre.org/tactics/TA0008/\n" + + " - https://msrc.microsoft.com/update-guide/vulnerability/CVE-2021-36942\n" + + " - https://github.com/jsecurity101/MSRPC-to-ATTACK/blob/main/documents/MS-EFSR.md\n" + + " - https://github.com/zeronetworks/rpcfirewall\n" + + " - https://zeronetworks.com/blog/stopping_lateral_movement_via_the_rpc_firewall/\n" + + "tags:\n" + + " - attack.defense_evasion\n" + + "status: experimental\n" + + "author: Sagie Dulce, Dekel Paz\n" + + "date: 2022/01/01\n" + + "modified: 2022/01/01\n" + + "logsource:\n" + + " product: rpc_firewall\n" + + " category: application\n" + + " definition: 'Requirements: install and apply the RPC Firew all to all processes with \"audit:true action:block uuid:df1941c5-fe89-4e79-bf10-463657acf44d or c681d488-d850-11d0-8c52-00c04fd90f7e'\n" + + "detection:\n" + + " selection:\n" + + " EventID: 22\n" + + " RecordNumber: null\n" + + " condition: selection\n" + + "falsepositives:\n" + + " - Legitimate usage of remote file encryption\n" + + "level: high"; + } + public static String randomRuleForMappingView(String field) { return "title: Remote Encrypting File System Abuse\n" + "id: 5f92fff9-82e2-48eb-8fc1-8b133556a551\n" + @@ -1460,6 +1492,84 @@ public static String randomDocOnlyNumericAndText(int severity, int version, Stri return String.format(Locale.ROOT, doc, severity, version, opCode); } + //Add IPs in HostName field. 
+ public static String randomDocWithIpIoc(int severity, int version, String ioc) { + String doc = "{\n" + + "\"EventTime\":\"2020-02-04T14:59:39.343541+00:00\",\n" + + "\"HostName\":\"%s\",\n" + + "\"Keywords\":\"9223372036854775808\",\n" + + "\"SeverityValue\":%s,\n" + + "\"Severity\":\"INFO\",\n" + + "\"EventID\":22,\n" + + "\"SourceName\":\"Microsoft-Windows-Sysmon\",\n" + + "\"ProviderGuid\":\"{5770385F-C22A-43E0-BF4C-06F5698FFBD9}\",\n" + + "\"Version\":%s,\n" + + "\"TaskValue\":22,\n" + + "\"OpcodeValue\":0,\n" + + "\"RecordNumber\":9532,\n" + + "\"ExecutionProcessID\":1996,\n" + + "\"ExecutionThreadID\":2616,\n" + + "\"Channel\":\"Microsoft-Windows-Sysmon/Operational\",\n" + + "\"Domain\":\"NT AUTHORITY\",\n" + + "\"AccountName\":\"SYSTEM\",\n" + + "\"UserID\":\"S-1-5-18\",\n" + + "\"AccountType\":\"User\",\n" + + "\"Message\":\"Dns query:\\r\\nRuleName: \\r\\nUtcTime: 2020-02-04 14:59:38.349\\r\\nProcessGuid: {b3c285a4-3cda-5dc0-0000-001077270b00}\\r\\nProcessId: 1904\\r\\nQueryName: EC2AMAZ-EPO7HKA\\r\\nQueryStatus: 0\\r\\nQueryResults: 172.31.46.38;\\r\\nImage: C:\\\\Program Files\\\\nxlog\\\\nxlog.exe\",\n" + + "\"Category\":\"Dns query (rule: DnsQuery)\",\n" + + "\"Opcode\":\"blahblah\",\n" + + "\"UtcTime\":\"2020-02-04 14:59:38.349\",\n" + + "\"ProcessGuid\":\"{b3c285a4-3cda-5dc0-0000-001077270b00}\",\n" + + "\"ProcessId\":\"1904\",\"QueryName\":\"EC2AMAZ-EPO7HKA\",\"QueryStatus\":\"0\",\n" + + "\"QueryResults\":\"172.31.46.38;\",\n" + + "\"Image\":\"C:\\\\Program Files\\\\nxlog\\\\regsvr32.exe\",\n" + + "\"EventReceivedTime\":\"2020-02-04T14:59:40.780905+00:00\",\n" + + "\"SourceModuleName\":\"in\",\n" + + "\"SourceModuleType\":\"im_msvistalog\",\n" + + "\"CommandLine\": \"eachtest\",\n" + + "\"Initiated\": \"true\"\n" + + "}"; + return String.format(Locale.ROOT, doc, ioc, severity, version); + + } + + public static String randomDocWithNullField() { + return "{\n" + + "\"@timestamp\":\"2020-02-04T14:59:39.343541+00:00\",\n" + + 
"\"EventTime\":\"2020-02-04T14:59:39.343541+00:00\",\n" + + "\"HostName\":\"EC2AMAZ-EPO7HKA\",\n" + + "\"Keywords\":\"9223372036854775808\",\n" + + "\"SeverityValue\":2,\n" + + "\"Severity\":\"INFO\",\n" + + "\"EventID\":22,\n" + + "\"SourceName\":\"Microsoft-Windows-Sysmon\",\n" + + "\"ProviderGuid\":\"{5770385F-C22A-43E0-BF4C-06F5698FFBD9}\",\n" + + "\"Version\":5,\n" + + "\"TaskValue\":22,\n" + + "\"OpcodeValue\":0,\n" + + "\"RecordNumber\":null,\n" + + "\"ExecutionProcessID\":1996,\n" + + "\"ExecutionThreadID\":2616,\n" + + "\"Channel\":\"Microsoft-Windows-Sysmon/Operational\",\n" + + "\"Domain\":\"NTAUTHORITY\",\n" + + "\"AccountName\":\"SYSTEM\",\n" + + "\"UserID\":\"S-1-5-18\",\n" + + "\"AccountType\":\"User\",\n" + + "\"Message\":\"Dns query:\\r\\nRuleName: \\r\\nUtcTime: 2020-02-04 14:59:38.349\\r\\nProcessGuid: {b3c285a4-3cda-5dc0-0000-001077270b00}\\r\\nProcessId: 1904\\r\\nQueryName: EC2AMAZ-EPO7HKA\\r\\nQueryStatus: 0\\r\\nQueryResults: 172.31.46.38;\\r\\nImage: C:\\\\Program Files\\\\nxlog\\\\nxlog.exe\",\n" + + "\"Category\":\"Dns query (rule: DnsQuery)\",\n" + + "\"Opcode\":\"Info\",\n" + + "\"UtcTime\":\"2020-02-04 14:59:38.349\",\n" + + "\"ProcessGuid\":\"{b3c285a4-3cda-5dc0-0000-001077270b00}\",\n" + + "\"ProcessId\":\"1904\",\"QueryName\":\"EC2AMAZ-EPO7HKA\",\"QueryStatus\":\"0\",\n" + + "\"QueryResults\":\"172.31.46.38;\",\n" + + "\"Image\":\"C:\\\\Program Files\\\\nxlog\\\\regsvr32.exe\",\n" + + "\"EventReceivedTime\":\"2020-02-04T14:59:40.780905+00:00\",\n" + + "\"SourceModuleName\":\"in\",\n" + + "\"SourceModuleType\":\"im_msvistalog\",\n" + + "\"CommandLine\": \"eachtest\",\n" + + "\"Initiated\": \"true\"\n" + + "}"; + } + public static String randomDoc() { return "{\n" + "\"@timestamp\":\"2020-02-04T14:59:39.343541+00:00\",\n" + diff --git a/src/test/java/org/opensearch/securityanalytics/mapper/MappingsTraverserTests.java b/src/test/java/org/opensearch/securityanalytics/mapper/MappingsTraverserTests.java index e25b7084e..811c65d75 100644 
--- a/src/test/java/org/opensearch/securityanalytics/mapper/MappingsTraverserTests.java +++ b/src/test/java/org/opensearch/securityanalytics/mapper/MappingsTraverserTests.java @@ -132,7 +132,7 @@ public void onError(String error) { public void testTraverseInvalidMappings() { // 1. Parse mappings from MappingMetadata - ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); + Map mappings = new HashMap<>(); Map m = new HashMap<>(); m.put("netflow.event_data.SourceAddress", Map.of("type", "ip")); m.put("netflow.event_data.SourcePort", Map.of("type", "integer")); diff --git a/src/test/java/org/opensearch/securityanalytics/model/WriteableTests.java b/src/test/java/org/opensearch/securityanalytics/model/WriteableTests.java index 2326b541d..01f1cea70 100644 --- a/src/test/java/org/opensearch/securityanalytics/model/WriteableTests.java +++ b/src/test/java/org/opensearch/securityanalytics/model/WriteableTests.java @@ -13,15 +13,42 @@ import java.io.IOException; import java.util.List; +import static org.opensearch.securityanalytics.TestHelpers.parser; import static org.opensearch.securityanalytics.TestHelpers.randomDetector; import static org.opensearch.securityanalytics.TestHelpers.randomUser; import static org.opensearch.securityanalytics.TestHelpers.randomUserEmpty; +import static org.opensearch.securityanalytics.TestHelpers.toJsonStringWithUser; public class WriteableTests extends OpenSearchTestCase { public void testDetectorAsStream() throws IOException { Detector detector = randomDetector(List.of()); detector.setInputs(List.of(new DetectorInput("", List.of(), List.of(), List.of()))); + logger.error(toJsonStringWithUser(detector)); + BytesStreamOutput out = new BytesStreamOutput(); + detector.writeTo(out); + StreamInput sin = StreamInput.wrap(out.bytes().toBytesRef().bytes); + Detector newDetector = new Detector(sin); + Assert.assertEquals("Round tripping Detector doesn't work", detector, newDetector); + } + + public void testDetector() throws IOException { // 
an edge case of detector serialization that failed testDetectorAsAStream() intermittently + String detectorString = "{\"type\":\"detector\",\"name\":\"MczAuRCrve\",\"detector_type\":\"test_windows\"," + + "\"user\":{\"name\":\"QhKrfthgxw\",\"backend_roles\":[\"uYvGLCPhfX\",\"fOLkcRxMWR\"],\"roles\"" + + ":[\"YuucNpVzTm\",\"all_access\"],\"custom_attribute_names\":[\"test_attr=test\"]," + + "\"user_requested_tenant\":null},\"threat_intel_enabled\":false,\"enabled\":false,\"enabled_time\"" + + ":null,\"schedule\":{\"period\":{\"interval\":5,\"unit\":\"MINUTES\"}},\"inputs\":[{\"detector_input\"" + + ":{\"description\":\"\",\"indices\":[],\"custom_rules\":[],\"pre_packaged_rules\":[]}}],\"triggers\"" + + ":[{\"id\":\"SiWfaosBBiNA8if0E1bC\",\"name\":\"windows-trigger\",\"severity\":\"1\",\"types\"" + + ":[\"test_windows\"],\"ids\":[\"QuarksPwDump Clearing Access History\"],\"sev_levels\":[\"high\"]," + + "\"tags\":[\"T0008\"],\"actions\":[],\"detection_types\":[\"rules\"]}],\"last_update_time\":" + + "1698300892093,\"monitor_id\":[\"\"],\"workflow_ids\":[],\"bucket_monitor_id_rule_id\"" + + ":{},\"rule_topic_index\":\"\",\"alert_index\":\"\",\"alert_history_index\":\"\"," + + "\"alert_history_index_pattern\":\"\",\"findings_index\":\"\",\"findings_index_pattern\":\"\"}"; + Detector detector = Detector.parse(parser(detectorString), null, null); +// Detector detector = randomDetector(List.of()); +// detector.setInputs(List.of(new DetectorInput("", List.of(), List.of(), List.of()))); +// logger.error(toJsonStringWithUser(detector)); BytesStreamOutput out = new BytesStreamOutput(); detector.writeTo(out); StreamInput sin = StreamInput.wrap(out.bytes().toBytesRef().bytes); diff --git a/src/test/java/org/opensearch/securityanalytics/resthandler/DetectorMonitorRestApiIT.java b/src/test/java/org/opensearch/securityanalytics/resthandler/DetectorMonitorRestApiIT.java index d5d86a890..50ff6fb14 100644 --- 
a/src/test/java/org/opensearch/securityanalytics/resthandler/DetectorMonitorRestApiIT.java +++ b/src/test/java/org/opensearch/securityanalytics/resthandler/DetectorMonitorRestApiIT.java @@ -61,10 +61,10 @@ public void testRemoveDocLevelRuleAddAggregationRules_verifyFindings_success() t Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -78,11 +78,11 @@ public void testRemoveDocLevelRuleAddAggregationRules_verifyFindings_success() t assertEquals("Create detector failed", RestStatus.CREATED, restStatus(createResponse)); String request = "{\n" + - " \"query\" : {\n" + - " \"match_all\":{\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match_all\":{\n" + + " }\n" + + " }\n" + + "}"; SearchResponse response = executeSearchAndGetResponse(DetectorMonitorConfig.getRuleIndex(randomDetectorType()), request, true); assertEquals(5, response.getHits().getTotalHits().value); @@ -90,12 +90,12 @@ public void testRemoveDocLevelRuleAddAggregationRules_verifyFindings_success() t Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; // Verify that one document level monitor is created List hits = executeSearch(Detector.DETECTORS_INDEX, request); @@ -115,7 +115,7 @@ public void 
testRemoveDocLevelRuleAddAggregationRules_verifyFindings_success() t String avgTermRuleId = createRule(randomAggregationRule( "avg", " > 1")); // Update detector and empty doc level rules so detector contains only one aggregation rule DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), List.of(new DetectorRule(sumRuleId), new DetectorRule(avgTermRuleId)), - Collections.emptyList()); + Collections.emptyList()); Detector updatedDetector = randomDetectorWithInputs(List.of(input)); Response updateResponse = makeRequest(client(), "PUT", SecurityAnalyticsPlugin.DETECTOR_BASE_URI + "/" + detectorId, Collections.emptyMap(), toHttpEntity(updatedDetector)); @@ -155,7 +155,7 @@ public void testRemoveDocLevelRuleAddAggregationRules_verifyFindings_success() t List> findings = (List)getFindingsBody.get("findings"); for(Map finding : findings) { Set aggRulesFinding = ((List>)finding.get("queries")).stream().map(it -> it.get("id").toString()).collect( - Collectors.toSet()); + Collectors.toSet()); // Bucket monitor finding will have one rule String aggRuleId = aggRulesFinding.iterator().next(); @@ -187,10 +187,10 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -200,7 +200,7 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw String maxRuleId = createRule(randomAggregationRule( "max", " > 2")); List detectorRules = List.of(new DetectorRule(maxRuleId)); 
DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - Collections.emptyList()); + Collections.emptyList()); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); @@ -209,22 +209,22 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match_all\":{\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match_all\":{\n" + + " }\n" + + " }\n" + + "}"; SearchResponse response = executeSearchAndGetResponse(Rule.CUSTOM_RULES_INDEX, request, true); assertEquals(1, response.getHits().getTotalHits().value); request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; // Verify that one bucket level monitor is created List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -239,7 +239,7 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw String randomDocRuleId = createRule(randomRule()); List prepackagedRules = getRandomPrePackagedRules(); input = new DetectorInput("windows detector for security analytics", List.of("windows"), List.of(new DetectorRule(randomDocRuleId)), - prepackagedRules.stream().map(DetectorRule::new).collect(Collectors.toList())); + prepackagedRules.stream().map(DetectorRule::new).collect(Collectors.toList())); Detector updatedDetector = randomDetectorWithInputs(List.of(input)); Response updateResponse = makeRequest(client(), "PUT", SecurityAnalyticsPlugin.DETECTOR_BASE_URI + "/" + detectorId, 
Collections.emptyMap(), toHttpEntity(updatedDetector)); @@ -264,11 +264,11 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw // Verify rules request = "{\n" + - " \"query\" : {\n" + - " \"match_all\":{\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match_all\":{\n" + + " }\n" + + " }\n" + + "}"; response = executeSearchAndGetResponse(DetectorMonitorConfig.getRuleIndex(randomDetectorType()), request, true); assertEquals(6, response.getHits().getTotalHits().value); @@ -299,7 +299,7 @@ public void testReplaceAggregationRuleWithDocRule_verifyFindings_success() throw List foundDocIds = new ArrayList<>(); for(Map finding : findings) { Set aggRulesFinding = ((List>)finding.get("queries")).stream().map(it -> it.get("id").toString()).collect( - Collectors.toSet()); + Collectors.toSet()); assertTrue(docRuleIds.containsAll(aggRulesFinding)); @@ -325,10 +325,10 @@ public void testRemoveAllRulesAndUpdateDetector_success() throws IOException { Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -343,22 +343,22 @@ public void testRemoveAllRulesAndUpdateDetector_success() throws IOException { Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match_all\":{\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match_all\":{\n" + + " }\n" + + " }\n" + + "}"; SearchResponse response = executeSearchAndGetResponse(DetectorMonitorConfig.getRuleIndex(randomDetectorType()), 
request, true); assertEquals(randomPrepackagedRules.size(), response.getHits().getTotalHits().value); request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; // Verify that one doc level monitor is created List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -403,10 +403,10 @@ public void testAddNewAggregationRule_verifyFindings_success() throws IOExceptio Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -416,7 +416,7 @@ public void testAddNewAggregationRule_verifyFindings_success() throws IOExceptio String sumRuleId = createRule(randomAggregationRule("sum", " > 1")); List detectorRules = List.of(new DetectorRule(sumRuleId)); DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - Collections.emptyList()); + Collections.emptyList()); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); @@ -425,12 +425,12 @@ public void testAddNewAggregationRule_verifyFindings_success() throws IOExceptio Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " 
\"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -442,7 +442,7 @@ public void testAddNewAggregationRule_verifyFindings_success() throws IOExceptio // Test adding the new max monitor and updating the existing sum monitor String maxRuleId = createRule(randomAggregationRule("max", " > 3")); DetectorInput newInput = new DetectorInput("windows detector for security analytics", List.of("windows"), List.of(new DetectorRule(maxRuleId), new DetectorRule(sumRuleId)), - Collections.emptyList()); + Collections.emptyList()); Detector updatedDetector = randomDetectorWithInputs(List.of(newInput)); Response updateResponse = makeRequest(client(), "PUT", SecurityAnalyticsPlugin.DETECTOR_BASE_URI + "/" + detectorId, Collections.emptyMap(), toHttpEntity(updatedDetector)); @@ -480,7 +480,7 @@ public void testAddNewAggregationRule_verifyFindings_success() throws IOExceptio Map finding = ((List) getFindingsBody.get("findings")).get(0); Set aggRulesFinding = ((List>) finding.get("queries")).stream().map(it -> it.get("id").toString()).collect( - Collectors.toSet()); + Collectors.toSet()); assertEquals(sumRuleId, aggRulesFinding.iterator().next()); @@ -510,10 +510,10 @@ public void testDeleteAggregationRule_verifyFindings_success() throws IOExceptio Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -528,7 
+528,7 @@ public void testDeleteAggregationRule_verifyFindings_success() throws IOExceptio List detectorRules = aggRuleIds.stream().map(DetectorRule::new).collect(Collectors.toList()); DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - Collections.emptyList()); + Collections.emptyList()); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); @@ -537,12 +537,12 @@ public void testDeleteAggregationRule_verifyFindings_success() throws IOExceptio Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -553,7 +553,7 @@ public void testDeleteAggregationRule_verifyFindings_success() throws IOExceptio // Test deleting the aggregation rule DetectorInput newInput = new DetectorInput("windows detector for security analytics", List.of("windows"), List.of(new DetectorRule(avgRuleId)), - Collections.emptyList()); + Collections.emptyList()); detector = randomDetectorWithInputs(List.of(newInput)); Response updateResponse = makeRequest(client(), "PUT", SecurityAnalyticsPlugin.DETECTOR_BASE_URI + "/" + detectorId, Collections.emptyMap(), toHttpEntity(detector)); @@ -595,7 +595,7 @@ public void testDeleteAggregationRule_verifyFindings_success() throws IOExceptio Map finding = ((List) getFindingsBody.get("findings")).get(0); Set aggRulesFinding = ((List>) finding.get("queries")).stream().map(it -> it.get("id").toString()).collect( - Collectors.toSet()); + Collectors.toSet()); 
assertEquals(avgRuleId, aggRulesFinding.iterator().next()); @@ -625,10 +625,10 @@ public void testReplaceAggregationRule_verifyFindings_success() throws IOExcepti Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -644,7 +644,7 @@ public void testReplaceAggregationRule_verifyFindings_success() throws IOExcepti List prepackagedDocRules = getRandomPrePackagedRules(); DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - prepackagedDocRules.stream().map(DetectorRule::new).collect(Collectors.toList())); + prepackagedDocRules.stream().map(DetectorRule::new).collect(Collectors.toList())); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); @@ -653,12 +653,12 @@ public void testReplaceAggregationRule_verifyFindings_success() throws IOExcepti Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -669,8 +669,8 @@ public void testReplaceAggregationRule_verifyFindings_success() throws IOExcepti String maxRuleId = 
createRule(randomAggregationRule("max", " > 2")); DetectorInput newInput = new DetectorInput("windows detector for security analytics", List.of("windows"), - List.of(new DetectorRule(avgRuleId), new DetectorRule(maxRuleId)), - getRandomPrePackagedRules().stream().map(DetectorRule::new).collect(Collectors.toList())); + List.of(new DetectorRule(avgRuleId), new DetectorRule(maxRuleId)), + getRandomPrePackagedRules().stream().map(DetectorRule::new).collect(Collectors.toList())); detector = randomDetectorWithInputs(List.of(newInput)); createResponse = makeRequest(client(), "PUT", SecurityAnalyticsPlugin.DETECTOR_BASE_URI + "/" + detectorId, Collections.emptyMap(), toHttpEntity(detector)); @@ -743,10 +743,10 @@ public void testMinAggregationRule_findingSuccess() throws IOException { Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -757,7 +757,7 @@ public void testMinAggregationRule_findingSuccess() throws IOException { aggRuleIds.add(createRule(randomAggregationRule("min", " > 3", testOpCode))); List detectorRules = aggRuleIds.stream().map(DetectorRule::new).collect(Collectors.toList()); DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - Collections.emptyList()); + Collections.emptyList()); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); @@ -766,12 +766,12 @@ public void 
testMinAggregationRule_findingSuccess() throws IOException { Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); String request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); @@ -830,10 +830,10 @@ public void testMultipleAggregationAndDocRules_findingSuccess() throws IOExcepti Request createMappingRequest = new Request("POST", SecurityAnalyticsPlugin.MAPPER_BASE_URI); // both req params and req body are supported createMappingRequest.setJsonEntity( - "{ \"index_name\":\"" + index + "\"," + - " \"rule_topic\":\"" + randomDetectorType() + "\", " + - " \"partial\":true" + - "}" + "{ \"index_name\":\"" + index + "\"," + + " \"rule_topic\":\"" + randomDetectorType() + "\", " + + " \"partial\":true" + + "}" ); Response createMappingResponse = client().performRequest(createMappingRequest); @@ -854,20 +854,20 @@ public void testMultipleAggregationAndDocRules_findingSuccess() throws IOExcepti List prepackagedRules = getRandomPrePackagedRules(); List detectorRules = List.of(new DetectorRule(sumRuleId), new DetectorRule(maxRuleId), new DetectorRule(minRuleId), - new DetectorRule(avgRuleId), new DetectorRule(cntRuleId), new DetectorRule(randomDocRuleId)); + new DetectorRule(avgRuleId), new DetectorRule(cntRuleId), new DetectorRule(randomDocRuleId)); DetectorInput input = new DetectorInput("windows detector for security analytics", List.of("windows"), detectorRules, - prepackagedRules.stream().map(DetectorRule::new).collect(Collectors.toList())); + prepackagedRules.stream().map(DetectorRule::new).collect(Collectors.toList())); Detector detector = randomDetectorWithInputs(List.of(input)); Response createResponse = makeRequest(client(), "POST", 
SecurityAnalyticsPlugin.DETECTOR_BASE_URI, Collections.emptyMap(), toHttpEntity(detector)); String request = "{\n" + - " \"query\" : {\n" + - " \"match_all\":{\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match_all\":{\n" + + " }\n" + + " }\n" + + "}"; SearchResponse response = executeSearchAndGetResponse(DetectorMonitorConfig.getRuleIndex(randomDetectorType()), request, true); assertEquals(6, response.getHits().getTotalHits().value); @@ -876,12 +876,12 @@ public void testMultipleAggregationAndDocRules_findingSuccess() throws IOExcepti Map responseBody = asMap(createResponse); String detectorId = responseBody.get("_id").toString(); request = "{\n" + - " \"query\" : {\n" + - " \"match\":{\n" + - " \"_id\": \"" + detectorId + "\"\n" + - " }\n" + - " }\n" + - "}"; + " \"query\" : {\n" + + " \"match\":{\n" + + " \"_id\": \"" + detectorId + "\"\n" + + " }\n" + + " }\n" + + "}"; List hits = executeSearch(Detector.DETECTORS_INDEX, request); SearchHit hit = hits.get(0); Map updatedDetectorMap = (HashMap)(hit.getSourceAsMap().get("detector")); @@ -1235,4 +1235,4 @@ private static void assertRuleMonitorFinding(Map executeResults, List triggerResultBucketKeys = ((Map)((Map) ((Map)executeResults.get("trigger_results")).get(ruleId)).get("agg_result_buckets")).keySet().stream().collect(Collectors.toList()); assertEquals(expectedTriggerResult, triggerResultBucketKeys); } -} +} \ No newline at end of file diff --git a/src/test/java/org/opensearch/securityanalytics/rules/backend/QueryBackendTests.java b/src/test/java/org/opensearch/securityanalytics/rules/backend/QueryBackendTests.java index c5917cf8b..10501cddd 100644 --- a/src/test/java/org/opensearch/securityanalytics/rules/backend/QueryBackendTests.java +++ b/src/test/java/org/opensearch/securityanalytics/rules/backend/QueryBackendTests.java @@ -269,20 +269,20 @@ public void testConvertValueNull() throws IOException, SigmaError { OSQueryBackend queryBackend = testBackend(); List queries = 
queryBackend.convertRule(SigmaRule.fromYaml( " title: Test\n" + - " id: 39f919f3-980b-4e6f-a975-8af7e507ef2b\n" + - " status: test\n" + - " level: critical\n" + - " description: Detects QuarksPwDump clearing access history in hive\n" + - " author: Florian Roth\n" + - " date: 2017/05/15\n" + - " logsource:\n" + - " category: test_category\n" + - " product: test_product\n" + - " detection:\n" + - " sel:\n" + - " fieldA1: null\n" + - " condition: sel", false)); - Assert.assertEquals("mappedA: null", queries.get(0).toString()); + " id: 39f919f3-980b-4e6f-a975-8af7e507ef2b\n" + + " status: test\n" + + " level: critical\n" + + " description: Detects QuarksPwDump clearing access history in hive\n" + + " author: Florian Roth\n" + + " date: 2017/05/15\n" + + " logsource:\n" + + " category: test_category\n" + + " product: test_product\n" + + " detection:\n" + + " sel:\n" + + " fieldA1: null\n" + + " condition: sel", false)); + Assert.assertEquals("mappedA: (NOT [* TO *])", queries.get(0).toString()); } public void testConvertValueRegex() throws IOException, SigmaError { @@ -509,23 +509,23 @@ public void testConvertOrInUnallowedValueType() throws IOException, SigmaError { OSQueryBackend queryBackend = testBackend(); List queries = queryBackend.convertRule(SigmaRule.fromYaml( " title: Test\n" + - " id: 39f919f3-980b-4e6f-a975-8af7e507ef2b\n" + - " status: test\n" + - " level: critical\n" + - " description: Detects QuarksPwDump clearing access history in hive\n" + - " author: Florian Roth\n" + - " date: 2017/05/15\n" + - " logsource:\n" + - " category: test_category\n" + - " product: test_product\n" + - " detection:\n" + - " sel:\n" + - " fieldA1: \n" + - " - value1\n" + - " - value2\n" + - " - null\n" + - " condition: sel", false)); - Assert.assertEquals("(mappedA: \"value1\") OR (mappedA: \"value2\") OR (mappedA: null)", queries.get(0).toString()); + " id: 39f919f3-980b-4e6f-a975-8af7e507ef2b\n" + + " status: test\n" + + " level: critical\n" + + " description: Detects 
QuarksPwDump clearing access history in hive\n" + + " author: Florian Roth\n" + + " date: 2017/05/15\n" + + " logsource:\n" + + " category: test_category\n" + + " product: test_product\n" + + " detection:\n" + + " sel:\n" + + " fieldA1: \n" + + " - value1\n" + + " - value2\n" + + " - null\n" + + " condition: sel", false)); + Assert.assertEquals("(mappedA: \"value1\") OR (mappedA: \"value2\") OR (mappedA: (NOT [* TO *]))", queries.get(0).toString()); } public void testConvertOrInListNumbers() throws IOException, SigmaError {