diff --git a/log4j-parent/pom.xml b/log4j-parent/pom.xml index a188c140dd6..5530b4c428a 100644 --- a/log4j-parent/pom.xml +++ b/log4j-parent/pom.xml @@ -78,7 +78,7 @@ 1.2.15 3.4.4 - 8.14.1 + 8.14.2 0.9.0 7.0.5 4.12.0 @@ -114,7 +114,7 @@ 9.4.55.v20240627 3.5.9 1.37 - 2.39.0 + 2.40.0 4.13.2 5.10.3 1.9.1 diff --git a/src/changelog/.2.x.x/update_co_elastic_clients_elasticsearch_java.xml b/src/changelog/.2.x.x/update_co_elastic_clients_elasticsearch_java.xml index 04c183a67f5..74635cec554 100644 --- a/src/changelog/.2.x.x/update_co_elastic_clients_elasticsearch_java.xml +++ b/src/changelog/.2.x.x/update_co_elastic_clients_elasticsearch_java.xml @@ -3,6 +3,6 @@ xmlns="https://logging.apache.org/xml/ns" xsi:schemaLocation="https://logging.apache.org/xml/ns https://logging.apache.org/xml/ns/log4j-changelog-0.xsd" type="updated"> - - Update `co.elastic.clients:elasticsearch-java` to version `8.14.1` + + Update `co.elastic.clients:elasticsearch-java` to version `8.14.2` diff --git a/src/changelog/.2.x.x/update_net_javacrumbs_json_unit_json_unit.xml b/src/changelog/.2.x.x/update_net_javacrumbs_json_unit_json_unit.xml index febb0c423e3..efee1ce07bd 100644 --- a/src/changelog/.2.x.x/update_net_javacrumbs_json_unit_json_unit.xml +++ b/src/changelog/.2.x.x/update_net_javacrumbs_json_unit_json_unit.xml @@ -3,6 +3,6 @@ xmlns="https://logging.apache.org/xml/ns" xsi:schemaLocation="https://logging.apache.org/xml/ns https://logging.apache.org/xml/ns/log4j-changelog-0.xsd" type="updated"> - - Update `net.javacrumbs.json-unit:json-unit` to version `2.39.0` + + Update `net.javacrumbs.json-unit:json-unit` to version `2.40.0` diff --git a/src/site/antora/antora.tmpl.yml b/src/site/antora/antora.tmpl.yml index 8b7c1fd4ab9..0fd4e0e8538 100644 --- a/src/site/antora/antora.tmpl.yml +++ b/src/site/antora/antora.tmpl.yml @@ -45,6 +45,7 @@ asciidoc: project-id: "log4j" java-target-version: "${maven.compiler.target}" java-compiler-version: "${minimalJavaBuildVersion}" + log4j-kubernetes-url: 
"https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md" logback-url: "https://logback.qos.ch" logging-services-url: "https://logging.apache.org" lmax-disruptor-url: "https://lmax-exchange.github.io/disruptor" diff --git a/src/site/antora/antora.yml b/src/site/antora/antora.yml index e027636b213..36ea427b7ff 100644 --- a/src/site/antora/antora.yml +++ b/src/site/antora/antora.yml @@ -45,6 +45,7 @@ asciidoc: project-id: "log4j" java-target-version: "8" java-compiler-version: "[17,18)" + log4j-kubernetes-url: "https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md" logback-url: "https://logback.qos.ch" logging-services-url: "https://logging.apache.org" lmax-disruptor-url: "https://lmax-exchange.github.io/disruptor" diff --git a/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.json b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.json new file mode 100644 index 00000000000..14512de6346 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.json @@ -0,0 +1,24 @@ +{ + "Configuration": { + "Appenders": { + // tag::socketAppender[] + "Socket": { + "name": "SOCKET", + "host": "localhost", + "port": 12345, + "JsonTemplateLayout": { + "nullEventDelimiterEnabled": true + } + } + // end::socketAppender[] + }, + "Loggers": { + "Root": { + "level": "WARN", + "AppenderRef": { + "ref": "SOCKET" + } + } + } + } +} diff --git a/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.properties b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.properties new file mode 100644 index 00000000000..b3aa633b9f2 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.properties @@ -0,0 +1,28 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# tag::socketAppender[] +appender.0.type = Socket +appender.0.name = SOCKET +appender.0.host = localhost +appender.0.port = 12345 +appender.0.layout.type = JsonTemplateLayout +appender.0.layout.nullEventDelimiterEnabled = true +# end::socketAppender[] + +rootLogger.level = WARN +rootLogger.appenderRef.0.ref = SOCKET diff --git a/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.xml b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.xml new file mode 100644 index 00000000000..bc7e6e3697e --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.yaml b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.yaml new file mode 100644 index 00000000000..60cc47d586d --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/cloud/logstash/log4j2.yaml @@ -0,0 +1,33 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + + Appenders: + # tag::socketAppender[] + Socket: + name: "SOCKET" + host: "localhost" + port: 12345 + JsonTemplateLayout: + nullEventDelimiterEnabled: true + # end::socketAppender[] + + Loggers: + Root: + level: "WARN" + AppenderRef: + ref: "SOCKET" diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.json new file mode 100644 index 00000000000..9ff7a6bb9fc --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.json @@ -0,0 +1,38 @@ +{ + "Configuration": { + "monitorInterval": 10, + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "ALL", + "AppenderRef": { + "ref": "CONSOLE" + } + } + }, + // tag::filter[] + "ContextMapFilter": { + "operator": "AND", + "KeyValuePair": [ + { + "key": "clientId", + "value": "1234" + }, + { + "key": "userId", + "value": "alice" + }, + { + "key": "userId", + "value": "bob" + } + ] + } + // end::filter[] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.properties new file mode 100644 index 00000000000..950441524c0 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.properties @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +monitorInterval = 10 + +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +rootLogger.level = ALL +rootLogger.appenderRef.0.ref = CONSOLE + +# tag::filter[] +filter.0.type = ContextMapFilter +filter.0.operator = AND + +filter.0.kv0.type = KeyValuePair +filter.0.kv0.key = clientId +filter.0.kv0.value = 1234 + +filter.0.kv1.type = KeyValuePair +filter.0.kv1.key = userId +filter.0.kv1.value = alice + +filter.0.kv2.type = KeyValuePair +filter.0.kv2.key = userId +filter.0.kv2.value = bob +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.xml new file mode 100644 index 00000000000..9367cd2ccad --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.yaml new file mode 100644 index 00000000000..9de0a687ffb --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ContextMapFilter.yaml @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) 
under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + monitorInterval: 10 + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: { } + Loggers: + Root: + level: "ALL" + AppenderRef: + ref: "CONSOLE" + # tag::filter[] + ContextMapFilter: + operator: "AND" + KeyValuePair: + - key: "clientId" + value: "1234" + - key: "userId" + value: "alice" + - key: "userId" + value: "bob" + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.json new file mode 100644 index 00000000000..12b6fb95fb1 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.json @@ -0,0 +1,35 @@ +{ + "Configuration": { + "monitorInterval": 10, + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "ALL", + "AppenderRef": { + "ref": "CONSOLE" + } + } + }, + // tag::filter[] + "DynamicThresholdFilter": { + "key": "loginId", // <3> + "defaultThreshold": "ERROR", + "KeyValuePair": [ + { // <1> + "key": "alice", + "value": "DEBUG" + }, + { // <2> + "key": "bob", + "value": "INFO" + } + ] + } + // end::filter[] + } +} \ No newline at end of file diff --git 
a/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.properties new file mode 100644 index 00000000000..92177e729f1 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.properties @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +monitorInterval = 10 + +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +rootLogger.level = ALL +rootLogger.appenderRef.0.ref = CONSOLE + +# tag::filter[] +filter.0.type = DynamicThresholdFilter +filter.0.key = loginId +# <3> +filter.0.defaultThreshold = ERROR +# <1> +filter.0.kv0.type = KeyValuePair +filter.0.kv0.key = alice +filter.0.kv0.value = DEBUG +# <2> +filter.0.kv1.type = KeyValuePair +filter.0.kv1.key = bob +filter.0.kv1.value = INFO +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.xml new file mode 100644 index 00000000000..98fc22e2bb8 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.yaml new file mode 100644 index 00000000000..a4d4c7e473d --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/DynamicThresholdFilter.yaml @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + monitorInterval: 10 + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: { } + Loggers: + Root: + level: "ALL" + AppenderRef: + ref: "CONSOLE" + # tag::filter[] + DynamicThresholdFilter: + key: "loginId" + defaultThreshold: "ERROR" # <3> + KeyValuePair: + - key: "alice" # <1> + value: "DEBUG" + - key: "bob" # <2> + value: "INFO" + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.json new file mode 100644 index 00000000000..9b49b533f9a --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.json @@ -0,0 +1,38 @@ +{ + "Configuration": { + "monitorInterval": 10, + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "ALL", + "AppenderRef": { + "ref": "CONSOLE" + } + } + }, + // tag::filter[] + "MapFilter": { + "operator": "AND", + "KeyValuePair": [ + { + "key": "eventType", + "value": "authentication" + }, + { + "key": "eventId", + "value": "login" + }, + { + "key": "eventId", + "value": "logout" + } + ] + } + // end::filter[] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.properties new file mode 100644 index 00000000000..1c878d26d57 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.properties @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +monitorInterval = 10 + +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +rootLogger.level = ALL +rootLogger.appenderRef.0.ref = CONSOLE + +# tag::filter[] +filter.0.type = MapFilter +filter.0.operator = AND + +filter.0.kv0.type = KeyValuePair +filter.0.kv0.key = eventType +filter.0.kv0.value = authentication + +filter.0.kv1.type = KeyValuePair +filter.0.kv1.key = eventId +filter.0.kv1.value = login + +filter.0.kv2.type = KeyValuePair +filter.0.kv2.key = eventId +filter.0.kv2.value = logout +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.xml new file mode 100644 index 00000000000..269debf76f1 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.yaml new file mode 100644 index 00000000000..cb01ac66b1c --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MapFilter.yaml @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + monitorInterval: 10 + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: { } + Loggers: + Root: + level: "ALL" + AppenderRef: + ref: "CONSOLE" + # tag::filter[] + MapFilter: + operator: "AND" + KeyValuePair: + - key: "eventType" + value: "authentication" + - key: "eventId" + value: "login" + - key: "eventId" + value: "logout" + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.json new file mode 100644 index 00000000000..f221178b6a4 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.json @@ -0,0 +1,24 @@ +{ + "Configuration": { + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "ALL", + "AppenderRef": { + "ref": "CONSOLE" + } + } + }, + // tag::filter[] + "MutableContextMapFilter": { + "configLocation": "https://server.example/configs.json", + "pollInterval": 10 + } + // end::filter[] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.properties 
new file mode 100644 index 00000000000..89cc27eaa27 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.properties @@ -0,0 +1,28 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +rootLogger.level = ALL +rootLogger.appenderRef.0.ref = CONSOLE + +# tag::filter[] +filter.0.type = MutableContextMapFilter +filter.0.configLocation = https://server.example/configs.json +filter.0.pollInterval = 10 +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.xml new file mode 100644 index 00000000000..a4ab14a5585 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.yaml new file mode 100644 index 00000000000..f44a232cd10 --- /dev/null +++ 
b/src/site/antora/modules/ROOT/examples/manual/filters/MutableContextMapFilter.yaml @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: { } + Loggers: + Root: + level: "ALL" + AppenderRef: + ref: "CONSOLE" + # tag::filter[] + MutableContextMapFilter: + configLocation: "https://server.example/configs.json" + pollInterval: 10 + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.json new file mode 100644 index 00000000000..2703a976ee6 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.json @@ -0,0 +1,34 @@ +{ + "Configuration": { + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + // tag::local[] + "Root": { + "level": "ALL", + "ScriptFilter": { + "ScriptFile": { + "language": "groovy", + "path": "scripts/local.groovy" + } + }, + "AppenderRef": { + "ref": "CONSOLE" + } + } + // end::local[] + } + }, + // tag::global[] + "ScriptFilter": { + "ScriptFile": { + "language": "groovy", + "path": "scripts/global.groovy" + } + } + // end::global[] +} 
\ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.properties new file mode 100644 index 00000000000..c3adae4cba3 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.properties @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +# tag::local[] +rootLogger.level = ALL + +rootLogger.filter.0.type = ScriptFilter +rootLogger.filter.0.script.type = ScriptFile +rootLogger.filter.0.script.language = groovy +rootLogger.filter.0.script.path = scripts/local.groovy + +rootLogger.appenderRef.0.ref = CONSOLE +# end::local[] +# tag::global[] +filter.0.type = ScriptFilter +filter.0.script.type = ScriptFile +filter.0.script.language = groovy +filter.0.script.path = scripts/global.groovy +# end::global[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.xml new file mode 100644 index 00000000000..2d50686d9c6 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.yaml new file mode 100644 index 00000000000..29d7f63bc3e --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/ScriptFilter.yaml @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: {} + Loggers: + # tag::local[] + Root: + level: "ALL" + ScriptFilter: + ScriptFile: + language: "groovy" + path: "scripts/local.groovy" + AppenderRef: + ref: "CONSOLE" + # end::local[] + # tag::global[] + ScriptFilter: + ScriptFile: + language: "groovy" + path: "scripts/global.groovy" + # end::global[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.json new file mode 100644 index 00000000000..6516d84130a --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.json @@ -0,0 +1,38 @@ +{ + "Configuration": { + "monitorInterval": 10, + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "ALL", + "AppenderRef": { + "ref": "CONSOLE" + } + } + }, + // tag::filter[] + "StructuredDataFilter": { + "operator": "AND", + "KeyValuePair": [ + { + "key": "id", + "value": "authentication" + }, + { + "key": "userId", + "value": "alice" + }, + { + "key": "userId", + "value": "bob" + } + ] + } + // end::filter[] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.properties b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.properties new file mode 100644 index 00000000000..ad11a44bdb7 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.properties @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +monitorInterval = 10 + +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +rootLogger.level = ALL +rootLogger.appenderRef.0.ref = CONSOLE + +# tag::filter[] +filter.0.type = StructuredDataFilter +filter.0.operator = AND + +filter.0.kv0.type = KeyValuePair +filter.0.kv0.key = id +filter.0.kv0.value = authentication + +filter.0.kv1.type = KeyValuePair +filter.0.kv1.key = userId +filter.0.kv1.value = alice + +filter.0.kv2.type = KeyValuePair +filter.0.kv2.key = userId +filter.0.kv2.value = bob +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.xml new file mode 100644 index 00000000000..f5f43ea5b52 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.yaml new file mode 100644 index 00000000000..31822d3644f --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/StructuredDataFilter.yaml @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + monitorInterval: 10 + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: { } + Loggers: + Root: + level: "ALL" + AppenderRef: + ref: "CONSOLE" + # tag::filter[] + StructuredDataFilter: + operator: "AND" + KeyValuePair: + - key: "id" + value: "authentication" + - key: "userId" + value: "alice" + - key: "userId" + value: "bob" + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.json b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.json new file mode 100644 index 00000000000..39bd0620950 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.json @@ -0,0 +1,38 @@ +{ + "Configuration": { + "Appenders": { + "Console": { + "name": "CONSOLE", + "JsonTemplateLayout": {} + }, + "SMTP": { + "name": "SMTP", + "to": "root@localhost", + "JsonTemplateLayout": {} + } + }, + "Loggers": { + "Root": { + "level": "INFO", + // tag::filter[] + "AppenderRef": [ + { + "ref": "CONSOLE", + "TimeFilter": { + "start": "08:00:00", + "end": "16:00:00" + } + }, + { + "ref": "SMTP", + "TimeFilter": { + "start": "16:00:00", + "end": "08:00:00" + } + } + ] + // end::filter[] + } + } + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.properties 
b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.properties new file mode 100644 index 00000000000..4a14f09d171 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.properties @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +appender.0.type = Console +appender.0.name = CONSOLE +appender.0.layout.type = JsonTemplateLayout + +appender.1.type = SMTP +appender.1.name = SMTP +appender.1.to = root@localhost +appender.1.layout.type = JsonTemplateLayout + +rootLogger.level = INFO +# tag::filter[] +rootLogger.appenderRef.0.ref = CONSOLE +rootLogger.appenderRef.0.filter.0.type = TimeFilter +rootLogger.appenderRef.0.filter.0.start = 08:00:00 +rootLogger.appenderRef.0.filter.0.end = 16:00:00 + +rootLogger.appenderRef.1.ref = SMTP +rootLogger.appenderRef.1.filter.0.type = TimeFilter +rootLogger.appenderRef.1.filter.0.start = 16:00:00 +rootLogger.appenderRef.1.filter.0.end = 08:00:00 +# end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.xml b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.xml new file mode 100644 index 00000000000..b2904d49ecf --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.yaml new file mode 100644 index 00000000000..74dd842113c --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/TimeFilter.yaml @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +Configuration: + monitorInterval: 10 + Appenders: + Console: + name: "CONSOLE" + JsonTemplateLayout: {} + SMTP: + name: "SMTP" + to: "root@localhost" + JsonTemplateLayout: {} + Loggers: + Root: + level: "INFO" + # tag::filter[] + AppenderRef: + - ref: "CONSOLE" + TimeFilter: + start: "08:00:00" + end: "16:00:00" + - ref: "SMTP" + TimeFilter: + start: "16:00:00" + end: "08:00:00" + # end::filter[] diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/configs.json b/src/site/antora/modules/ROOT/examples/manual/filters/configs.json new file mode 100644 index 00000000000..b78065eece0 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/configs.json @@ -0,0 +1,11 @@ +{ + "configs": { + "clientId": [ + "1234" + ], + "userId": [ + "alice", + "bob" + ] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/configs2.json b/src/site/antora/modules/ROOT/examples/manual/filters/configs2.json new file mode 100644 index 00000000000..0528cc6c83b --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/configs2.json @@ -0,0 +1,10 @@ +{ + "configs": { + "clientId": [ + "1234" // <1> + ], + "userId": [ + "root" // <2> + ] + } +} \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/filters.json b/src/site/antora/modules/ROOT/examples/manual/filters/filters.json index 4acc4c43b76..752f68b2ab9 100644 --- a/src/site/antora/modules/ROOT/examples/manual/filters/filters.json +++ b/src/site/antora/modules/ROOT/examples/manual/filters/filters.json @@ 
-4,33 +4,36 @@ "Console": { "name": "CONSOLE", "ThresholdFilter": { - "level": "WARN" // <6> + "level": "FATAL" // <6> } } }, "Loggers": { "Root": { - "level": "INFO", + "level": "OFF", "ThresholdFilter": { // <3> - "level": "DEBUG" + "level": "WARN" }, "AppenderRef": { "ref": "CONSOLE", - "level": "WARN", // <5> - "MarkerFilter": { // <4> - "marker": "ALERT", - "onMatch": "NEUTRAL", - "onMismatch": "DENY" + "level": "ERROR", // <4> + "MarkerFilter": { // <5> + "marker": "SECURITY_ALERT" } } }, "Logger": { "name": "org.example", - "level": "TRACE" // <2> + "level": "DEBUG", // <2> + "ThresholdFilter": { // <3> + "level": "INFO" + } } } }, "MarkerFilter": { // <1> - "marker": "PRIVATE" + "marker": "ALERT", + "onMatch": "ACCEPT", + "onMismatch": "NEUTRAL" } } \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/filters.properties b/src/site/antora/modules/ROOT/examples/manual/filters/filters.properties index 1077fc44e7e..b98760c8e41 100644 --- a/src/site/antora/modules/ROOT/examples/manual/filters/filters.properties +++ b/src/site/antora/modules/ROOT/examples/manual/filters/filters.properties @@ -16,23 +16,30 @@ # appender.0.type = Console appender.0.name = CONSOLE -appender.0.filter.type = ThresholdFilter # <6> -appender.0.filter.level = WARN +# <7> +appender.0.filter.type = ThresholdFilter +appender.0.filter.level = FATAL -rootLogger.level = INFO -rootLogger.filter.type = ThresholdFilter # <3> -rootLogger.filter.level = DEBUG +rootLogger.level = OFF +# <4> +rootLogger.filter.type = ThresholdFilter +rootLogger.filter.level = WARN rootLogger.appenderRef.0.ref = CONSOLE -rootLogger.appenderRef.0.level = WARN # <5> -rootLogger.appenderRef.0.filter.type = MarkerFilter # <4> -rootLogger.appenderRef.0.filter.marker = ALERT -rootLogger.appenderRef.0.filter.onMatch = NEUTRAL -rootLogger.appenderRef.0.filter.onMismatch = DENY +# <5> +rootLogger.appenderRef.0.level = ERROR +# <6> +rootLogger.appenderRef.0.filter.type = MarkerFilter 
+rootLogger.appenderRef.0.filter.marker = SECURITY_ALERT logger.0.name = org.example -logger.0.level = DEBUG # <2> -logger.0.filter.type = ThresholdFilter # <3> -logger.0.filter.level = TRACE +# <2> +logger.0.level = DEBUG +# <3> +logger.0.filter.type = ThresholdFilter +logger.0.filter.level = INFO -filter.type = MarkerFilter # <1> -filter.marker = PRIVATE +# <1> +filter.0.type = MarkerFilter +filter.0.marker = ALERT +filter.0.onMatch = ACCEPT +filter.0.onMismatch = NEUTRAL diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/filters.xml b/src/site/antora/modules/ROOT/examples/manual/filters/filters.xml index 4c86b04faf5..8584d870961 100644 --- a/src/site/antora/modules/ROOT/examples/manual/filters/filters.xml +++ b/src/site/antora/modules/ROOT/examples/manual/filters/filters.xml @@ -22,21 +22,21 @@ https://logging.apache.org/xml/ns/log4j-config-2.xsd"> - + - - - - + + + + - - + + - + diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/filters.yaml b/src/site/antora/modules/ROOT/examples/manual/filters/filters.yaml index 9a4f61f3467..bc1c72637cc 100644 --- a/src/site/antora/modules/ROOT/examples/manual/filters/filters.yaml +++ b/src/site/antora/modules/ROOT/examples/manual/filters/filters.yaml @@ -18,24 +18,24 @@ Configuration: Appenders: Console: name: "CONSOLE" - ThresholdFilter: # <6> - level: "WARN" + ThresholdFilter: # <7> + level: "FATAL" Loggers: Root: - level: "INFO" - ThreadsholdFilter: # <3> - level: "DEBUG" + level: "OFF" + ThresholdFilter: # <4> + level: "WARN" AppenderRef: ref: "CONSOLE" - level: "WARN" # <5> - MarkerFilter: # <4> - marker: "ALERT" - onMatch: "NEUTRAL" - onMismatch: "DENY" + level: "ERROR" # <5> + MarkerFilter: # <6> + marker: "SECURITY_ALERT" Logger: name: "org.example" - level: "TRACE" # <2> - ThresholdFilter: - level: "TRACE" # <3> + level: "DEBUG" # <2> + ThresholdFilter: # <3> + level: "INFO" MarkerFilter: # <1> - marker: "PRIVATE" + marker: "ALERT" + onMatch: "ACCEPT" + onMismatch: "NEUTRAL" diff --git 
a/src/site/antora/modules/ROOT/examples/manual/filters/global.groovy b/src/site/antora/modules/ROOT/examples/manual/filters/global.groovy new file mode 100644 index 00000000000..01f836bb574 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/global.groovy @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +Throwable lastParam = parameters?.last() instanceof Throwable ? parameters.last() : null +Throwable actualThrowable = throwable ?: message?.throwable ?: lastParam +return actualThrowable instanceof DataAccessException \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/examples/manual/filters/local.groovy b/src/site/antora/modules/ROOT/examples/manual/filters/local.groovy new file mode 100644 index 00000000000..fc5dc066670 --- /dev/null +++ b/src/site/antora/modules/ROOT/examples/manual/filters/local.groovy @@ -0,0 +1,17 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +return logEvent.throwable instanceof DataAccessException; \ No newline at end of file diff --git a/src/site/antora/modules/ROOT/images/DockerFluentd.png b/src/site/antora/modules/ROOT/images/DockerFluentd.png deleted file mode 100644 index edcd35082be..00000000000 Binary files a/src/site/antora/modules/ROOT/images/DockerFluentd.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/images/DockerFluentdAggregator.png b/src/site/antora/modules/ROOT/images/DockerFluentdAggregator.png deleted file mode 100644 index 846c5a677f3..00000000000 Binary files a/src/site/antora/modules/ROOT/images/DockerFluentdAggregator.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/images/DockerLogFile.png b/src/site/antora/modules/ROOT/images/DockerLogFile.png deleted file mode 100644 index 5ec366ed74d..00000000000 Binary files a/src/site/antora/modules/ROOT/images/DockerLogFile.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/images/DockerStdout.png b/src/site/antora/modules/ROOT/images/DockerStdout.png deleted file mode 100644 index 546e8969aa5..00000000000 Binary files a/src/site/antora/modules/ROOT/images/DockerStdout.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/images/DockerTCP.png b/src/site/antora/modules/ROOT/images/DockerTCP.png deleted file mode 100644 index 4a30200c028..00000000000 Binary files 
a/src/site/antora/modules/ROOT/images/DockerTCP.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/images/LoggerAggregator.png b/src/site/antora/modules/ROOT/images/LoggerAggregator.png deleted file mode 100644 index 78ebf0af878..00000000000 Binary files a/src/site/antora/modules/ROOT/images/LoggerAggregator.png and /dev/null differ diff --git a/src/site/antora/modules/ROOT/pages/log4j-docker.adoc b/src/site/antora/modules/ROOT/pages/log4j-docker.adoc index 669d0bf63ce..3f21b93a910 100644 --- a/src/site/antora/modules/ROOT/pages/log4j-docker.adoc +++ b/src/site/antora/modules/ROOT/pages/log4j-docker.adoc @@ -15,31 +15,39 @@ Licensed to the Apache Software Foundation (ASF) under one or more limitations under the License. //// -= Log4j Docker Support - -Log4j supports Docker by providing a Lookup to retrieve container information. - -== Accessing Docker - -The Log4j Docker support requires access to the Docker REST interface. -In practical terms this means the application either needs access to unix:///var/run/docker.sock through a volume mount (not recommended), bind Docker to another host/port or unix socket. -or use a proxy application to provide access. -The https://github.com/apache/logging-log4j2/tree/main/log4j-spring-cloud-config/log4j-spring-cloud-config-samples/log4j-spring-cloud-config-sample-application[Log4j Spring Cloud sample application] uses a socat proxy to access Docker. - -== Lookup Attributes - -Log4j Docker provides access to the following container attributes: - -* containerId - The full id assigned to the container. -* containerName - The name assigned to the container. -* imageId - The id assigned to the image. -* imageName - The name assigned to the image. -* shortContainerId - The first 12 characters of the container id. -* shortImageId - The first 12 characters of the image id. - -Attributes may be accessed by adding `${docker:containerId}` to the configuration. 
-Note that docker variables are only resolved once during logging initialization so they shouldn't be referenced with more than one `$` character. - -== Requirements - -Log4j Docker requires Log4j Core, Log4j API and a minimum of Java 8. += Log4j Docker + +Log4j Docker module offers https://docker.com[Docker]-specific utilities. + +[#install] +== Installation + +You need to add the following additional runtime dependency to your build to use Log4j Docker: + +[tabs] +==== +Maven:: ++ +[source,xml,subs="+attributes"] +---- + + org.apache.logging.log4j + log4j-docker + {log4j-core-version} + runtime + +---- + +Gradle:: ++ +[source,groovy,subs="+attributes"] +---- +runtimeOnly 'org.apache.logging.log4j:log4j-docker:{log4j-core-version}' +---- +==== + +[#features] +== Features + +Log4j Docker provides xref:manual/lookups.adoc#DockerLookup[Docker Lookup] that queries https://docs.docker.com/engine/api/[the API of the Docker Engine] running your container. +You can use it to inject Docker-environment specific values (container ID, image name, etc.) in xref:manual/configuration.adoc#property-substitution[property substitutions]. 
diff --git a/src/site/antora/modules/ROOT/pages/manual/architecture.adoc b/src/site/antora/modules/ROOT/pages/manual/architecture.adoc index b97b63502bc..dafbfeca443 100644 --- a/src/site/antora/modules/ROOT/pages/manual/architecture.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/architecture.adoc @@ -42,7 +42,7 @@ package "Configuration" as c { class Configuration { Appender[] appenders - Filter[] filters + Filter filter LoggerConfig[] loggerConfigs LoggerConfig getLoggerConfig(String name) StrSubstitutor substitutor @@ -198,7 +198,7 @@ LoggerContext --> "0..*" Logger class Configuration { Appender[] appenders - Filter[] filters + Filter filter LoggerConfig[] loggerConfigs LoggerConfig getLoggerConfig(String name) StrSubstitutor substitutor @@ -237,7 +237,7 @@ LoggerContext --> Configuration class Configuration #line.bold { Appender[] appenders - Filter[] filters + Filter filter LoggerConfig[] loggerConfigs LoggerConfig getLoggerConfig(String name) StrSubstitutor substitutor @@ -397,7 +397,7 @@ A `LoggerConfig` essentially contains class Configuration { Appender[] appenders - Filter[] filters + Filter filter LoggerConfig[] loggerConfigs LoggerConfig getLoggerConfig(String name) StrSubstitutor substitutor @@ -541,13 +541,13 @@ In addition to < "0..*" Filter +Configuration --> Filter Configuration --> "0..*" LoggerConfig @@ -575,6 +575,10 @@ class AppenderRef { Filter filter } +class AppenderControl { + Filter filter +} + AppenderRef --> Filter AppenderControl --> Filter @@ -599,7 +603,7 @@ See xref:manual/appenders.adoc[] for the complete guide. 
class Configuration { Appender[] appenders - Filter[] filters + Filter filter LoggerConfig[] loggerConfigs LoggerConfig getLoggerConfig(String name) StrSubstitutor substitutor diff --git a/src/site/antora/modules/ROOT/pages/manual/cloud.adoc b/src/site/antora/modules/ROOT/pages/manual/cloud.adoc index b0e16e54962..636dd508ade 100644 --- a/src/site/antora/modules/ROOT/pages/manual/cloud.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/cloud.adoc @@ -15,549 +15,260 @@ limitations under the License. //// -= Using Log4j in Cloud Enabled Applications - -== The Twelve-Factor Application - -The Logging Guidelines for https://12factor.net/logs[The Twelve-Factor App] state that all logs should be routed -unbuffered to stdout. Since this is the least common denominator it is guaranteed to work for all applications. However, -as with any set of general guidelines, choosing the least common denominator approach comes at a cost. Some of the costs -in Java applications include: - -. Java stack traces are multi-line log messages. The standard docker log driver cannot handle these properly. See -https://github.com/moby/moby/issues/22920[Docker Issue #22920] which was closed with the message "Don't Care". -Solutions for this are to: -.. Use a docker log driver that does support multi-line log message, -.. Use a logging format that does not produce multi-line messages, -.. Log from Log4j directly to a logging forwarder or aggregator and bypass the docker logging driver. -. When logging to stdout in Docker, log events pass through Java's standard output handling which is then directed -to the operating system so that the output can be piped into a file. The overhead of all this is measurably slower -than just writing directly to a file as can be seen in these benchmark results where logging -to stdout is 16-20 times slower over repeated runs than logging directly to the file. 
The results below were obtained by -running the https://github.com/apache/logging-log4j2/blob/release-2.x/log4j-perf-test/src/main/java/org/apache/logging/log4j/perf/jmh/OutputBenchmark.java[Output Benchmark] -on a 2018 MacBook Pro with a 2.9GHz Intel Core i9 processor and a 1TB SSD. However, these results alone would not be -enough to argue against writing to the standard output stream as they only amount to about 14-25 microseconds -per logging call vs 1.5 microseconds when writing to the file. -+ -[source] ----- - Benchmark Mode Cnt Score Error Units - OutputBenchmark.console thrpt 20 39291.885 ± 3370.066 ops/s - OutputBenchmark.file thrpt 20 654584.309 ± 59399.092 ops/s - OutputBenchmark.redirect thrpt 20 70284.576 ± 7452.167 ops/s ----- -. When performing audit logging using a framework such as log4j-audit guaranteed delivery of the audit events -is required. Many of the options for writing the output, including writing to the standard output stream, do -not guarantee delivery. In these cases the event must be delivered to a "forwarder" that acknowledges receipt -only when it has placed the event in durable storage, such as what https://flume.apache.org/[Apache Flume] will do. += Integrating with service-oriented architectures -== Logging Approaches +In this page we will share certain <> you can employ in your applications using Log4j Core to integrate them with service-oriented architectures. +While doing so, we will also try to share guides on some popular scenarios. -All the solutions discussed on this page are predicated with the idea that log files cannot permanently -reside on the file system and that all log events should be routed to one or more log analysis tools that will -be used for reporting and alerting. There are many ways to forward and collect events to be sent to the -log analysis tools. 
+[#motivation] +== Motivation -Note that any approach that bypasses Docker's logging drivers requires Log4j's -https://logging.apache.org/log4j/2.x/manual/lookups.html#DockerLookup[Docker Lookup] to allow Docker attributes to be injected into the log events. +Most modern software is deployed in https://en.wikipedia.org/wiki/Service-oriented_architecture[service-oriented architectures]. +This is a very broad domain and can be realized in an amazingly large number of ways. +Nevertheless, they all redefine the notion of an application: -=== Logging to the Standard Output Stream +* Deployed in *multiple instances* +* Situated in *multiple locations*; either in the same rack, or in different data centers located in different continents +* Hosted by *multiple platforms*; hardware, virtual machine, container, etc. +* *Polyglot*; a product of multiple programming languages +* *Scaled* on demand; instances come and go in time -As discussed above, this is the recommended 12-Factor approach for applications running in a docker container. -The Log4j team does not recommend this approach for performance reasons. +Naturally, logging systems also evolved to accommodate these needs. +In particular, the old practice of _"monoliths writing logs to files rotated daily"_ has changed in two major angles: -image:DockerStdout.png[Stdout, "Application Logging to the Standard Output Stream"] +Application delivers logs differently:: -=== Logging to the Standard Output Stream with the Docker Fluentd Logging Driver +Applications no longer write logs to files, but <>, and deliver them to an external system centrally managed. +Most of the time this is a <> (a library, a sidecar container, etc.) that takes care of discovering the log storage system and determining the right external service to forward the logs to. 
-Docker provides alternate https://docs.docker.com/config/containers/logging/configure/[logging drivers], -such as https://docs.docker.com/config/containers/logging/fluentd/[fluentd], that -can be used to redirect the standard output stream to a log forwarder or log aggregator. +Platform stores logs differently:: -When routing to a log forwarder it is expected that the forwarder will have the same lifetime as the -application. If the forwarder should fail the management tools would be expected to also terminate -other containers dependent on the forwarder. +There is no longer `/var/log/tomcat/catalina.out` combining all logs of a monolith. +Instead, the software runs in multiple instances, each is implemented in a different language, and instances get scaled (i.e., new ones get started, old ones get stopped) on demand. +To accommodate this, logs are persisted on a central storage system (Elasticsearch, Google Cloud Logging, etc.) that allows advanced navigation and filtering capabilities. -image:DockerFluentd.png[Docker Fluentbit, "Logging via StdOut using the Docker Fluentd Logging Driver to Fluent-bit"] +Log4j Core not only adapts to this evolution, but also strives to provide the best in the class support for that. +We will explore how to integrate Log4j with service-oriented architectures. -As an alternative the logging drivers could be configured to route events directly to a logging aggregator. -This is generally not a good idea as the logging drivers only allow a single host and port to be configured. -The docker documentation isn't clear but infers that log events will be dropped when log events cannot be -delivered so this method should not be used if a highly available solution is required. 
+[#best-practices] +== Best practices -image:DockerFluentdAggregator.png[Docker Fluentd, "Logging via StdOut using the Docker Fluentd Logging Driver to Fluentd"] +Independent of the service-oriented architecture you choose, there are certain best practices we strongly encourage you to follow: -=== Logging to a File +[#structured-encoding] +=== Encode logs using a structured layout -While this is not the recommended 12-Factor approach, it performs very well. -However, it requires that the application declares a volume where the log files will reside and then configures the log forwarder to tail those files. -Care must also be taken to automatically manage the disk space used for the logs, which Log4j can perform via the "Delete" action on the xref:manual/appenders.adoc#RollingFileAppender[RollingFileAppender]. +We can't emphasize it enough to not use anything, but a xref:manual/layouts.adoc#structured-logging[structured layout] to deliver your logs to an external system. +We recommend xref:manual/json-template-layout.adoc[] for this purpose: -image:DockerLogFile.png[File, "Logging to a File"] +* JSON Template Layout provides full customizability and contains several predefined layouts for popular log storage services. +* JSON is accepted by every log storage service. +* JSON is supported by logging frameworks in other languages. +This makes it possible to agree on a common log format with non-Java applications. -=== Sending Directly to a Log Forwarder via TCP +[#proxy] +=== Use a proxy for writing logs -Sending logs directly to a Log Forwarder is simple as it generally just requires that the forwarder's host and port be configured on a SocketAppender with an appropriate layout. +Most of the time it is not a good idea to write to the log storage system directly, but instead delegate that task to a proxy. 
+This design decouples applications' log target and the log storage system and, as a result, effectively enables each to evolve independently and reliably (i.e., without downtime). +For instance, this will allow the log storage system to scale or migrate to a new environment while proxies take care of necessary buffering and routing. -image:DockerTCP.png[TCP, "Application Logging to a Forwarder via TCP"] +This proxy can appear in many forms, for instance: -=== Sending Directly to a Log Aggregator via TCP +* *Console* can act as a proxy. +Logs written to console can be consumed by an external service. +For example, https://12factor.net/logs[The Twelve-Factor App] and https://kubernetes.io/docs/concepts/cluster-administration/logging/[Kubernetes Logging Architecture] recommends this approach. -Similar to sending logs to a forwarder, logs can also be sent to a cluster of aggregators. -However, setting this up is not as simple since, to be highly available, a cluster of aggregators must be used. -However, the SocketAppender currently can only be configured with a single host and port. -To allow for failover if the primary aggregator fails the SocketAppender must be enclosed in a xref:manual/appenders.adoc#FailoverAppender[FailoverAppender], which would also have the secondary aggregator configured. -Another option is to have the SocketAppender point to a highly available proxy that can forward to the Log Aggregator. +* A *library* can act as proxy. +It can tap into the logging API and forward it to an external service. +For instance, https://docs.datadoghq.com/logs/log_collection/java[Datadog's Java Log Collector] uses this mechanism. -If the log aggregator used is Apache Flume (or similar) the Appenders for these support -being configured with a list of hosts and ports so high availability is not an issue. +* An external *service* can act as a proxy, which applications can write logs to. 
+For example, you can write to https://www.elastic.co/logstash[Logstash], a https://kubernetes.io/docs/concepts/cluster-administration/logging/#sidecar-container-with-a-logging-agent[Kubernetes logging agent sidecar], or a https://redis.io/glossary/redis-queue/[Redis queue] over a socket. -image:LoggerAggregator.png[Aggregator, "Application Logging to an Aggregator via TCP"] +What to use as a proxy depends on your deployment environment. +You should consult your colleagues if there is already an established logging proxy convention. +Otherwise, we strongly encourage you to establish one in collaboration with your system administrators and architects. -[#ELK] -== Logging using Elasticsearch, Logstash, and Kibana +[#appender] +=== Configure your appender correctly -There are various approaches with different trade-offs for ingesting logs into -an ELK stack. Here we will briefly cover how one can forward Log4j generated -events first to Logstash and then to Elasticsearch. +Once you decide on <> to use, the choice of appender pretty much becomes self-evident. +Nevertheless, there are some tips we recommend you follow: -=== Log4j Configuration +* *For writing to console*, use a xref:manual/appenders.adoc#ConsoleAppender[Console Appender] and make sure to configure its `direct` attribute to `true` for maximum efficiency. -==== JsonTemplateLayout +* *For writing to an external service*, use a xref:manual/appenders.adoc#SocketAppender[Socket Appender] and make sure to set the protocol to TCP and configure the null delimiter of the associated layout. +For instance, see xref:manual/json-template-layout.adoc#plugin-attr-nullEventDelimiterEnabled[the `nullEventDelimiterEnabled` configuration attribute of JSON Template Layout]. -Log4j provides a multitude of JSON generating layouts. In particular, JSON -Template Layout allows full schema -customization and bundles ELK-specific layouts by default, which makes it a -great fit for the bill.
Using the EcsLayout template as shown below will generate data in Kibana where -the message displayed exactly matches the message passed to Log4j and most of the event attributes, including -any exceptions, are present as individual attributes that can be displayed. Note, however that stack traces -will be formatted without newlines. +[#file] +=== Avoid writing to files -[source,xml] ----- - - - - - - - - - - - - - - - - - - - - ----- +As explained in <>, in a service-oriented architecture, log files are -==== Gelf Template +* Difficult to maintain – writable volumes must be mounted to the runtime (container, VM, etc.), rotated, and monitored for excessive usage +* Difficult to use – multiple files need to be manually combined while troubleshooting, no central navigation point +* Difficult to interoperate – each application needs to be individually configured to produce the same structured log output to enable interleaving of logs from multiple sources while troubleshooting distributed issues -The JsonTemplateLayout can also be used to generate JSON that matches the GELF specification which can format -the message attribute using a pattern in accordance with the PatternLayout. For example, the following -template, named EnhancedGelf.json, can be used to generate GELF-compliant data that can be passed to Logstash. -With this template the message attribute will include the thread id, level, specific ThreadContext attributes, -the class name, method name, and line number as well as the message. If an exception is included it will also -be included with newlines. This format follows very closely what you would see in a typical log file on disk -using the PatternLayout but has the additional advantage of including the attributes as separate fields that -can be queried. +In short, *we don't recommend writing logs to files*. 
-[source,json] ----- -{ - "version": "1.1", - "host": "${hostName}", - "short_message": { - "$resolver": "message", - "stringified": true - }, - "full_message": { - "$resolver": "message", - "pattern": "[%t] %-5p %X{requestId, sessionId, loginId, userId, ipAddress, corpAcctNumber} %C{1.}.%M:%L - %m", - "stringified": true - }, - "timestamp": { - "$resolver": "timestamp", - "epoch": { - "unit": "secs" - } - }, - "level": { - "$resolver": "level", - "field": "severity", - "severity": { - "field": "code" - } - }, - "_logger": { - "$resolver": "logger", - "field": "name" - }, - "_thread": { - "$resolver": "thread", - "field": "name" - }, - "_mdc": { - "$resolver": "mdc", - "flatten": { - "prefix": "_" - }, - "stringified": true - } -} ----- +[#separate-config] +=== Separate logging configuration from the application -The logging configuration to use this template would be +We strongly advise you to separate the logging configuration from the application and couple them in an environment-specific way. +This will allow you to -[source,xml] ----- - - - - - - - - - - - - - - - - - - - - ----- +* Address environment-specific configurations (e.g., logging verbosity needs of test and production can be different) +* Ensure Log4j configuration changes applies to all affected Log4j-using software without the need to manually update their Log4j configuration one by one -The significant difference with this configuration from the first example is that it references the -custom template and it specifies an event delimiter of a null character ('\0'); +How to implement this separation pretty much depends on your setup. +We will share some recommended approaches to give you an idea: -NOTE: The level being passed with the above template does not strictly conform to the GELF spec as the Level being passed is the Log4j Level NOT the Level defined in the GELF spec. -However, testing has shown that Logstash, Elk, and Kibana are pretty tolerant of whatever data is passed to it. 
+Choosing configuration files during deployment:: ++ +-- +Environment-specific xref:manual/configuration.adoc[Log4j configuration files] (`log4j2-common.xml`, `log4j2-local.xml`, `log4j2-test.xml`, `log4j2-prod.xml`, etc.) can be provided in one of following ways: -==== Custom Template +* Shipped with your software (i.e., accessible in the classpath) +* Served from an HTTP server +* A combination of the first two -Another option is to use a custom template, possibly based on one of the standard templates. The template -below is loosely based on ECS but a) adds the spring boot application name, b) formats the message -using PatternLayout, formats Map Messages as event.data attributes while setting the event action based on -any Marker included in the event, includes all the ThreadContext attributes. +Depending on the deployment environment, you can selectively activate a subset of them using xref:manual/systemproperties.adoc#log4j2.configurationFile[the `log4j2.configurationFile` configuration property]. -NOTE: The Json Template Layout escapes control sequences so messages that contain '\n' will have those -control sequences copied as "\n" into the text rather than converted to a newline character. This bypasses -many problems that occur with Log Forwarders such as Filebeat and FluentBit/Fluentd. Kibana will correctly -interpret these sequences as newlines and display them correctly. Also note that the message pattern does -not contain a timestamp. Kibana will display the timestamp field in its own column so placing it in the -message would be redundant. +[TIP] +==== +Spring Boot allows you to https://docs.spring.io/spring-boot/reference/features/logging.html[configure the underlying logging system]. +Just like any other Spring Boot configuration, logging-related configuration also can be provided in multiple files split by profiles matching the environment: `application-common.yaml`, `application-local.yaml`, etc. 
+https://docs.spring.io/spring-boot/reference/features/external-config.html[Spring Boot's Externalized Configuration System] will automatically load these files depending on the active profile(s). +==== +-- -[source,json] ----- -{ - "@timestamp": { - "$resolver": "timestamp", - "pattern": { - "format": "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", - "timeZone": "UTC" - } - }, - "ecs.version": "1.11.0", - "log.level": { - "$resolver": "level", - "field": "name" - }, - "application": "\\${lower:\\${spring:spring.application.name}}", - "short_message": { - "$resolver": "message", - "stringified": true - }, - "message": { - "$resolver": "pattern", - "pattern": "[%t] %X{requestId, sessionId, loginId, userId, ipAddress, accountNumber} %C{1.}.%M:%L - %m%n" - }, - "process.thread.name": { - "$resolver": "thread", - "field": "name" - }, - "log.logger": { - "$resolver": "logger", - "field": "name" - }, - "event.action": { - "$resolver": "marker", - "field": "name" - }, - "event.data": { - "$resolver": "map", - "stringified": true - }, - "labels": { - "$resolver": "mdc", - "flatten": true, - "stringified": true - }, - "tags": { - "$resolver": "ndc" - }, - "error.type": { - "$resolver": "exception", - "field": "className" - }, - "error.message": { - "$resolver": "exception", - "field": "message" - }, - "error.stack_trace": { - "$resolver": "exception", - "field": "stackTrace", - "stackTrace": { - "stringified": true - } - } -} ----- +Mounting configuration files during deployment:: ++ +Many service-oriented deployment architectures offer solutions for environment-specific configuration storage; Kubernetes' https://kubernetes.io/docs/concepts/configuration/configmap/[ConfigMap], HashiCorp's https://developer.hashicorp.com/consul/docs/dynamic-app-config/kv[Consul], etc. +You can leverage these to store environment-specific Log4j configurations and mount them to the associated runtime (container, VM, etc.) at deployment. -Finally, the GelfLayout can be used to generate GELF compliant output. 
Unlike the JsonTemplateLayout it adheres closely to the GELF spec. +[NOTE] +==== +Log4j Core can poll configuration files for changes (see xref:manual/configuration.adoc#configuration-attribute-monitorInterval[the `monitorInterval` attribute]) and reconfigure the associated logger context. +You can leverage this mechanism to *dynamically update the Log4j configuration at runtime*. -[source,xml] ----- - - - requestId,sessionId,loginId,userId,ipAddress,callingHost - %d [%t] %-5p %X{requestId, sessionId, loginId, userId, ipAddress} %C{1.}.%M:%L - %m%n - - - - - - - - - - - - - - - - - - ----- +You need to be careful with this mechanism to not shoot yourself in the foot. +Imagine publishing an incorrect `log4j2.xml` and rendering the logging setup of your entire cluster useless in seconds. +Coupling the configuration with the application at deployment and gradually deploying new configurations is a more reliable approach. +==== + +[#guides] +== Guides + +In this section, we will share guides on some popular integration scenarios. -==== Logstash Configuration with Gelf +[#docker] +=== Docker -We will configure Logstash to listen on TCP port 12345 for payloads of type JSON and then forward these to (either console and/or) an Elasticsearch server. +See xref:log4j-docker.adoc[Log4j Docker] for Docker-specific Log4j features, e.g., xref:manual/lookups.adoc#DockerLookup[Docker Lookup]. +We also strongly advise you to check https://docs.docker.com/config/containers/logging/[the extensive logging integration] offered by Docker containers. -[source] +=== Kubernetes + +Log4j Kubernetes (containing xref:manual/lookups.adoc#KubernetesLookup[Kubernetes Lookup]) is distributed as a part of Fabric8's Kubernetes Client, refer to {log4j-kubernetes-url}[its website] for details. + +[#ELK] +=== Elasticsearch & Logstash + +Elasticsearch, Logstash, and Kibana (aka. https://www.elastic.co/elastic-stack/[ELK Stack]) is probably the most popular logging system solution. 
+In this setup, + +* https://www.elastic.co/elasticsearch[Elasticsearch] is used for log storage +* https://www.elastic.co/logstash[Logstash] is used for transformation and ingestion to Elasticsearch from multiple sources (file, socket, etc.) +* https://www.elastic.co/kibana[Kibana] is used as a web-based UI to query Elasticsearch + +To begin with, JSON is the de facto messaging format used across the entire https://www.elastic.co/platform[Elastic platform]. +Hence, as stated earlier, <>, i.e., xref:manual/json-template-layout.adoc[]. + +[#logstash] +==== Logstash as a proxy + +While using ELK stack, there are numerous ways you can write your application logs to Elasticsearch. +<> while doing so. +In particular, *we recommend you to use Logstash* for this purpose. +In a modern software stack, the shape and accessibility of log varies greatly: some write to files (be it legacy or new systems), some doesn't provide a structured encoding, etc. +Logstash excels at ingesting from a wide range of sources, transforming them into the desired format, and writing them to Elasticsearch. + +While setting up Logstash, we recommend you to use https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html[TCP input plugin] in combination with https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html[Elasticsearch output plugin] to accept logs over a TCP socket and write them to Elasticsearch: + +.An example `logstash.conf` snippet for accepting JSON-encoded log events over TCP and writing them to Elasticsearch +[source,text] ---- input { - tcp { - port => 12345 - codec => "json" + tcp { //<1> + port => 12345 //<2> + codec => "json" //<3> } } output { - # Uncomment for debugging purposes. - # stdout { codec => rubydebug } + + # stdout { codec => rubydebug } //<4> # Modify the hosts value to reflect where Elasticsearch is installed. 
- elasticsearch { - hosts => ["http://localhost:9200/"] - index => "app-%{application}-%{+YYYY.MM.dd}" + elasticsearch { //<5> + hosts => ["http://localhost:9200/"] //<6> + index => "app-%{application}-%{+YYYYMMdd}" //<7> } + } ---- +<1> Using https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html[TCP input plugin] to accept logs from +<2> Setting the port Logstash will bind to accept TCP connections to 12345 – *adapt the `port`* to your setup +<3> Setting the payload encoding to JSON +<4> Uncomment this while troubleshooting your Logstash configuration +<5> Using https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html[Elasticsearch output plugin] to write logs to Elasticsearch +<6> The list of Elasticsearch hosts to connect to +<7> The name of the Elasticsearch index to write to -==== Logstash Configuration with JsonTemplateLayout +Refer to https://www.elastic.co/guide/en/logstash/current/configuration.html[the official documentation] for details on configuring a Logstash pipeline. 
-When using one of the GELF compliant formats, Logstash should be configured as: +For the sake of completeness, see the following Log4j configuration to write to the TCP socket Logstash accepts input from: -[source] +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/cloud/logstash/log4j2.xml[`log4j2.xml`] +[source,xml] ---- -gelf { - host => "localhost" - use_tcp => true - use_udp => false - port => 12222 - type => "gelf" - } - } - - filter { - translate { - field => "[level]" - destination => "[levelName]" - dictionary => { - "0" => "EMERG" - "1" => "ALERT" - "2" => "CRITICAL" - "3" => "ERROR" - "4" => "WARN" - "5" => "NOTICE" - "6" => "INFO" - "7" => "DEBUG" - } - } - } - - output { - elasticsearch { - hosts => ["http://localhost:9200/"] - index => "app-%{application}-%{+YYYY.MM.dd}" - } - } +include::example$cloud/logstash/log4j2.xml[tag=socketAppender,indent=0] ---- -==== Filebeat configuration with JsonTemplateLayout - -When using a JsonTemplateLayout that complies with ECS, Filebeat configuration is straightforward. +JSON:: ++ +.Snippet from an example {antora-examples-url}/cloud/logstash/log4j2.json[`log4j2.json`] +[source,json] +---- +include::example$cloud/logstash/log4j2.json[tag=socketAppender,indent=0] +---- -[source,yaml] +YAML:: ++ +.Snippet from an example {antora-examples-url}/cloud/logstash/log4j2.yaml[`log4j2.yaml`] +[source,xml] ---- -filebeat.inputs: -- type: log - enabled: true - json.keys_under_root: true - paths: - - /var/log/apps/*.log +include::example$cloud/logstash/log4j2.yaml[tag=socketAppender,indent=0] ---- -=== Kibana - -Using the EnhancedGelf template or the custom template, the above configurations will allow the message -field to contain a fully formatted log event. The ThreadContext attributes, custom fields, thread name, etc., will all be available as attributes on each log event that can be used for filtering. 
- -== Managing Logging Configuration - -Spring Boot provides another least common denominator approach to logging configuration. -It will let you set the log level for various Loggers within an application which can be dynamically updated via REST endpoints provided by Spring. -While this works in a lot of cases it does not support any of the more advanced filtering features of Log4j. -For example, since it cannot add or modify any Filters other than the log level of a logger, changes cannot be made to allow all log events for a specific user or customer to temporarily be logged (see xref:manual/filters.adoc#DynamicThresholdFilter[DynamicThresholdFilter] or xref:manual/filters.adoc#ThreadContextMapFilter[ThreadContextMapFilter]) or any other kinds of changes to filters. -Also, in a microservices, clustered environment it is quite likely that these changes will need to be propagated to multiple servers at the same time. -Trying to achieve this via REST calls could be difficult. - -Since its first release Log4j has supported reconfiguration through a file. -Beginning with Log4j 2.12.0 Log4j also supports accessing the configuration via HTTP(S) and monitoring the file for changes by using the HTTP "If-Modified-Since" header. -A patch has also been integrated into Spring Cloud Config starting with versions 2.0.3 and 2.1.1 for it to honor the If-Modified-Since header. -In addition, the log4j-spring-cloud-config project will listen for update events published by Spring Cloud Bus and then verify that the configuration file has been modified, so polling via HTTP is not required. - -Log4j also supports composite configurations. -A distributed application spread across microservices could share a common configuration file that could be used to control things like enabling debug logging for a specific user. 
- -While the standard Spring Boot REST endpoints to update logging will still work any changes made by those REST endpoints will be lost if Log4j reconfigures itself do to changes in the logging configuration file. - -Further information regarding integration of the log4j-spring-cloud-config-client can be found at Log4j Spring Cloud Config Client. - -=== Integration with Spring Boot - -Log4j integrates with Spring Boot in two ways: - -. A Spring Lookup can be used to access the Spring application configuration from Log4j configuration files. -. Log4j will access the Spring configuration when trying to resolve Log4j system properties. - -Both of these require that the log4j-spring-cloud-client jar is included in the application. - -=== Integration with Docker - -Applications within a Docker container that log using a Docker logging driver can include special attributes in the formatted log event as described at https://docs.docker.com/config/containers/logging/log_tags/[Customize Log Driver Output]. -Log4j provides similar functionality via the xref:manual/lookups.adoc#DockerLookup[Docker Lookup]. -More information on Log4j's Docker support may also be found at xref:log4j-docker.adoc[Log4j Docker]. - -=== Integration with Kubernetes - -Applications managed by Kubernetes can bypass the Docker/Kubernetes logging infrastructure and log directly to either a sidecar forwarder or a logging aggragator cluster while still including all the Kubernetes attributes by using the https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md[Kubernetes Log4j Lookup] maintained by the Fabric8 project. - -== Appender Performance - -The numbers in the table below represent how much time in seconds was required for the application to call `+logger.debug(...)+` 100,000 times. -These numbers only include the time taken to deliver to the specifically noted endpoint and many not include the actual time required before they are available for viewing. 
-All measurements were performed on a MacBook Pro with a 2.9GHz Intel Core I9 processor with 6 physical and 12 logical cores, 32GB of 2400 MHz DDR4 RAM, and 1TB of Apple SSD storage. -The VM used by Docker was managed by VMWare Fusion and had 4 CPUs and 2 GB of RAM. -These number should be used for relative performance comparisons as the results on another system may vary considerably. - -The sample application used can be found under the in https://github.com/apache/logging-log4j-samples[the Log4j samples source repository]. - -[options="header"] -|=========================================================================== -| Test | 1 Thread | 2 Threads | 4 Threads | 8 Threads -| Flume Avro | | | | -| - Batch Size 1 - JSON | 49.11 | 46.54 | 46.70 | 44.92 -| - Batch Size 1 - RFC5424 | 48.30 | 45.79 | 46.31 | 45.50 -| - Batch Size 100 - JSON | 6.33 | 3.87 | 3.57 | 3.84 -| - Batch Size 100 - RFC5424 | 6.08 | 3.69 | 3.22 | 3.11 -| - Batch Size 1000 - JSON | 4.83 | 3.20 | 3.02 | 2.11 -| - Batch Size 1000 - RFC5424 | 4.70 | 2.40 | 2.37 | 2.37 -| Flume Embedded | | | | -| - RFC5424 | 3.58 | 2.10 | 2.10 | 2.70 -| - JSON | 4.20 | 2.49 | 3.53 | 2.90 -| Kafka Local JSON | | | | -| - sendSync true | 58.46 | 38.55 | 19.59 | 19.01 -| - sendSync false | 9.8 | 10.8 | 12.23 | 11.36 -| Console | | | | -| - JSON / Kubernetes | 3.03 | 3.11 | 3.04 | 2.51 -| - JSON | 2.80 | 2.74 | 2.54 | 2.35 -| - Docker fluentd driver | 10.65 | 9.92 | 10.42 | 10.27 -| Rolling File | | | | -| - RFC5424 | 1.65 | 0.94 | 1.22 | 1.55 -| - JSON | 1.90 | 0.95 | 1.57 | 1.94 -| TCP - Fluent Bit - JSON | 2.34 | 2.167 | 1.67 | 2.50 -| Async Logger | | | | -| - TCP - Fluent Bit - JSON | 0.90 | 0.58 | 0.36 | 0.48 -| - Console - JSON | 0.83 | 0.57 | 0.55 | 0.61 -| - Flume Avro - 1000 - JSON | 0.76 | 0.37 | 0.45 | 0.68 -|=========================================================================== - -Notes: - -. Flume Avro - Buffering is controlled by the batch size. 
Each send is complete when the remote -acknowledges the batch was written to its channel. These number seem to indicate Flume Avro could -benefit from using a pool of RPCClients, at least for a batchSize of 1. -. Flume Embedded - This is essentially asynchronous as it writes to an in-memory buffer. It is -unclear why the performance isn't closer to the AsyncLogger results. -. Kafka was run in standalone mode on the same laptop as the application. See sendSync set to true -requires waiting for an ack from Kafka for each log event. -. Console - System.out is redirected to a file by Docker. Testing shows that it would be much -slower if it was writing to the terminal screen. -. Rolling File - Test uses the default buffer size of 8K. -. TCP to Fluent Bit - The Socket Appender uses a default buffer size of 8K. -. Async Loggers - These all write to a circular buffer and return to the application. The actual -I/O will take place on a separate thread. If writing the events is performed more slowly than -events are being created eventually the buffer will fill up and logging will be performed at -the same pace that log events are written. - -== Logging Recommendations - -. Use asynchronous logging unless guaranteed delivery is absolutely required. As -the performance numbers show, so long as the volume of logging is not high enough to fill up the -circular buffer the overhead of logging will almost be unnoticeable to the application. -. If overall performance is a consideration or you require multiline events such as stack traces -be processed properly then log via TCP to a companion container that acts as a log forwarder or directly -to a log aggregator as shown above in <>. Use the -Log4j Docker Lookup to add the container information to each log event. -. 
Whenever guaranteed delivery is required use Flume Avro with a batch size of 1 or another Appender such -as the Kafka Appender with syncSend set to true that only return control after the downstream agent -acknowledges receipt of the event. Beware that using an Appender that writes each event individually should -be kept to a minimum since it is much slower than sending buffered events. -. Logging to files within the container is discouraged. Doing so requires that a volume be declared in -the Docker configuration and that the file be tailed by a log forwarder. However, it performs -better than logging to the standard output stream. If logging via TCP is not an option and -proper multiline handling is required then consider this option. +Properties:: ++ +.Snippet from an example {antora-examples-url}/cloud/logstash/log4j2.properties[`log4j2.properties`] +[source,xml] +---- +include::example$cloud/logstash/log4j2.properties[tag=socketAppender,indent=0] +---- +==== + +[NOTE] +==== +<>. +If this is a necessity in your logging setup for some reason, we recommend you to check https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html[Filebeat]. +It is a data shipper agent for forwarding logs to Logstash, Elasticsearch, etc. +==== diff --git a/src/site/antora/modules/ROOT/pages/manual/configuration.adoc b/src/site/antora/modules/ROOT/pages/manual/configuration.adoc index d5140872a34..a271a7719f0 100644 --- a/src/site/antora/modules/ROOT/pages/manual/configuration.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/configuration.adoc @@ -50,10 +50,13 @@ The order in which an extension will be searched for first depends on the order See <> for details. If no configuration file is found, Log4j Core uses the link:../javadoc/log4j-core/org/apache/logging/log4j/core/config/DefaultConfiguration.html[`DefaultConfiguration`] and the xref:manual/status-logger.adoc[status logger] prints a warning. 
-The default configuration prints all messages less specific than xref:manual/systemproperties.adoc#log4j2.level[`log4j2.level`] to the console. +The default configuration prints all messages less severe than xref:manual/systemproperties.adoc#log4j2.level[`log4j2.level`] to the console. -You can override the location of the configuration file using the xref:manual/systemproperties.adoc#log4j2.configurationFile[the `log4j2.configurationFile` system property]. -In such a case, Log4j Core will guess the configuration file format from the provided file name, or use the default configuration factory if the extension is unknown. +You can override the location of the configuration file +using the xref:manual/systemproperties.adoc#log4j2.configurationFile[`log4j2.configurationFile` +system property]. +In such a case, Log4j Core will guess the configuration file format from the provided file name, +or use the default configuration factory if the extension is unknown. There are certain *best-practices* we strongly recommend you to adapt in your Log4j configuration: @@ -287,7 +290,7 @@ include::example$manual/configuration/main-elements.properties[lines=17..-1] <2> Configures a file appender named `MAIN` with a JSON template layout. <3> Configures a file appender named `DEBUG_LOG` with a pattern layout. <4> Configures the root logger at level `INFO` and connects it to the `CONSOLE` and `MAIN` appenders. -The `CONSOLE` appender will only log messages less specific than `WARN`. +The `CONSOLE` appender will only log messages at least as severe as `WARN`. <5> Configures a logger named `"org.example"` at level `DEBUG` and connects it to the `DEBUG_LOG` appender. The logger is configured to forward messages to its parent (the root appender). @@ -334,6 +337,7 @@ Log4j allows the configuration of custom log-level names. + See xref:manual/customloglevels.adoc[Custom log level configuration] for details. 
+[#global-filters] Filters:: + Users can add Components to loggers, appender references, appenders, or the global configuration object to provide additional filtering of log events. @@ -566,8 +570,8 @@ xref:manual/architecture.adoc#logger-hierarchy[parent logger], for `Logger` and `AsyncLogger`. |=== -Specifies the level threshold that a log event must have to be logged. -Log events that are more specific than this setting will be filtered out. +It specifies the level threshold that a log event must have to be logged. +Log events that are less severe than this setting will be filtered out. See also xref:manual/filters.adoc#filters[Filters] if you require additional filtering. @@ -687,8 +691,8 @@ Specifies the name of the appender to use. | Type | link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] |=== -Specifies the level threshold that a log event must have to be logged. -Log events that are more specific than this setting will be filtered out. +It specifies the level threshold that a log event must have to be logged. +Log events that are less severe than this setting will be filtered out. [id=appenderrefs-elements-filters] === Filters diff --git a/src/site/antora/modules/ROOT/pages/manual/customloglevels.adoc b/src/site/antora/modules/ROOT/pages/manual/customloglevels.adoc index 67748f824b9..e23d1c62c60 100644 --- a/src/site/antora/modules/ROOT/pages/manual/customloglevels.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/customloglevels.adoc @@ -63,7 +63,7 @@ A level is composed of a case-sensitive name and a *priority* (of type `int`), w Priority can be used in several contexts to express a filtering capability, for instance: * `WARN` is _less severe_ than `ERROR` -* `WARN` is _more specific_ than `ERROR` +* `WARN` is _less specific_ than `ERROR` The entry point to log levels are through link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`]. 
Predefined levels are available for Log4j API integrators through link:../javadoc/log4j-api/org/apache/logging/log4j/spi/StandardLevel.html[`StandardLevel`]. diff --git a/src/site/antora/modules/ROOT/pages/manual/filters.adoc b/src/site/antora/modules/ROOT/pages/manual/filters.adoc index ad8eaeff125..228f81d8db9 100644 --- a/src/site/antora/modules/ROOT/pages/manual/filters.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/filters.adoc @@ -18,1036 +18,1448 @@ [id=filters] = Filters -Log4j supports filtering of log events at each level of the logging pipeline using two features: +Filters are Log4j plugins that evaluate the parameters of a logging call or a log event and return one of three results: -* the `level` attributes that can be set on loggers and appender references, -* filter components that can be attached to loggers, appenders, appender references or the global configuration object. +ACCEPT:: The filter accepts the log event. +This effectively causes other filters in the same filtering stage to be skipped. -Filters evaluate the parameters of a logging call (context-wide filter) or a log event and return one of three results: +DENY:: The filter drops the log event. -ACCEPT:: The log event is accepted by the filter and goes to the next stage of the logging pipeline, +NEUTRAL:: Log4j behaves as if the filter was not present. +It is evaluated by the next filter in the filter chain. -DENY:: The log event is unconditionally dropped, +Filters can be used at each level of the +xref:manual/architecture.adoc#architecture-diagram[logging pipeline]: -NEUTRAL:: Log4j behaves as if the filter was not present. +* the global configuration element can contain a xref:manual/configuration.adoc#global-filters[global filter]. +* each xref:manual/configuration.adoc#configuring-loggers[logger] configuration element can contain a xref:manual/configuration.adoc#logger-elements-filters[logger filter]. 
+* each xref:manual/configuration.adoc#configuring-appenderrefs[appender reference] configuration element can contain an xref:manual/configuration.adoc#appenderrefs-elements-filters[appender reference filter]. +* each xref:manual/appenders.adoc[appender] configuration element can contain an xref:manual/appenders.adoc[appender filter]. + +Additionally, the following configuration attributes take part in the filtering process: + +* the xref:manual/configuration.adoc#logger-attributes-level[`level` attribute] of logger configuration elements. +* the xref:manual/configuration.adoc#appenderref-attributes-level[`level` attribute] of appender reference configuration elements. + +[#filtering-process] +== Filtering process + +Due to the interaction of many elements, +the filtering process in Log4j is quite complex and can be divided in four stages: + +. <> +. <> +. <> +. <> + +[IMPORTANT] +==== +For performance reasons, log events should be filtered at the earliest possible stage. +This reduces the cost of disabled log events: +e.g., log event creation, population of context data, formatting, transfer through an asynchronous barrier. +==== + +[#logger-stage] +=== 1. `Logger` stage + +[plantuml] +.... +@startuml +start +group Logger + +:A Logger method; + +switch (Apply global filter) +case (DENY) + #pink:Discard; + detach +case (ACCEPT) +case (NEUTRAL) + if (Is less severe than logger level?) then (yes) + #pink:Discard; + detach + else (no) + ' The label improves spacing + label a1 + endif +endswitch +end group +:Create LogEvent; +stop +.... + +The parameters of the logging call are passed to the global filter. +If the global filter returns: + +DENY:: The log message is immediately discarded. +NEUTRAL:: If the level of the log message is less severe than the configured logger threshold, the message is discarded. +Otherwise, a +link:../javadoc/log4j-core/org/apache/logging/log4j/core/LogEvent.html[`LogEvent`] is created and processing continues. 
+ACCEPT:: A `LogEvent` is created and processing continues in the next stage. + +[IMPORTANT] +==== +This is the only stage, which differentiates between an `ACCEPT` and `NEUTRAL` filter result. +==== + +[TIP] +==== +Filtering logging calls at this stage provides the best performance: + +* this stage precedes the creation of log events, therefore operations like the +xref:manual/thread-context.adoc[injection of context data], +xref:manual/layouts.adoc#LocationInformation[computation of location information] +will not be performed for disabled log statements. +* this stage precedes the asynchronous calls performed by either +xref:manual/async.adoc[asynchronous loggers] +or +xref:manual/appenders.adoc#AsyncAppender[asynchronous appenders]. +==== + +[#logger-config-stage] +=== 2. `LoggerConfig` stage + +[plantuml] +.... +@startuml +start +:LogEvent; + +group LoggerConfig +repeat + +:LoggerConfig#log(); + +if (Apply logger filter) then (DENY) + #pink:Discard; + detach +else (not DENY) + ' The label improves spacing + label a1 +endif +repeat while (Go to parent logger?) is (yes) +-> no; +end group +stop +@enduml +.... + +In this stage, log events are evaluated by all the +xref:manual/configuration.adoc#logger-elements-filters[logger filters] +that stand on the path from the logger to an appender. +Due to the +xref:manual/configuration.adoc#logger-attributes-additivity[additivity of logger configurations], +this means that a log event must also pass the filters of all the parent loggers, +until it reaches the logger that references the chosen appender. + +[#appender-control-stage] +=== 3. `AppenderControl` stage + +[plantuml] +.... +@startuml +start +:LogEvent; + +group AppenderControl + +:AppenderControl#callAppender(); + +if (Is less severe then appender reference level?) 
then (yes) + #pink:Discard; + detach +else (no) + ' The label improves spacing + label a2 +endif +if (Apply appender reference filter) then (DENY) + #pink:Discard; + detach +else (not DENY) + ' The label improves spacing + label a1 +endif +end group +stop +@enduml +.... + +To pass this stage, log events must satisfy both conditions: + +* the log event must be at least as severe as the +xref:manual/configuration.adoc#appenderref-attributes-level[`level` attribute] +of the appender reference. +* the xref:manual/configuration.adoc#appenderrefs-elements-filters[appender reference filter] must return `ACCEPT` or `NEUTRAL`, + +[#appender-stage] +=== 4. `Appender` stage (optional) + +[plantuml] +.... +@startuml +start +:LogEvent; + +group Appender + +if (Apply appender filter) then (DENY) + #pink:Discard; + detach +else (not DENY) + ' The label improves spacing + label a1 +endif +end group +#palegreen:Appender#append(); +@enduml +.... + +If the appender implements +link:../javadoc/log4j-core/org/apache/logging/log4j/core/filter/Filterable.html[`Filterable`] +an additional filtering stage is available. +When log events reach such an appender, +the filter attached to an appender is evaluated and if the result is `DENY`, +the log event is discarded. + +All standard appenders implement `Filterable`. + +[NOTE] +==== +Some appenders like the +xref:manual/appenders.adoc#AsyncAppender[asynchronous appender] +use appender references to transfer log events to other appenders. +In such a case, the filtering process goes back to the <>. +==== + +[TIP] +==== +Users migrating from Log4j 1 often replace the `threshold` property of a Log4j 1 appender with a <> on the equivalent Log4j 2 appender. + +Using the `level` property of appender references will give a better performance. +==== + +[WARNING] +==== +Configuring filters at this stage is a measure of last resort, +since it adds a large overhead to disabled log events. 
+You should rather configure the filtering in one of the previous stages. +==== + +[#example-configuration-file] +=== Example configuration file -To decide whether a log event from a certain logger is delivered to a specific appender, the following procedure is followed: +The following example configuration file employs filters at all possible stages to explain their evaluation order: [tabs] ==== XML:: + -.Snippet from an example {antora-examples-url}/manual/filters/filters.xml[`log4j2.xml`] [source,xml] ---- -include::example$manual/filters/filters.xml[lines=23..41,indent=0] +include::example$manual/filters/filters.xml[lines=1;18..-1] ---- JSON:: + -.Snippet from an example {antora-examples-url}/manual/filters/filters.json[`log4j2.json`] [source,json] ---- -include::example$manual/filters/filters.json[lines=3..35,indent=0] +include::example$manual/filters/filters.json[] ---- YAML:: + -.Snippet from an example {antora-examples-url}/manual/filters/filters.yaml[`log4j2.yaml`] [source,yaml] ---- -include::example$manual/filters/filters.yaml[lines=18..-1] +include::example$manual/filters/filters.yaml[lines=17..-1] ---- Properties:: + -.Snippet from an example {antora-examples-url}/manual/filters/filters.properties[`log4j2.properties`] [source,properties] ---- include::example$manual/filters/filters.properties[lines=17..-1] ---- ==== -<1> First the context-wide filter is consulted. -If it returns `ACCEPT` the log message goes directly to point 3. -<2> Then Log4j checks the message against the configured logger level. -<3> The filter configured on a logger is applied next. -If the logger is additive, the filter on the parent logger is applied recursively until we end up on the logger that references the given appender. -<4> Next comes the turn of the filter configured on an appender reference, -<5> followed by a level check against the configured level of the reference. -<6> The process ends with the filter attached to an appender. 
+<1> Global filter +<2> Logger `level` attribute. This setting is **ignored** unless the global filter returns `NEUTRAL`. +<3> Filter of the `org.example` logger +<4> Filter of the root logger (it is the parent of the `org.example` logger) +<5> Appender reference `level` attribute +<6> Filter of the appender reference +<7> Filter of the appender -[WARNING] -==== -For performance reasons, log events should be filtered as soon as possible in the logging pipeline. -This reduces the costs (formatting, transfer through an asynchronous barrier) of disabled log events. -==== +[#common-configuration] +== Common configuration -[TIP] -==== -Users migrating from Log4j 1 often replace the `threshold` property of a Log4j 1 appender with a <> on the equivalent Log4j 2 appender. +[#common-configuration-attributes] +=== Common configuration attributes -Using the `level` property of appender references will give a better performance. -==== +The default behavior of filters is in line with the `filter()` methods of functional interfaces, such as +https://docs.oracle.com/javase/{java-target-version}/docs/api/java/util/Optional.html#filter-java.util.function.Predicate-[`Optional.filter()`] +or +https://docs.oracle.com/javase/{java-target-version}/docs/api/java/util/stream/Stream.html#filter-java.util.function.Predicate-[`Stream.filter()`]: +filters pass matching events to the next filter and drop those that do not match. + +To allow for a larger spectrum of behaviors, +all standard filters, except `CompositeFilter` and `DenyAllFilter`, accept the following configuration attributes: + +.Common filter configuration attributes +[cols="1m,1,1,4"] +|=== +|Attribute |Type | Default value |Description + +| [[onMatch]]onMatch +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.Result.html[`Result`] +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.Result.html#NEUTRAL[`NEUTRAL`] +| Result returned if the condition matches. 
+ +| [[onMismatch]]onMismatch +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.Result.html[`Result`] +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.Result.html#DENY[`DENY`] +| Result returned if the condition does not match. + +|=== + +[#CompositeFilter] +=== Composing filters + +Filters usually test for a single condition. +To express a more complex filtering logic, Log4j provides a +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-CompositeFilter[`Filters`] +plugin. +This plugin can contain a sequence of filters and has no other configuration option. + +The `Filters` plugin sequentially evaluates each sub-filter and: + +* if the sub-filter returns `ACCEPT` (resp. `DENY`), the `Filters` plugin returns `ACCEPT` (resp. `DENY`). +* if the sub-filter return `NEUTRAL`, the `Filters` plugin evaluates the next sub-filter in the chain. +* if the last sub-filter returns `NEUTRAL`, the `Filters` plugin returns `NEUTRAL`. + +The `Filters` plugin together with the ternary logic of filters, can be used to express most boolean operators. +In the following examples `A` and `B` are two filters. + +`NOT A`:: +You can invert the functionality of filter `A` by swapping the `onMatch` and `onMismatch`: ++ +[source,xml] +---- + +---- + +`A AND B`:: +To select the events that match both `A` and `B` you can use: ++ +[source,xml] +---- + + + + +---- + +`A OR B`:: +To select the events that match `A` or `B` we can replace `NEUTRAL` with `ACCEPT` in the `onMatch` attribute: ++ +[source,xml] +---- + + + + +---- + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-CompositeFilter[📖 Plugin reference for `Filters`] [#collection] == Collection -Log4j bundles several predefined filters to assist in several common deployment use cases. -Following sections explain all these in detail. +Log4j Core provides the following filters out-of-the-box. 
+ +[#timestamp-filters] +=== Timestamp filters + +Timestamp filters use the timestamp of log events to decide whether to accept them or not. [#BurstFilter] -=== BurstFilter +==== `BurstFilter` -The BurstFilter provides a mechanism to control the rate at which LogEvents are processed by silently discarding events after the maximum limit has been reached. +The `BurstFilter` limits the rate of log events. +The rate limit is only applied to log events less severe than a configured log level. -.Burst Filter Parameters -[cols="1m,1,4"] +Besides the <>, +the `BurstFilter` supports the following parameters: + +.`BurstFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description - -|level -|String -|Level of messages to be filtered. Anything at or below -this level will be filtered out if `maxBurst` has been exceeded. The -default is WARN meaning any messages that are higher than warn will be -logged regardless of the size of a burst. - -|rate -|float -|The average number of events per second to allow. - -|maxBurst -|integer -|The maximum number of events that can occur before -events are filtered for exceeding the average rate. The default is 10 -times the rate. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +|Attribute | Type | Default value | Description + +| level +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#WARN[`WARN`] +| The rate limit only applies to log events less severe than this level. +Events at least as severe as this level will always match. + +| rate +| `float` +| `10` +| The average number of events per second to allow. 
+ +| maxBurst +| `long` +| `10 × rate` +| The maximum number of events that can be logged at once, without incurring rate limiting. + |=== -A configuration containing the BurstFilter might look like: +[NOTE] +==== +The `BurstFilter` uses the _sliding window log_ algorithm with a window `window` of `maxBurst / rate` seconds. -[source,xml] +The filter maintains a list of recently logged events. +If in the interval of time of duration `window` preceding the current log event, +more than `maxBurst` events have already been logged, rate limiting is applied. + +To control the size of the log files only the `rate` attribute needs to be taken into account. +The `maxBurst` attribute controls the temporal spacing between log events: +lower values of `maxBurst` will give more evenly spaced log events, +while higher values will allow for peaks of activity followed by an absence of log events. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-BurstFilter[📖 Plugin reference for `BurstFilter`] + +[#TimeFilter] +==== `TimeFilter` + +The `TimeFilter` only matches log events emitted during a certain time of the day. + +Besides the <>, +the `TimeFilter` supports the following parameters: + +.`TimeFilter` -- configuration attributes +[cols="1m,2,1,4"] +|=== +| Attribute | Type | Default value | Description + +| start +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/LocalTime.html[`LocalTime`] in `HH:mm:ss` format +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/LocalTime.html#MIN[`LocalTime.MIN`] +| The beginning of the time slot. + +| end +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/LocalTime.html[`LocalTime`] in `HH:mm:ss` format +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/LocalTime.html#MAX[`LocalTime.MAX`] +| The end of the time slot. 
+ +| timezone +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/ZoneId.html[`ZoneId`] +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/time/ZoneId.html#systemDefault--[`ZoneId.systemDefault()`] +| The timezone to use when comparing `start` and `end` to the event timestamp. + +|=== + +As a simple application of this filter, +if you want to forward messages to your console during work hours and to your e-mail account after work hours, +you can use a configuration snippet like: + +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/TimeFilter.xml[`log4j2.xml`] +[source,xml,indent=0] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - +include::example$manual/filters/TimeFilter.xml[tag=filter] ---- -[#CompositeFilter] -=== CompositeFilter - -The CompositeFilter provides a way to specify more than one filter. -It is added to the configuration as a filter element and contains other filters to be evaluated. -The filter element accepts no parameters. 
+JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/TimeFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/TimeFilter.json[tag=filter] +---- -A configuration containing the CompositeFilter might look like: +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/TimeFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/TimeFilter.yaml[tag=filter] +---- -[source,xml] +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/TimeFilter.properties[`log4j2.properties`] +[source,properties,indent=0] ---- - - - - - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - - - - +include::example$manual/filters/TimeFilter.properties[tag=filter] ---- +==== -[#DynamicThresholdFilter] -=== DynamicThresholdFilter +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-TimeFilter[📖 Plugin reference for `TimeFilter`] -The DynamicThresholdFilter allows filtering by log level based on specific attributes. -For example, if the user's loginId is being captured in the ThreadContext Map then it is possible to enable debug logging for only that user. -If the log event does not contain the specified ThreadContext item NEUTRAL will be returned. +[#level-filters] +=== Level filters -.Dynamic Threshold Filter Parameters -[cols="1m,1,4"] +The following filters allow you to filter log events based on their xref:manual/customloglevels.adoc[levels]. + +[#LevelMatchFilter] +==== `LevelMatchFilter` + +The `LevelMatchFilter` matches log events that have exactly a certain log level. + +Besides the <>, +the `LevelMatchFilter` supports the following parameter: + +.`LevelMatchFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description - -|key -|String -|The name of the item in the ThreadContext Map to compare. 
- -|defaultThreshold -|String -|Level of messages to be filtered. The default -threshold only applies if the log event contains the specified -ThreadContext Map item and its value do not match any key in the -key/value pairs. - -|keyValuePair -|KeyValuePair[] -|One or more KeyValuePair elements that -define the matching value for the key and the Level to evaluate when the -key matches. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +| Attribute | Type | Default value | Description + +| level +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#ERROR[`ERROR`] +| The filter only matches log events of this level. + |=== -Here is a sample configuration containing the DynamicThresholdFilter: +[TIP] +==== +If you wish to use a different log file for each log level, you should also use a +xref:manual/appenders.adoc#RoutingAppender[`Routing` appender] together with the +xref:manual/lookups.adoc#EventLookup[`${event:Level}` lookup]. +Such a solution will ensure that: -[source,xml] ----- - - - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- +* you don't forget any log level (Log4j supports xref:manual/customloglevels.adoc[custom levels]). +* you don't need to configure an appender for each level separately. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-LevelMatchFilter[📖 Plugin reference for `LevelMatchFilter`] [#LevelRangeFilter] -=== LevelRangeFilter +==== `LevelRangeFilter` + +The `LevelRangeFilter` matches log events with a log level within a configured range. 
+ +Besides the <>, +the `LevelRangeFilter` supports the following parameter: + +.`LevelRangeFilter` -- configuration attributes +[cols="1m,1,1,4"] +|=== +| Attribute | Type | Default value | Description + +| minLevel +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#OFF[`OFF`] +| The filter only matches log events at most as severe as this level. -`LevelRangeFilter` allows filtering against a level range, where levels get compared by their associated integral values; `OFF` has an integral value of 0, `FATAL` 100, `ERROR` 200, and so on. +| maxLevel +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#ALL[`ALL`] +| The filter only matches log events at least as severe as this level. -.`LevelRangeFilter` parameters -[cols="1m,1m,4"] |=== -|Parameter Name |Type |Description -|minLevel -|Level -|the minimum level threshold (defaults to `OFF`, which has an integral value of 0) +[TIP] +==== +Make sure not to invert the bounds of the range. +Starting from the smallest level, xref:manual/customloglevels.adoc[the Log4j API defines]: `OFF`, `FATAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE` and `ALL`. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-LevelRangeFilter[📖 Plugin reference for `LevelRangeFilter`] + +[#ThresholdFilter] +==== `ThresholdFilter` -|maxLevel -|Level -|the maximum level threshold (defaults to `ALL`, which has an integral value of `Integer.MAX_VALUE`) +The `ThresholdFilter` matches log events at least as severe as a configured level. 
-|onMatch -|Filter.Result -|the result to return on a match, where allowed values are `ACCEPT`, `DENY`, or `NEUTRAL` (default) +Besides the <>, +the `ThresholdFilter` supports the following parameter: -|onMismatch -|Filter.Result -|the result to return on a mismatch, where allowed values are `ACCEPT`, `DENY` (default), or `NEUTRAL` +.`ThresholdFilter`—configuration attributes +[cols="1m,1,1,4"] |=== +| Attribute | Type | Default value | Description -In the following example configuration, a `LevelRangeFilter` is configured with `maxLevel` set to `INFO`. -The filter will return `onMismatch` result (i.e., `DENY`, the default) for log events of level with higher integral values than `INFO`; i.e., `DEBUG`, `TRACE`, etc. +| level +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#OFF[`OFF`] +| The filter only matches log events at least as severe as this level. -.Example configuration using `LevelRangeFilter` -[source,xml] ----- - - - - - - - - - - - - - - ----- +|=== -[#MapFilter] -=== MapFilter +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-ThresholdFilter[📖 Plugin reference for `ThresholdFilter`] + +[#DynamicThresholdFilter] +==== `DynamicThresholdFilter` -The MapFilter allows filtering against data elements that are in a MapMessage. +The `DynamicThresholdFilter` is a variant of <>, +which uses a different threshold for each log event. +The effective threshold to use is determined by querying the +xref:manual/thread-context.adoc[context data] +of the log event. +For each log event: -.Map Filter Parameters +. The filter retrieves the value of `key` in the context data map. +. The filter checks the list of nested +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +configuration elements to decide which level to apply. 
+ +Besides the <>, +the `DynamicThresholdFilter` supports the following parameters: + +.`DynamicThresholdFilter`—configuration attributes +[cols="1m,1,1,4"] +|=== +| Attribute | Type | Default value | Description + +| key +| `String` +| +| The key to a value in the context map of the log event. + +**Required** + +| defaultThreshold +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html[`Level`] +| link:../javadoc/log4j-api/org/apache/logging/log4j/Level.html#ERROR[`ERROR`] +| Threshold to apply to log events that don't have a corresponding `KeyValuePair`. + +|=== + +.`DynamicThresholdFilter`—nested elements [cols="1m,1,4"] |=== -|Parameter Name |Type |Description - -|keyValuePair -|KeyValuePair[] -|One or more KeyValuePair elements that -define the key in the map and the value to match. If the same key is -specified more than once then the check for that key will automatically -be an "or" since a Map can only contain a single value. - -|operator -|String -|If the operator is "or" then a match by any one of -the key/value pairs will be considered to be a match, otherwise all the -key/value pairs must match. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +| Type | Multiplicity | Description + +| xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +| One or more +| Associates a log level with the context map value associated with `key`. 
+ |=== -As in this configuration, the MapFilter can be used to log particular events: +For example, if `loginId` contains the login of the current user, +you can use this configuration to apply different thresholds to different users: -[source,xml] +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/DynamicThresholdFilter.xml[`log4j2.xml`] +[source,xml,indent=0] +---- +include::example$manual/filters/DynamicThresholdFilter.xml[tag=filter] ---- - - - - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- - -This sample configuration will exhibit the same behavior as the preceding example since the only logger configured is the root. -[source,xml] +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/DynamicThresholdFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/DynamicThresholdFilter.json[tag=filter] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - - - - - - ----- - -This third sample configuration will exhibit the same behavior as the preceding examples since the only logger configured is the root and the root is only configured with a single appender reference. -[source,xml] +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/DynamicThresholdFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - - - - - - +include::example$manual/filters/DynamicThresholdFilter.yaml[tag=filter] ---- +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/DynamicThresholdFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/DynamicThresholdFilter.properties[tag=filter] +---- +==== + +<1> If the `loginId` is `alice` a threshold level of `DEBUG` will be used. +<2> If the `loginId` is `bob` a threshold level of `INFO` will be used. 
+<3> For all the other values of `loginId` a threshold level of `ERROR` will be used. + +[TIP] +==== +You can use Log4j Core's +xref:manual/configuration.adoc#configuration-attribute-monitorInterval[automatic reconfiguration feature] +to modify the ``KeyValuePair``s without restarting your application. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-DynamicThresholdFilter[📖 Plugin reference for `DynamicThresholdFilter`] + +[#marker-filters] +=== Marker filters + +The following filters use the +xref:manual/markers.adoc[log event marker] +to filter log events. + +[#NoMarkerFilter] +==== `NoMarkerFilter` + +The `NoMarkerFilter` matches log events that do not have any markers. + +This filter does not have any additional configuration attribute, +except the <>. + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-NoMarkerFilter[📖 Plugin reference for `NoMarkerFilter`] + [#MarkerFilter] -=== MarkerFilter +==== `MarkerFilter` -The MarkerFilter compares the configured Marker value against the Marker that is included in the LogEvent. -A match occurs when the Marker name matches either the Log Event's Marker or one of its parents. +The `MarkerFilter` matches log events marked with a specific marker or **any** of its descendants. -.Marker Filter Parameters -[cols="1m,1,4"] +Besides the <>, +the `MarkerFilter` supports the following parameter: + +.`MarkerFilter`—configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description +|Attribute | Type | Default value | Description -|marker -|String -|The name of the Marker to compare. +| marker +| link:../javadoc/log4j-api/org/apache/logging/log4j/Marker.html[`Marker`] +| +| The filter only matches log events of marker with the given marker or one of its descendants. -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. 
+**Required** -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. |=== -A sample configuration that only allows the event to be written by the appender if the Marker matches: +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-MarkerFilter[📖 Plugin reference for `MarkerFilter`] -[source,xml] ----- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- +[#message-filters] +=== Message filters -[#MutableThreadContextMapFilter] -=== MutableThreadContextMapFilter +Message filters allow filtering log events based on the +xref:manual/messages.adoc[`Message`] +contained in the log event. -The MutableThreadContextMapFilter or MutableContextMapFilter allows filtering against data elements that are in the current context. -By default, this is the ThreadContext Map. -The values to compare are defined externally and can be periodically polled for changes. +include::partial$manual/log-event.adoc[] -.Mutable Context Map Filter Parameters -[cols="1m,1,4"] +[#RegexFilter] +==== `RegexFilter` + +The `RegexFilter` matches a regular expression against either the result of +link:../javadoc/log4j-api/org/apache/logging/log4j/message/Message.html#getFormat()[`Message.getFormat()`] +or +link:../javadoc/log4j-api/org/apache/logging/log4j/message/Message.html#getFormat()[`Message.getFormattedMessage()`]. +It can be used with all kinds of `Message` implementations. + +Besides the <>, +the `RegexFilter` supports the following parameters: + +.`RegexFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description - -|configLocation -|String -|A file path or URI that points to the configuration. See below for a sample configuration. - -|operator -|String -|If the operator is "or" then a match by any one of -the key/value pairs will be considered to be a match, otherwise all the -key/value pairs must match. 
- -|pollInterval -|int -|The number of seconds to wait before checking to see if the configuration has been modified. When using HTTP or HTTPS the server must support the If-Modified-Since header and return a Last-Modified header containing the date and time the file was last modified. Note that by default only the https, file, and jar protocols are allowed. Support for other protocols can be enabled by specifying them in the `log4j2.Configuration.allowedProtocols` system property - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +|Attribute | Type | Default value | Description + +| regex +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/util/regex/Pattern.html[`Pattern`] +| +| The regular expression used to match log messages. + +**Required** + +| useRawMsg +| `boolean` +| `false` +| If `true` the result of +link:../javadoc/log4j-api/org/apache/logging/log4j/message/Message.html#getFormat()[`Message.getFormat()`] +will be used. +Otherwise, +link:../javadoc/log4j-api/org/apache/logging/log4j/message/Message.html#getFormat()[`Message.getFormattedMessage()`] +is used. + |=== -A sample configuration that only allows the event to be written by the appender if the Marker matches: +[WARNING] +==== +* This filter only matches if the **whole** log message matches the regular expression. -[source,xml] ----- - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - ----- - -The configuration file supplied to the filter should look similar to: +* Setting `useRawMsg` to `false` decreases performance, since it forces the formatting of all log messages, including the disabled ones. 
+==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-RegexFilter[📖 Plugin reference for `RegexFilter`] + +[#StringMatchFilter] +==== `StringMatchFilter` + +The `StringMatchFilter` matches a log event, if its message contains the given string. + +Besides the <>, +the `StringMatchFilter` supports the following parameters: + +.`StringMatchFilter`—configuration attributes +[cols="1m,1,1,4"] +|=== +| Attribute | Type | Default value | Description + +| text +| `String` +| +| The text to look for. + +**Required** + +|=== + +[WARNING] +==== +This filter decreases performance, since it forces the formatting of all log messages, including the disabled ones. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-StringMatchFilter[📖 Plugin reference for `StringMatchFilter`] + +[#map-filters] +=== Map filters + +The following filters match log events based on the content of one of these map structures: + +* The map contained in a +xref:manual/messages.adoc#MapMessage[`MapMessage`] object. +See +link:../javadoc/log4j-api/org/apache/logging/log4j/message/MapMessage.html#getData()[`MapMessage.getData()`] +for details. +* The context data map contained in a +link:../javadoc/log4j-core/org/apache/logging/log4j/core/LogEvent.html[`LogEvent`]. +See +link:../javadoc/log4j-core/org/apache/logging/log4j/core/LogEvent.html#getContextData()[`LogEvent.getContextData()`] +for details. 
+ +[#configuration-map] +==== Configuration map + +These filters are configured with a configuration map of type `Map`, +which, +depending on the filter, +is encoded as either JSON: [source,json] ---- -{ - "configs": { - "loginId": ["rgoers@apache.org", "logging@apache.org"], - "accountNumber": ["30510263"] - } -} +include::example$manual/filters/configs.json[] ---- -[#NoMarkerFilter] -=== NoMarkerFilter +or as a sequence of +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +plugins: + +[source,xml,indent=0] +---- +include::example$manual/filters/ContextMapFilter.xml[tag=kvp] +---- -The NoMarkerFilter checks that there is no marker included in the LogEvent. -A match occurs when there is no marker in the Log Event. +The configuration map associates to each key a list of allowed values for that key. +In the example above the allowed values for the `loginId` key are either `alice` or `bob`. +The only allowed value for the `clientId` key is `1234`. -.No Marker Filter Parameters -[cols="1m,1,3"] +The map filters can work in two matching modes: + +[[matching-mode-and]] +AND:: +A map structure matches +if the value associated with **each** key +that appears in the configuration map is one of the allowed values. + +[[matching-mode-or]] +OR:: +A map structure matches +if the value associated with **at least one** key +that appears in the configuration map is one of the allowed values. + +[#MapFilter] +==== `MapFilter` + +The `MapFilter` allows filtering based on the contents of all +xref:manual/messages.adoc#collection-structured[structured ``Message``s]. + +This filter encodes the <> introduced above as a list of +`KeyValuePair` elements. 
+ +Besides the <>, +the `MapFilter` supports the following parameters: + +.`MapFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description +| Attribute | Type | Default value | Description -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. +| operator +| _enumeration_ +| `AND` +a| Determines the matching mode of the filter. +Can be: + +* <> +* <> -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. |=== -A sample configuration that only allows the event to be written by the appender if no marker is there: +.`MapFilter` -- nested elements +[cols="1m,1,4"] +|=== +| Type | Multiplicity | Description -[source,xml] +| xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +| One or more +| Adds a value as allowed value for a key. +See <> for more details. 
+ +|=== + +For example, +if you want to filter all ``MapMessage``s +that have an `eventType` key with value `authentication` **and** an `eventId` key with value either `login` **or** `logout`, +you can use the following configuration: + +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MapFilter.xml[`log4j2.xml`] +[source,xml,indent=0] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - +include::example$manual/filters/MapFilter.xml[tag=filter] ---- -[#RegexFilter] -=== RegexFilter +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MapFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/MapFilter.json[tag=filter] +---- + +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MapFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/MapFilter.yaml[tag=filter] +---- -The RegexFilter allows the formatted or unformatted message to be compared against a regular expression. +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MapFilter.yaml[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/MapFilter.properties[tag=filter] +---- +==== -.Regex Filter Parameters +[TIP] +==== +You can use Log4j Core's +xref:manual/configuration.adoc#configuration-attribute-monitorInterval[automatic reconfiguration feature] +to modify the ``KeyValuePair``s without restarting your application. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-MapFilter[📖 Plugin reference for `MapFilter`] + +[#StructuredDataFilter] +==== `StructuredDataFilter` + +The `StructuredDataFilter` is a variant of <> that only matches +xref:manual/messages.adoc#StructuredDataMessage[`StructureDataMessage`]s. 
+ +In addition to matching the map structure contained in a `StructuredDataMessage` +(which corresponds to https://datatracker.ietf.org/doc/html/rfc5424#section-6.3.3[RFC 5424 `SD-PARAM` elements]) +it provides the following virtual keys: + +.`StructuredDataFilter` -- virtual keys [cols="1m,1,4"] |=== -|Parameter Name |Type |Description - -|regex -|String -|The regular expression. - -|useRawMsg -|boolean -|If true the unformatted message will be used, -otherwise, the formatted message will be used. The default value is -false. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +| Key | RFC5424 field | Description + +| id +| https://datatracker.ietf.org/doc/html/rfc5424#section-6.3.2[`SD-ID`] +| The +link:../javadoc/log4j-api/org/apache/logging/log4j/message/StructuredDataMessage.html#getId()[`id` field] +of the `StructuredDataMessage`. + +| id.name +| +| The +link:../javadoc/log4j-api/org/apache/logging/log4j/message/StructuredDataId.html#getName()[`name` field] +of the `StructuredDataId` element. + +| type +| https://datatracker.ietf.org/doc/html/rfc5424#section-6.2.7[`MSGID`] +| The +link:../javadoc/log4j-api/org/apache/logging/log4j/message/StructuredDataMessage.html#getType()[`type` field] +of a `StructuredDataMessage`. + +| message +| https://datatracker.ietf.org/doc/html/rfc5424#section-6.4[`MSG`] +| The result of a +link:../javadoc/log4j-api/org/apache/logging/log4j/message/Message.html#getFormat()[`Message.getFormat()`] method call. 
+ |=== -A sample configuration that only allows the event to be written by the appender if it contains the word "test": +The `StructuredDataFilter` encodes the <> introduced above as a list of +`KeyValuePair` and supports the following parameters, +besides the <>: -[source,xml] ----- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- - -[[Script]] -The ScriptFilter executes a script that returns true or false. - -.Script Filter Parameters -[cols="1m,1,4"] +.`StructuredDataFilter`—configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description +| Attribute | Type | Default value | Description -|script -|Script, ScriptFile or ScriptRef -|The Script element that specifies the logic to be executed. +| operator +| _enumeration_ +| `AND` +a| Determines the matching mode of the filter. +Can be: -|onMatch -|String -|Action to take when the script returns true. Can be -ACCEPT, DENY or NEUTRAL. The default value is NEUTRAL. +* <> +* <> -|onMismatch -|String -|Action to take when the filter returns false. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. |=== -.Script Parameters +.`StructuredDataFilter` -- nested elements [cols="1m,1,4"] |=== -|Parameter Name |Type |Description - -|configuration -|Configuration -|The Configuration that owns this -ScriptFilter. - -|level -|Level -|The logging Level associated with the event. Only present -when configured as a global filter. - -|loggerName -|String -|The name of the logger. Only present when -configured as a global filter. - -|logEvent -|LogEvent -|The LogEvent being processed. Not present when -configured as a global filter. - -|marker -|Marker -|The Marker passed on the logging call, if any. Only -present when configured as a global filter. - -|message -|Message -|The Message associated with the logging call. Only -present when configured as a global filter. - -|parameters -|Object[] -|The parameters passed to the logging call. Only -present when configured as a global filter. 
Some Messages include the -parameters as part of the Message. - -|throwable -|Throwable -|The Throwable passed to the logging call, if any. -Only present when configured as a global filter. Som Messages include -Throwable as part of the Message. - -|substitutor -|StrSubstitutor -|The StrSubstitutor is used to replace lookup variables. +| Type | Multiplicity | Description + +| xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +| One or more +| Adds a value as allowed value for a key. +See <> for more details. + |=== -The sample below shows how to declare script fields and then reference them in specific components. -See -xref:manual/appenders.adoc#ScriptCondition[ScriptCondition] for an example of how the `ScriptPlugin` element can be used to embed script code directly in the configuration. +If you want +to match all log messages with an `SD-ID` equal to `authentication` and the value of the `userId` `SD-PARAM` equal to either `alice` or `bob`, +you can use the following configuration: -[source,xml] +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/StructuredDataFilter.xml[`log4j2.xml`] +[source,xml,indent=0] ---- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +include::example$manual/filters/StructuredDataFilter.xml[tag=filter] ---- -[#StructuredDataFilter] -=== StructuredDataFilter +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/StructuredDataFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/StructuredDataFilter.json[tag=filter] +---- -The StructuredDataFilter is a MapFilter that also allows filtering on the event id, type and message. 
+YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/StructuredDataFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/StructuredDataFilter.yaml[tag=filter] +---- -.StructuredData Filter Parameters -[cols="1m,1,4"] -|=== -|Parameter Name |Type |Description - -|keyValuePair -|KeyValuePair[] -|One or more KeyValuePair elements that -define the key in the map and the value to match on. "id", "id.name", -"type", and "message" should be used to match the StructuredDataId, -the name portion of the StructuredDataId, the type, and the formatted -message respectively. If the same key is specified more than once then -the check for that key will automatically be an "or" since a Map can -only contain a single value. - -|operator -|String -|If the operator is "or" then a match by any one of -the key/value pairs will be considered to be a match, otherwise all the -key/value pairs must match. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. - -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/StructuredDataFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/StructuredDataFilter.properties[tag=filter] +---- +==== + +[TIP] +==== +You can use Log4j Core's +xref:manual/configuration.adoc#configuration-attribute-monitorInterval[automatic reconfiguration feature] +to modify the ``KeyValuePair``s without restarting your application. 
+==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-StructuredDataFilter[📖 Plugin reference for `StructuredDataFilter`] + +[#ContextMapFilter] +==== `ContextMapFilter` + +The `ContextMapFilter` works in the same way as the <> above, +except it checks the +xref:manual/thread-context.adoc[context map data] +of the log event instead of the log message. + +This filter also encodes the <> introduced above as a list of +`KeyValuePair` elements. + +Besides the <>, +the `ContextMapFilter` supports the following parameters: + +.`ContextMapFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== +| Attribute | Type | Default value | Description -As in this configuration, the StructuredDataFilter can be used to log particular events: +| operator +| _enumeration_ +| `AND` +a| Determines the matching mode of the filter. +Can be: -[source,xml] ----- - - - - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- - -[#ThreadContextMapFilter] -=== ThreadContextMapFilter - -The ThreadContextMapFilter or ContextMapFilter allows filtering against data elements that are in the current context. -By default, this is the ThreadContext Map. - -.Context Map Filter Parameters +* <> +* <> + +|=== + +.`ContextMapFilter` -- nested elements [cols="1m,1,4"] |=== -|Parameter Name |Type |Description - -|keyValuePair -|KeyValuePair[] -|One or more KeyValuePair elements that -define the key in the map and the value to match. If the same key is -specified more than once then the check for that key will automatically -be an "or" since a Map can only contain a single value. - -|operator -|String -|If the operator is "or" then a match by any one of -the key/value pairs will be considered to be a match, otherwise all the -key/value pairs must match. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. 
- -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +| Type | Multiplicity | Description + +| xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-util-KeyValuePair[`KeyValuePair`] +| One or more +| Adds a value as allowed value for a key. +See <> for more details. + |=== -A configuration containing the ContextMapFilter might look like: +For example, +if the `clientId` and `userId` keys in the context data map identify your client and his end users, +you can filter the log events generated by users `alice` and `bob` of client `1234` using this configuration: -[source,xml] +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ContextMapFilter.xml[`log4j2.xml`] +[source,xml,indent=0] +---- +include::example$manual/filters/ContextMapFilter.xml[tag=filter] ---- - - - - - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - ----- - -The ContextMapFilter can also be applied to a logger for filtering: -[source,xml] +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ContextMapFilter.json[`log4j2.json`] +[source,json,indent=0] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - - - - - - +include::example$manual/filters/ContextMapFilter.json[tag=filter] ---- -[#ThresholdFilter] -=== ThresholdFilter +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ContextMapFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/ContextMapFilter.yaml[tag=filter] +---- -This filter returns the onMatch result if the level in the LogEvent is the same or more specific than the configured level and the `onMismatch` -value otherwise. -For example, if the ThresholdFilter is configured with Level ERROR and the LogEvent contains Level DEBUG then the `onMismatch` -value will be returned since ERROR events are more specific than DEBUG. 
+Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ContextMapFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/ContextMapFilter.properties[tag=filter] +---- +==== -.Threshold Filter Parameters -[cols="1m,1,4"] +[TIP] +==== +You can use Log4j Core's +xref:manual/configuration.adoc#configuration-attribute-monitorInterval[automatic reconfiguration feature] +to modify the ``KeyValuePair``s without restarting your application. +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-ThreadContextMapFilter[📖 Plugin reference for `ContextMapFilter`] + +[#MutableThreadContextMapFilter] +==== `MutableContextMapFilter` + +The `MutableContextMapFilter` is an alternative version of <> that also uses the +xref:manual/thread-context.adoc[context data map] +to filter messages, but externalizes the <>, so it can be kept in a separate location. + +This filter encodes the <> as JSON. +The configuration map must be stored in an **external** location and will be regularly polled for changes. + +Besides the <>, +the `MutableContextMapFilter` supports the following parameters: + +.`MutableContextMapFilter` -- configuration attributes +[cols="1m,1,1,4"] |=== -|Parameter Name |Type |Description +| Attribute | Type | Default value | Description -|level -|String -|A valid Level name to match. +| configLocation +| https://docs.oracle.com/javase/{java-target-version}/docs/api/java/nio/file/Path.html[`Path`] +or +https://docs.oracle.com/javase/{java-target-version}/docs/api/java/net/URI.html[`URI`] +| +| The location of the JSON <>. -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. +**Required** + +| pollInterval +| `long` +| `0` +| +Determines the polling interval used by Log4j to check for changes to the configuration map. + +If set to `0`, polling is disabled. 
- -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. |=== -A sample configuration that only allows the event to be written by the appender if the level matches: +[WARNING] +==== +Unlike other map filters that have a configurable matching mode, +this filter always uses the <> matching mode. +==== + +To use this filter, you need to: -[source,xml] +. Create a JSON configuration map and place it at a known location (e.g. `++https://server.example/configs.json++`): ++ +[source,json] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - +include::example$manual/filters/configs2.json[] ---- ++ +<1> The filter will match all events for client `1234` regardless of the `userId`. +<2> The filter will match all events for the `root` account regardless of the `clientId`. -[#TimeFilter] -=== TimeFilter +. Reference the configuration map location in your configuration file: ++ +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MutableContextMapFilter.xml[`log4j2.xml`] +[source,xml,indent=0] +---- +include::example$manual/filters/MutableContextMapFilter.xml[tag=filter] +---- + +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MutableContextMapFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/MutableContextMapFilter.json[tag=filter] +---- + +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MutableContextMapFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/MutableContextMapFilter.yaml[tag=filter] +---- + +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/MutableContextMapFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/MutableContextMapFilter.properties[tag=filter] +---- +==== + 
+xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-MutableThreadContextMapFilter[📖 Plugin reference for `MutableContextMapFilter`] + +[#other-filters] +=== Other filters + +[#deny-filter] +==== `DenyFilter` + +The `DenyFilter` always returns `DENY`. +It does not support **any** configuration attribute, even the common configuration attributes. + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-DenyAllFilter[📖 Plugin reference for `DenyAllFilter`] + +[#Script] +==== `ScriptFilter` + +The `ScriptFilter` executes a script that must return `true` if the event matches and `false` otherwise. + +Besides the <>, +it accepts a single nested element: + +.`ScriptFilter` -- nested elements +[cols="3,1,4"] +|=== +| Type | Multiplicity | Description + +| +xref:manual/scripts.adoc#Script[`Script`], +xref:manual/scripts.adoc#ScriptFile[`ScriptFile`] +or +xref:manual/scripts.adoc#ScriptRef[`ScriptRef`] +| one +| A reference to the script to execute. -The time filter can be used to restrict the filter to only a certain portion of the day. +See xref:manual/scripts.adoc[Scripts] for more details about scripting. +|=== + +The bindings available to the script depend +on whether the `ScriptFilter` is used as a global filter in the <> or in the remaining stages. +For global filters, the following bindings are available: -.Time Filter Parameters +.Script Bindings -- global filter [cols="1m,1,4"] |=== -|Parameter Name |Type |Description - -|start -|String -|A time in HH:mm:ss format. - -|end -|String -|A time in HH:mm:ss format. Specifying an end time less -than the start time will result in no log entries being written. - -|timezone -|String -|The timezone to use when comparing to the event -timestamp. - -|onMatch -|String -|Action to take when the filter matches. Can be ACCEPT, -DENY or NEUTRAL. The default value is NEUTRAL. 
- -|onMismatch -|String -|Action to take when the filter does not match. Can -be ACCEPT, DENY or NEUTRAL. The default value is DENY. +| Binding name | Type | Description + +| logger +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/Logger.html[`Logger`] +| The logger used in the log statement. + +| level +| xref:manual/customloglevels.adoc[`Level`] +| The level used in the log statement. + +| marker +| xref:manual/markers.adoc[`Marker`] +| The marker used in the log statement. + +| message +| xref:manual/messages.adoc[`Message`] +a| The message used in the log event if the user directly supplied one. +Otherwise: + +* If the logging statement contained an `Object` argument, it is wrapped in a +xref:manual/messages.adoc#ObjectMessage[`ObjectMessage`]. +* If the logging statement contained a format `String`, it is wrapped in a +xref:manual/messages.adoc#SimpleMessage[`SimpleMessage`]. + +| parameters +| `Object[]` +| The parameters passed to the logging call. +Some logging calls include the parameters as part of `message`. + +| throwable +| `Throwable` +| The `Throwable` passed to the logging call, if any. +Some logging calls include the `Throwable` as part of `message`. + +| substitutor +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/lookup/StrSubstitutor.html[`StrSubstitutor`] +| The `StrSubstitutor` used to replace lookup variables. + |=== -A sample configuration that only allows the event to be written by the appender from 5:00 to 5:30 am each day using the default timezone: +For the remaining filters, only these bindings are available: -[source,xml] +.Script Bindings -- internal filter +[cols="1m,1,4"] +|=== +| Binding name | Type | Description + +| logEvent +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/LogEvent.html[`LogEvent`] +| The log event being processed. 
+ +| substitutor +| link:../javadoc/log4j-core/org/apache/logging/log4j/core/lookup/StrSubstitutor.html[`StrSubstitutor`] +| The `StrSubstitutor` used to replace lookup variables. + + +|=== + +As an example, if you wish to match only log events that contain a certain exception, +you can use a simple Groovy script: + +.`scripts/local.groovy` +[source,groovy] +---- +include::example$manual/filters/local.groovy[lines=17..-1] +---- + +You can then integrate the script in a Log4j configuration: + +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.xml[`log4j2.xml`] +[source,xml,indent=0] +---- +include::example$manual/filters/ScriptFilter.xml[tag=local] ---- - - - - - - - %d %p %c{1.} [%t] %m%n - - - - - - - - - - + +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/ScriptFilter.json[tag=local] +---- + +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] ---- +include::example$manual/filters/ScriptFilter.yaml[tag=local] +---- + +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/ScriptFilter.properties[tag=local] +---- +==== + +Writing an equivalent **global** script is a little bit more complex, +since you need to take into account all the places where a throwable can be passed as a parameter. 
+The script becomes: + +.`scripts/global.groovy` +[source,groovy] +---- +include::example$manual/filters/global.groovy[lines=17..-1] +---- + +You can use it as a global filter: + +[tabs] +==== +XML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.xml[`log4j2.xml`] +[source,xml,indent=0] +---- +include::example$manual/filters/ScriptFilter.xml[tag=global] +---- + +JSON:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.json[`log4j2.json`] +[source,json,indent=0] +---- +include::example$manual/filters/ScriptFilter.json[tag=global] +---- + +YAML:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.yaml[`log4j2.yaml`] +[source,yaml,indent=0] +---- +include::example$manual/filters/ScriptFilter.yaml[tag=global] +---- + +Properties:: ++ +.Snippet from an example {antora-examples-url}/manual/filters/ScriptFilter.properties[`log4j2.properties`] +[source,properties,indent=0] +---- +include::example$manual/filters/ScriptFilter.properties[tag=global] +---- +==== + +xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-filter-ScriptFilter[📖 Plugin reference for `ScriptFilter`] [#extending] == Extending @@ -1069,8 +1481,12 @@ include::partial$manual/plugin-preliminaries.adoc[] [#extending-filters] === Extending filters -Filter are xref:manual/plugins.adoc[plugins] implementing link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.html[the `Filter` interface]. -We recommend users to extend from link:../javadoc/log4j-core/org/apache/logging/log4j/core/filter/AbstractFilter.html[`AbstractFilter`], which provides implementation convenience. +Filters are xref:manual/plugins.adoc[plugins] +implementing link:../javadoc/log4j-core/org/apache/logging/log4j/core/Filter.html[the `Filter` +interface]. 
+We recommend users +to extend from link:../javadoc/log4j-core/org/apache/logging/log4j/core/filter/AbstractFilter.html[`AbstractFilter`], +which provides implementation convenience. While annotating your filter with `@Plugin`, you need to make sure that * It has a unique `name` attribute across all available `Filter` plugins diff --git a/src/site/antora/modules/ROOT/pages/manual/lookups.adoc b/src/site/antora/modules/ROOT/pages/manual/lookups.adoc index 052f5d575db..76aaecb77de 100644 --- a/src/site/antora/modules/ROOT/pages/manual/lookups.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/lookups.adoc @@ -69,42 +69,25 @@ be formatted as specified. [#DockerLookup] == Docker Lookup -The DockerLookup can be used to lookup attributes from the Docker container the application is running in. - -Log4j Docker provides access to the following container attributes: -[cols="1m,4a"] +[cols="1h,4"] |=== -|Key |Description - -|containerId -|The full id assigned to the container. - -|containerName -|The name assigned to the container. - -|imageId -|The id assigned to the image. - -|imageName -|The name assigned to the image. - -|shortContainerId -|The first 12 characters of the container id. - -|shortImageId -|The first 12 characters of the image id. +|Lookup ID |`docker` +|Dependency |xref:log4j-docker.adoc[] |=== -[source,xml] ----- - - - - - ----- +Docker Lookup queries https://docs.docker.com/engine/api/[the API of the Docker Engine] running your container. 
+It supports the retrieval of the following container attributes: -[source,xml] ---- - - - - - ---- +[%header,cols="1m,4"] +|=== +|Key |Description +|containerId |Container ID +|containerName |Container name +|imageId |Container image ID +|imageName |Container image name +|shortContainerId |The first 12 characters of the container ID +|shortImageId |The first 12 characters of the container image ID +|=== -This Lookup is subject to the requirements listed at xref:log4j-docker.adoc[Log4j Docker Support] [id=environment-lookup] == [[EnvironmentLookup]] Environment Lookup @@ -310,7 +293,14 @@ https://docs.oracle.com/javase/{java-target-version}/docs/api/java/lang/manageme [#KubernetesLookup] == Kubernetes Lookup -For retrieving attributes using Fabric8's Kubernetes Client, see their https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md[Kubernetes Log4j Lookup]. +[cols="1h,4"] +|=== +|Lookup ID |`k8s` +|Dependency |{log4j-kubernetes-url}[Log4j Kubernetes of Fabric8] +|=== + +Kubernetes Lookup queries https://kubernetes.io/docs/concepts/overview/kubernetes-api/[the Kubernetes API] to retrieve certain information about the current container and its environment. +Kubernetes Lookup is distributed as a part of Fabric8's Kubernetes Client; refer to {log4j-kubernetes-url}[its website] for details. 
[#Log4jConfigLookup] == Log4j Configuration Location Lookup diff --git a/src/site/antora/modules/ROOT/pages/manual/messages.adoc b/src/site/antora/modules/ROOT/pages/manual/messages.adoc index 1c7ba84aef0..187b6d37e0c 100644 --- a/src/site/antora/modules/ROOT/pages/manual/messages.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/messages.adoc @@ -106,7 +106,7 @@ Due to checks involved, `FormattedMessage` has an extra performance overhead com ==== [#LocalizedMessage] -==== LocalizedMessage +==== `LocalizedMessage` link:../javadoc/log4j-api/org/apache/logging/log4j/message/LocalizedMessage.html[`LocalizedMessage`] incorporates a `ResourceBundle`, and allows the message pattern parameter to be the key to the message pattern in the bundle. If no bundle is specified, `LocalizedMessage` will attempt to locate a bundle with the name of the `Logger` used to log the event. diff --git a/src/site/antora/modules/ROOT/pages/manual/scripts.adoc b/src/site/antora/modules/ROOT/pages/manual/scripts.adoc index eb25abceada..e231b8f70bf 100644 --- a/src/site/antora/modules/ROOT/pages/manual/scripts.adoc +++ b/src/site/antora/modules/ROOT/pages/manual/scripts.adoc @@ -30,6 +30,7 @@ configuration property. Each component that allows scripts can contain on of the following configuration elements: +[[Script]] Script:: + This element specifies the content of the script directly and has: @@ -46,6 +47,7 @@ The element can be assigned a name using the `name` configuration attribute. See also xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-script-Script[Plugin reference]. +[[ScriptFile]] ScriptFile:: + This element points to an external script file and has: @@ -63,6 +65,7 @@ The element can be assigned a name using the `name` configuration attribute. See also xref:plugin-reference.adoc#org-apache-logging-log4j_log4j-core_org-apache-logging-log4j-core-script-ScriptFile[Plugin reference]. 
+[[ScriptRef]] ScriptRef:: + This element references a **named** script from the global diff --git a/src/site/antora/modules/ROOT/partials/manual/systemproperties/properties-async.adoc b/src/site/antora/modules/ROOT/partials/manual/systemproperties/properties-async.adoc index cbbcb4cfba8..01f6386ff8f 100644 --- a/src/site/antora/modules/ROOT/partials/manual/systemproperties/properties-async.adoc +++ b/src/site/antora/modules/ROOT/partials/manual/systemproperties/properties-async.adoc @@ -68,5 +68,5 @@ Discard:: when the queue is full, it drops the events whose level is equal or le |=== Determines the threshold level used by a `Discard` queue full policy. -Log events whose level is equal or less specific than the threshold level will be discarded during a queue full event. +Log events whose level is not more severe than the threshold level will be discarded during a queue full event. See also <>. diff --git a/src/site/resources/.htaccess b/src/site/resources/.htaccess index 3787ea1b64d..b40af8fb968 100644 --- a/src/site/resources/.htaccess +++ b/src/site/resources/.htaccess @@ -42,6 +42,7 @@ RewriteRule "^log4j(-jakarta)?-web(/index)?\.html$" "manual/webapp.html" [R=perm RewriteRule "^log4j-jcl(/index)?\.html$" "manual/installation.html#impl-core-bridge-jcl" [R=permanent,NE] RewriteRule "^log4j-jmx-gui(/index)?\.html$" "/log4j/jmx-gui/latest/index.html" [R=permanent] RewriteRule "^log4j-jpl(/index)?\.html$" "manual/installation.html#impl-core-bridge-jpl" [R=permanent,NE] +RewriteRule "^log4j-kubernetes(/index)?\.html$" "https://github.com/fabric8io/kubernetes-client/blob/main/doc/KubernetesLog4j.md" [R=permanent] RewriteRule "^log4j-mongodb3(/index)?\.html$" "manual/appenders.html#NoSQLAppenderMongoDB" [R=permanent,NE] RewriteRule "^log4j-mongodb4(/index)?\.html$" "manual/appenders.html#log4j-mongodb4" [R=permanent,NE] RewriteRule "^log4j-slf4j2?-impl(/index)?\.html$" "manual/installation.html#impl-core-bridge-slf4j" [R=permanent,NE]